diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index 27f9f77..f01dc9c 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -1,5 +1,5 @@ { - "name": "every-marketplace", + "name": "compound-engineering-plugin", "owner": { "name": "Kieran Klaassen", "url": "https://github.com/kieranklaassen" @@ -11,15 +11,15 @@ "plugins": [ { "name": "compound-engineering", - "description": "AI-powered development tools that get smarter with every use. Make each unit of engineering work easier than the last. Includes 25 specialized agents, 25 commands, and 24 skills.", - "version": "2.39.0", + "description": "AI-powered development tools that get smarter with every use. Make each unit of engineering work easier than the last. Includes 30 specialized agents, 56 skills, and 7 commands.", + "version": "2.40.0", "author": { "name": "Kieran Klaassen", "url": "https://github.com/kieranklaassen", "email": "kieran@every.to" }, "homepage": "https://github.com/EveryInc/compound-engineering-plugin", - "tags": ["ai-powered", "compound-engineering", "workflow-automation", "code-review", "fastapi", "python", "knowledge-management"], + "tags": ["ai-powered", "compound-engineering", "workflow-automation", "code-review", "quality", "knowledge-management", "image-generation"], "source": "./plugins/compound-engineering" }, { diff --git a/.cursor-plugin/marketplace.json b/.cursor-plugin/marketplace.json new file mode 100644 index 0000000..e9adfaa --- /dev/null +++ b/.cursor-plugin/marketplace.json @@ -0,0 +1,25 @@ +{ + "name": "compound-engineering", + "owner": { + "name": "Kieran Klaassen", + "email": "kieran@every.to", + "url": "https://github.com/kieranklaassen" + }, + "metadata": { + "description": "Cursor plugin marketplace for Every Inc plugins", + "version": "1.0.0", + "pluginRoot": "plugins" + }, + "plugins": [ + { + "name": "compound-engineering", + "source": "compound-engineering", + "description": "AI-powered development tools 
that get smarter with every use. Includes specialized agents, commands, skills, and Context7 MCP." + }, + { + "name": "coding-tutor", + "source": "coding-tutor", + "description": "Personalized coding tutorials with spaced repetition quizzes using your real codebase." + } + ] +} diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index cbecdac..5dff6bc 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,18 +1,27 @@ name: Publish to npm on: - release: - types: [published] + push: + branches: [main] + workflow_dispatch: jobs: publish: runs-on: ubuntu-latest permissions: - contents: read + contents: write id-token: write + issues: write + pull-requests: write + + concurrency: + group: publish-${{ github.ref }} + cancel-in-progress: false steps: - uses: actions/checkout@v6 + with: + fetch-depth: 0 - name: Setup Bun uses: oven-sh/setup-bun@v2 @@ -20,18 +29,19 @@ jobs: bun-version: latest - name: Install dependencies - run: bun install + run: bun install --frozen-lockfile - name: Run tests run: bun test - - name: Setup Node.js for npm publish + - name: Setup Node.js for release uses: actions/setup-node@v4 with: - node-version: "20" - registry-url: "https://registry.npmjs.org" + # npm trusted publishing requires Node 22.14.0+. 
+ node-version: "24" - - name: Publish to npm - run: npm publish --provenance --access public + - name: Release env: - NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NPM_TOKEN: ${{ secrets.NPM_TOKEN }} + run: npx semantic-release diff --git a/.gitignore b/.gitignore index f8f7b97..dae7aba 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ node_modules/ .codex/ todos/ +.worktrees diff --git a/.releaserc.json b/.releaserc.json new file mode 100644 index 0000000..cad12f6 --- /dev/null +++ b/.releaserc.json @@ -0,0 +1,36 @@ +{ + "branches": [ + "main" + ], + "tagFormat": "v${version}", + "plugins": [ + "@semantic-release/commit-analyzer", + "@semantic-release/release-notes-generator", + [ + "@semantic-release/changelog", + { + "changelogTitle": "# Changelog\n\nAll notable changes to the `@every-env/compound-plugin` CLI tool will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),\nand this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n\nRelease numbering now follows the repository `v*` tag line. Starting at `v2.34.0`, the root CLI package and this changelog stay on that shared version stream. Older entries below retain the previous `0.x` CLI numbering." + } + ], + "@semantic-release/npm", + [ + "@semantic-release/git", + { + "assets": [ + "CHANGELOG.md", + "package.json" + ], + "message": "chore(release): ${nextRelease.version} [skip ci]" + } + ], + [ + "@semantic-release/github", + { + "successComment": false, + "failCommentCondition": false, + "labels": false, + "releasedLabels": false + } + ] + ] +} diff --git a/AGENTS.md b/AGENTS.md index 471b900..5e730a5 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -7,7 +7,8 @@ This repository contains a Bun/TypeScript CLI that converts Claude Code plugins - **Branching:** Create a feature branch for any non-trivial change. 
If already on the correct branch for the task, keep using it; do not create additional branches or worktrees unless explicitly requested. - **Safety:** Do not delete or overwrite user data. Avoid destructive commands. - **Testing:** Run `bun test` after changes that affect parsing, conversion, or output. -- **Output Paths:** Keep OpenCode output at `opencode.json` and `.opencode/{agents,skills,plugins}`. +- **Release versioning:** The root CLI package (`package.json`, root `CHANGELOG.md`, and repo `v*` tags) uses one shared release line managed by semantic-release on `main`. Do not start or maintain a separate root CLI version stream. Use conventional commits and let release automation write the next root package version. Keep the root changelog header block in sync with `.releaserc.json` `changelogTitle` so generated release entries stay under the header. Embedded marketplace plugin metadata (`plugins/compound-engineering/.claude-plugin/plugin.json` and `.claude-plugin/marketplace.json`) is a separate version surface and may differ, but contributors should not guess or hand-bump release versions for it in normal PRs. The automated release process decides the next plugin/marketplace releases and changelog entries after deciding which merged changes ship together. +- **Output Paths:** Keep OpenCode output at `opencode.json` and `.opencode/{agents,skills,plugins}`. For OpenCode, command go to `~/.config/opencode/commands/.md`; `opencode.json` is deep-merged (never overwritten wholesale). - **ASCII-first:** Use ASCII unless the file already contains Unicode. ## Adding a New Target Provider (e.g., Codex) @@ -46,3 +47,17 @@ Add a new provider when at least one of these is true: - You can write fixtures + tests that validate the mapping. Avoid adding a provider if the target spec is unstable or undocumented. 
+ +## Agent References in Skills + +When referencing agents from within skill SKILL.md files (e.g., via the `Agent` or `Task` tool), always use the **fully-qualified namespace**: `compound-engineering::`. Never use the short agent name alone. + +Example: +- `compound-engineering:research:learnings-researcher` (correct) +- `learnings-researcher` (wrong - will fail to resolve at runtime) + +This prevents resolution failures when the plugin is installed alongside other plugins that may define agents with the same short name. + +## Repository Docs Convention + +- **Plans** live in `docs/plans/` and track implementation progress. diff --git a/CHANGELOG.md b/CHANGELOG.md index 27f5f05..e725990 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,214 @@ All notable changes to the `@every-env/compound-plugin` CLI tool will be documen The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +Release numbering now follows the repository `v*` tag line. Starting at `v2.34.0`, the root CLI package and this changelog stay on that shared version stream. Older entries below retain the previous `0.x` CLI numbering. 
+ +## [2.37.1](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.37.0...v2.37.1) (2026-03-16) + + +### Bug Fixes + +* **compound:** remove overly defensive context budget precheck ([#278](https://github.com/EveryInc/compound-engineering-plugin/issues/278)) ([#279](https://github.com/EveryInc/compound-engineering-plugin/issues/279)) ([84ca52e](https://github.com/EveryInc/compound-engineering-plugin/commit/84ca52efdb198c7c8ae6c94ca06fc02d2c3ef648)) + +# [2.37.0](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.36.5...v2.37.0) (2026-03-15) + + +### Features + +* sync agent-browser skill with upstream vercel-labs/agent-browser ([24860ec](https://github.com/EveryInc/compound-engineering-plugin/commit/24860ec3f1f1e7bfdee0f4408636ada1a3bb8f75)) + +## [2.36.5](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.36.4...v2.36.5) (2026-03-15) + + +### Bug Fixes + +* **create-agent-skills:** remove literal dynamic context directives that break skill loading ([4b4d1ae](https://github.com/EveryInc/compound-engineering-plugin/commit/4b4d1ae2707895d6d4fd2e60a64d83ca50f094a6)), closes [anthropics/claude-code#27149](https://github.com/anthropics/claude-code/issues/27149) [#13655](https://github.com/EveryInc/compound-engineering-plugin/issues/13655) + +## [2.36.4](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.36.3...v2.36.4) (2026-03-14) + + +### Bug Fixes + +* **skills:** use fully-qualified agent namespace in Task invocations ([026602e](https://github.com/EveryInc/compound-engineering-plugin/commit/026602e6247d63a83502b80e72cd318232a06af7)), closes [#251](https://github.com/EveryInc/compound-engineering-plugin/issues/251) + +## [2.36.3](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.36.2...v2.36.3) (2026-03-13) + + +### Bug Fixes + +* **targets:** nest colon-separated command names into directories 
([a84682c](https://github.com/EveryInc/compound-engineering-plugin/commit/a84682cd35e94b0408f6c6a990af0732c2acf03f)), closes [#226](https://github.com/EveryInc/compound-engineering-plugin/issues/226) + +## [2.36.2](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.36.1...v2.36.2) (2026-03-13) + + +### Bug Fixes + +* **plan:** remove deprecated /technical_review references ([0ab9184](https://github.com/EveryInc/compound-engineering-plugin/commit/0ab91847f278efba45477462d8e93db5f068e058)), closes [#244](https://github.com/EveryInc/compound-engineering-plugin/issues/244) + +## [2.36.1](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.36.0...v2.36.1) (2026-03-13) + + +### Bug Fixes + +* **agents:** update learnings-researcher model from haiku to inherit ([30852b7](https://github.com/EveryInc/compound-engineering-plugin/commit/30852b72937091b0a85c22b7c8c45d513ab49fd1)), closes [#249](https://github.com/EveryInc/compound-engineering-plugin/issues/249) + +# [2.36.0](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.35.0...v2.36.0) (2026-03-11) + + +### Bug Fixes + +* **hooks:** wrap PreToolUse handlers in try-catch to prevent parallel tool call crashes ([598222e](https://github.com/EveryInc/compound-engineering-plugin/commit/598222e11cb2206a2e3347cb5dd38cacdc3830df)), closes [#85](https://github.com/EveryInc/compound-engineering-plugin/issues/85) +* **install:** merge config instead of overwriting on opencode target ([1db7680](https://github.com/EveryInc/compound-engineering-plugin/commit/1db76800f91fefcc1bb9c1798ef273ddd0b65f5c)), closes [#125](https://github.com/EveryInc/compound-engineering-plugin/issues/125) +* **review:** add serial mode to prevent context limit crashes ([d96671b](https://github.com/EveryInc/compound-engineering-plugin/commit/d96671b9e9ecbe417568b2ce7f7fa4d379c2bec2)), closes [#166](https://github.com/EveryInc/compound-engineering-plugin/issues/166) + + +### Features + +* **compound:** add context 
budget precheck and compact-safe mode ([c4b1358](https://github.com/EveryInc/compound-engineering-plugin/commit/c4b13584312058cb8db3ad0f25674805bbb91b2d)), closes [#198](https://github.com/EveryInc/compound-engineering-plugin/issues/198) +* **plan:** add daily sequence number to plan filenames ([e94ca04](https://github.com/EveryInc/compound-engineering-plugin/commit/e94ca0409671efcfa2d4a8fcb2d60b79a848fd85)), closes [#135](https://github.com/EveryInc/compound-engineering-plugin/issues/135) +* **plugin:** release v2.39.0 with community contributions ([d2ab6c0](https://github.com/EveryInc/compound-engineering-plugin/commit/d2ab6c076882a4dacaa787c0a6f3c9d555d38af0)) + +# [2.35.0](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.34.7...v2.35.0) (2026-03-10) + + +### Bug Fixes + +* **test-browser:** detect dev server port from project config ([94aedd5](https://github.com/EveryInc/compound-engineering-plugin/commit/94aedd5a7b6da4ce48de994b5a137953c0fd21c3)), closes [#164](https://github.com/EveryInc/compound-engineering-plugin/issues/164) + + +### Features + +* **compound:** add context budget precheck and compact-safe mode ([7266062](https://github.com/EveryInc/compound-engineering-plugin/commit/726606286873c4059261a8c5f1b75c20fe11ac77)), closes [#198](https://github.com/EveryInc/compound-engineering-plugin/issues/198) +* **plan:** add daily sequence number to plan filenames ([4fc6ddc](https://github.com/EveryInc/compound-engineering-plugin/commit/4fc6ddc5db3e2b4b398c0ffa0c156e1177b35d05)), closes [#135](https://github.com/EveryInc/compound-engineering-plugin/issues/135) + +## [2.34.7](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.34.6...v2.34.7) (2026-03-10) + + +### Bug Fixes + +* **test-browser:** detect dev server port from project config ([50cb89e](https://github.com/EveryInc/compound-engineering-plugin/commit/50cb89efde7cee7d6dcd42008e6060e1bec44fcc)), closes 
[#164](https://github.com/EveryInc/compound-engineering-plugin/issues/164) + +## [2.34.6](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.34.5...v2.34.6) (2026-03-10) + + +### Bug Fixes + +* **mcp:** add API key auth support for Context7 server ([c649cfc](https://github.com/EveryInc/compound-engineering-plugin/commit/c649cfc17f895b58babf737dfdec2f6cc391e40a)), closes [#153](https://github.com/EveryInc/compound-engineering-plugin/issues/153) + +## [2.34.5](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.34.4...v2.34.5) (2026-03-10) + + +### Bug Fixes + +* **lfg:** enforce plan phase with explicit step gating ([b07f43d](https://github.com/EveryInc/compound-engineering-plugin/commit/b07f43ddf59cd7f2fe54b2e0a00d2b5b508b7f11)), closes [#227](https://github.com/EveryInc/compound-engineering-plugin/issues/227) + +## [2.34.4](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.34.3...v2.34.4) (2026-03-04) + + +### Bug Fixes + +* **openclaw:** emit empty configSchema in plugin manifests ([4e9899f](https://github.com/EveryInc/compound-engineering-plugin/commit/4e9899f34693711b8997cf73eaa337f0da2321d6)), closes [#224](https://github.com/EveryInc/compound-engineering-plugin/issues/224) + +## [2.34.3](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.34.2...v2.34.3) (2026-03-03) + + +### Bug Fixes + +* **release:** keep changelog header stable ([2fd29ff](https://github.com/EveryInc/compound-engineering-plugin/commit/2fd29ff6ed99583a8539b7a1e876194df5b18dd6)) + +## [2.34.2](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.34.1...v2.34.2) (2026-03-03) + +### Bug Fixes + +* **release:** add package repository metadata ([eab77bc](https://github.com/EveryInc/compound-engineering-plugin/commit/eab77bc5b5361dc73e2ec8aa4678c8bb6114f6e7)) + +## [2.34.1](https://github.com/EveryInc/compound-engineering-plugin/compare/v2.34.0...v2.34.1) (2026-03-03) + +### Bug Fixes + +* **release:** align cli 
versioning with repo tags ([7c58eee](https://github.com/EveryInc/compound-engineering-plugin/commit/7c58eeeec6cf33675cbe2b9639c7d69b92ecef60)) + +## [2.34.0] - 2026-03-03 + +### Added + +- **Sync parity across supported providers** — `sync` now uses a shared target registry and supports MCP sync for Codex, Droid, Gemini, Copilot, Pi, Windsurf, Kiro, and Qwen, with OpenClaw kept validation-gated for skills-only sync. +- **Personal command sync** — Personal Claude commands from `~/.claude/commands/` now sync into provider-native command surfaces, including Codex prompts and generated skills, Gemini TOML commands, OpenCode command markdown, Windsurf workflows, and converted skills where that is the closest available equivalent. + +### Changed + +- **Global user config targets** — Copilot sync now writes to `~/.copilot/` and Gemini sync writes to `~/.gemini/`, matching current documented user-level config locations. +- **Gemini skill deduplication** — Gemini sync now avoids mirroring skills that Gemini already resolves from `~/.agents/skills`, preventing duplicate skill conflict warnings after sync. + +### Fixed + +- **Safe skill sync replacement** — When a real directory already exists at a symlink target (for example `~/.config/opencode/skills/proof`), sync now logs a warning and skips instead of throwing an error. 
+ +--- + +## [0.12.0] - 2026-03-01 + +### Added + +- **Auto-detect install targets** — `install --to all` and `convert --to all` auto-detect installed AI coding tools and install to all of them in one command +- **Gemini sync** — `sync --target gemini` symlinks personal skills to `.gemini/skills/` and merges MCP servers into `.gemini/settings.json` +- **Sync all targets** — `sync --target all` syncs personal config to all detected tools +- **Tool detection utility** — Checks config directories for OpenCode, Codex, Droid, Cursor, Pi, and Gemini + +--- + +## [0.11.0] - 2026-03-01 + +### Added + +- **OpenClaw target** — `--to openclaw` converts plugins to OpenClaw format. Agents become `.md` files, commands become `.md` files, pass-through skills copy unchanged, and MCP servers are written to `openclaw-extension.json`. Output goes to `~/.openclaw/extensions//` by default. Use `--openclaw-home` to override. ([#217](https://github.com/EveryInc/compound-engineering-plugin/pull/217)) — thanks [@TrendpilotAI](https://github.com/TrendpilotAI)! +- **Qwen Code target** — `--to qwen` converts plugins to Qwen Code extension format. Agents become `.yaml` files with Qwen-compatible fields, commands become `.md` files, MCP servers write to `qwen-extension.json`, and a `QWEN.md` context file is generated. Output goes to `~/.qwen/extensions//` by default. Use `--qwen-home` to override. ([#220](https://github.com/EveryInc/compound-engineering-plugin/pull/220)) — thanks [@rlam3](https://github.com/rlam3)! +- **Windsurf target** — `--to windsurf` converts plugins to Windsurf format. Claude agents become Windsurf skills (`skills/{name}/SKILL.md`), commands become flat workflows (`global_workflows/{name}.md` for global scope, `workflows/{name}.md` for workspace), and pass-through skills copy unchanged. MCP servers write to `mcp_config.json` (machine-readable, merged with existing config). 
([#202](https://github.com/EveryInc/compound-engineering-plugin/pull/202)) — thanks [@rburnham52](https://github.com/rburnham52)! +- **Global scope support** — New `--scope global|workspace` flag (generic, Windsurf as first adopter). `--to windsurf` defaults to global scope (`~/.codeium/windsurf/`), making installed skills, workflows, and MCP servers available across all projects. Use `--scope workspace` for project-level `.windsurf/` output. +- **`mcp_config.json` integration** — Windsurf converter writes proper machine-readable MCP config supporting stdio, Streamable HTTP, and SSE transports. Merges with existing config (user entries preserved, plugin entries take precedence). Written with `0o600` permissions. +- **Shared utilities** — Extracted `resolveTargetOutputRoot` to `src/utils/resolve-output.ts` and `hasPotentialSecrets` to `src/utils/secrets.ts` to eliminate duplication. + +### Fixed + +- **OpenClaw code injection** — `generateEntryPoint` now uses `JSON.stringify()` for all string interpolation (was escaping only `"`, leaving `\n`/`\\` unguarded). +- **Qwen `plugin.manifest.name`** — context file header was `# undefined` due to using `plugin.name` (which doesn't exist on `ClaudePlugin`); fixed to `plugin.manifest.name`. +- **Qwen remote MCP servers** — curl fallback removed; HTTP/SSE servers are now skipped with a warning (Qwen only supports stdio transport). +- **`--openclaw-home` / `--qwen-home` CLI flags** — wired through to `resolveTargetOutputRoot` so custom home directories are respected. + +--- + +## [0.9.1] - 2026-02-20 + +### Changed + +- **Remove docs/reports and docs/decisions directories** — only `docs/plans/` is retained as living documents that track implementation progress +- **OpenCode commands as Markdown** — commands are now `.md` files with deep-merged config, permissions default to none ([#201](https://github.com/EveryInc/compound-engineering-plugin/pull/201)) — thanks [@0ut5ider](https://github.com/0ut5ider)! 
+- **Fix changelog GitHub link** ([#215](https://github.com/EveryInc/compound-engineering-plugin/pull/215)) — thanks [@XSAM](https://github.com/XSAM)! +- **Update Claude Code install command in README** ([#218](https://github.com/EveryInc/compound-engineering-plugin/pull/218)) — thanks [@ianguelman](https://github.com/ianguelman)! + +--- + +## [0.9.0] - 2026-02-17 + +### Added + +- **Kiro CLI target** — `--to kiro` converts plugins to `.kiro/` format with custom agent JSON configs, prompt files, skills, steering files, and `mcp.json`. Only stdio MCP servers are supported ([#196](https://github.com/EveryInc/compound-engineering-plugin/pull/196)) — thanks [@krthr](https://github.com/krthr)! + +--- + +## [0.8.0] - 2026-02-17 + +### Added + +- **GitHub Copilot target** — `--to copilot` converts plugins to `.github/` format with `.agent.md` files, `SKILL.md` skills, and `copilot-mcp-config.json`. Also supports `sync --target copilot` ([#192](https://github.com/EveryInc/compound-engineering-plugin/pull/192)) — thanks [@brayanjuls](https://github.com/brayanjuls)! +- **Native Cursor plugin support** — Cursor now installs via `/add-plugin compound-engineering` using Cursor's native plugin system instead of CLI conversion ([#184](https://github.com/EveryInc/compound-engineering-plugin/pull/184)) — thanks [@ericzakariasson](https://github.com/ericzakariasson)! + +### Removed + +- Cursor CLI conversion target (`--to cursor`) — replaced by native Cursor plugin install + +--- + ## [0.6.0] - 2026-02-12 ### Added diff --git a/CLAUDE.md b/CLAUDE.md index 92ec03d..1df9ec6 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,11 +1,11 @@ -# Every Marketplace - Claude Code Plugin Marketplace +# compound-engineering-plugin - Claude Code Plugin Marketplace This repository is a Claude Code plugin marketplace that distributes the `compound-engineering` plugin to developers building with AI-powered tools. 
## Repository Structure ``` -every-marketplace/ +compound-engineering-plugin/ ├── .claude-plugin/ │ └── marketplace.json # Marketplace catalog (lists available plugins) ├── docs/ # Documentation site (GitHub Pages) @@ -38,6 +38,20 @@ When working on this repository, follow the compounding engineering process: ## Working with This Repository +## CLI Release Versioning + +The repository has two separate version surfaces: + +1. **Root CLI package** — `package.json`, root `CHANGELOG.md`, and repo `v*` tags all share one release line managed by semantic-release on `main`. +2. **Embedded marketplace plugin metadata** — `plugins/compound-engineering/.claude-plugin/plugin.json` and `.claude-plugin/marketplace.json` track the distributed Claude plugin metadata and can differ from the root CLI package version. + +Rules: + +- Do not start a separate root CLI version stream. The root CLI follows the repo tag line. +- Do not hand-bump the root CLI `package.json` or root `CHANGELOG.md` for routine feature work. Use conventional commits and let semantic-release write the released root version back to git. +- Keep the root `CHANGELOG.md` header block aligned with `.releaserc.json` `changelogTitle`. If they drift, semantic-release will prepend release notes above the header. +- Do not guess or hand-bump embedded plugin release versions in routine PRs. The automated release process decides the next plugin/marketplace version and generate release changelog entries after choosing which merged changes ship together. + ### Adding a New Plugin 1. Create plugin directory: `plugins/new-plugin-name/` @@ -79,17 +93,17 @@ The description appears in multiple places and must match everywhere: Format: `"Includes X specialized agents, Y commands, and Z skill(s)."` -#### 3. Update version numbers +#### 3. 
Do not pre-cut release versions -When adding new functionality, bump the version in: +Contributors should not guess the next released plugin version in a normal PR: -- [ ] `plugins/compound-engineering/.claude-plugin/plugin.json` → `version` -- [ ] `.claude-plugin/marketplace.json` → plugin `version` +- [ ] No manual bump in `plugins/compound-engineering/.claude-plugin/plugin.json` → `version` +- [ ] No manual bump in `.claude-plugin/marketplace.json` → plugin `version` #### 4. Update documentation - [ ] `plugins/compound-engineering/README.md` → list all components -- [ ] `plugins/compound-engineering/CHANGELOG.md` → document changes +- [ ] Do not cut a release section in `plugins/compound-engineering/CHANGELOG.md` for a normal feature PR - [ ] `CLAUDE.md` → update structure diagram if needed #### 5. Rebuild documentation site @@ -261,7 +275,7 @@ python -m http.server 8000 1. Install the marketplace locally: ```bash - claude /plugin marketplace add /Users/yourusername/every-marketplace + claude /plugin marketplace add /Users/yourusername/compound-engineering-plugin ``` 2. Install the plugin: diff --git a/PRIVACY.md b/PRIVACY.md new file mode 100644 index 0000000..3edcbfe --- /dev/null +++ b/PRIVACY.md @@ -0,0 +1,38 @@ +# Privacy & Data Handling + +This repository contains: +- a plugin package (`plugins/compound-engineering`) made of markdown/config content +- a CLI (`@every-env/compound-plugin`) that converts and installs plugin content for different AI coding tools + +## Summary + +- The plugin package does not include telemetry or analytics code. +- The plugin package does not run a background service that uploads repository/workspace contents automatically. +- Data leaves your machine only when your host/tooling or an explicitly invoked integration performs a network request. + +## What May Send Data + +1. 
AI host/model providers + +If you run the plugin in tools like Claude Code, Cursor, Gemini CLI, Copilot, Kiro, Windsurf, etc., those tools may send prompts/context/code to their configured model providers. This behavior is controlled by those tools and providers, not by this plugin repository. + +2. Optional integrations and tools + +The plugin includes optional capabilities that can call external services when explicitly used, for example: +- Context7 MCP (`https://mcp.context7.com/mcp`) for documentation lookup +- Proof (`https://www.proofeditor.ai`) when using share/edit flows +- Other opt-in skills (for example image generation or cloud upload workflows) that call their own external APIs/services + +If you do not invoke these integrations, they do not transmit your project data. + +3. Package/installer infrastructure + +Installing dependencies or packages (for example `npm`, `bunx`) communicates with package registries/CDNs according to your package manager configuration. + +## Data Ownership and Retention + +This repository does not operate a backend service for collecting or storing your project/workspace data. Data retention and processing for model prompts or optional integrations are governed by the external services you use. + +## Security Reporting + +If you identify a security issue in this repository, follow the disclosure process in [SECURITY.md](SECURITY.md). 
diff --git a/README.md b/README.md index 3d733df..0eef127 100644 --- a/README.md +++ b/README.md @@ -8,13 +8,19 @@ A Claude Code plugin marketplace featuring the **Compound Engineering Plugin** ## Claude Code Install ```bash -/plugin marketplace add https://github.com/EveryInc/compound-engineering-plugin +/plugin marketplace add EveryInc/compound-engineering-plugin /plugin install compound-engineering ``` -## OpenCode, Codex, Droid, Cursor, Pi & Gemini (experimental) Install +## Cursor Install -This repo includes a Bun/TypeScript CLI that converts Claude Code plugins to OpenCode, Codex, Factory Droid, Cursor, Pi, and Gemini CLI. +```text +/add-plugin compound-engineering +``` + +## OpenCode, Codex, Droid, Pi, Gemini, Copilot, Kiro, Windsurf, OpenClaw & Qwen (experimental) Install + +This repo includes a Bun/TypeScript CLI that converts Claude Code plugins to OpenCode, Codex, Factory Droid, Pi, Gemini CLI, GitHub Copilot, Kiro CLI, Windsurf, OpenClaw, and Qwen Code. ```bash # convert the compound-engineering plugin into OpenCode format @@ -26,36 +32,93 @@ bunx @every-env/compound-plugin install compound-engineering --to codex # convert to Factory Droid format bunx @every-env/compound-plugin install compound-engineering --to droid -# convert to Cursor format -bunx @every-env/compound-plugin install compound-engineering --to cursor - # convert to Pi format bunx @every-env/compound-plugin install compound-engineering --to pi # convert to Gemini CLI format bunx @every-env/compound-plugin install compound-engineering --to gemini + +# convert to GitHub Copilot format +bunx @every-env/compound-plugin install compound-engineering --to copilot + +# convert to Kiro CLI format +bunx @every-env/compound-plugin install compound-engineering --to kiro + +# convert to OpenClaw format +bunx @every-env/compound-plugin install compound-engineering --to openclaw + +# convert to Windsurf format (global scope by default) +bunx @every-env/compound-plugin install compound-engineering --to 
windsurf + +# convert to Windsurf workspace scope +bunx @every-env/compound-plugin install compound-engineering --to windsurf --scope workspace + +# convert to Qwen Code format +bunx @every-env/compound-plugin install compound-engineering --to qwen + +# auto-detect installed tools and install to all +bunx @every-env/compound-plugin install compound-engineering --to all ``` -Local dev: +### Local Development + +When developing and testing local changes to the plugin: + +**Claude Code** — add a shell alias so your local copy loads alongside your normal plugins: + +```bash +# add to ~/.zshrc or ~/.bashrc +alias claude-dev-ce='claude --plugin-dir ~/code/compound-engineering-plugin/plugins/compound-engineering' +``` + +One-liner to append it: + +```bash +echo "alias claude-dev-ce='claude --plugin-dir ~/code/compound-engineering-plugin/plugins/compound-engineering'" >> ~/.zshrc +``` + +Then run `claude-dev-ce` instead of `claude` to test your changes. Your production install stays untouched. + +**Codex** — point the install command at your local path: + +```bash +bunx @every-env/compound-plugin install ./plugins/compound-engineering --to codex +``` + +**Other targets** — same pattern, swap the target: ```bash bun run src/index.ts install ./plugins/compound-engineering --to opencode ``` -OpenCode output is written to `~/.config/opencode` by default, with `opencode.json` at the root and `agents/`, `skills/`, and `plugins/` alongside it. -Codex output is written to `~/.codex/prompts` and `~/.codex/skills`, with each Claude command converted into both a prompt and a skill (the prompt instructs Codex to load the corresponding skill). Generated Codex skill descriptions are truncated to 1024 characters (Codex limit). -Droid output is written to `~/.factory/` with commands, droids (agents), and skills. Claude tool names are mapped to Factory equivalents (`Bash` → `Execute`, `Write` → `Create`, etc.) and namespace prefixes are stripped from commands. 
-Cursor output is written to `.cursor/` with rules (`.mdc`), commands, skills, and `mcp.json`. Agents become "Agent Requested" rules (`alwaysApply: false`) so Cursor's AI activates them on demand. Works with both the Cursor IDE and Cursor CLI (`cursor-agent`) — they share the same `.cursor/` config directory. -Pi output is written to `~/.pi/agent/` by default with prompts, skills, extensions, and `compound-engineering/mcporter.json` for MCPorter interoperability. -Gemini output is written to `.gemini/` with skills (from agents), commands (`.toml`), and `settings.json` (MCP servers). Namespaced commands create directory structure (`workflows:plan` → `commands/workflows/plan.toml`). Skills use the identical SKILL.md standard and pass through unchanged. +
+
Output format details per target
+
+| Target | Output path | Notes |
+|--------|------------|-------|
+| `opencode` | `~/.config/opencode/` | Commands as `.md` files; `opencode.json` MCP config deep-merged; backups made before overwriting |
+| `codex` | `~/.codex/prompts` + `~/.codex/skills` | Each command becomes a prompt + skill pair; descriptions truncated to 1024 chars |
+| `droid` | `~/.factory/` | Tool names mapped (`Bash`→`Execute`, `Write`→`Create`); namespace prefixes stripped |
+| `pi` | `~/.pi/agent/` | Prompts, skills, extensions, and `mcporter.json` for MCPorter interoperability |
+| `gemini` | `.gemini/` | Skills from agents; commands as `.toml`; namespaced commands become directories (`workflows:plan` → `commands/workflows/plan.toml`) |
+| `copilot` | `.github/` | Agents as `.agent.md` with Copilot frontmatter; MCP env vars prefixed with `COPILOT_MCP_` |
+| `kiro` | `.kiro/` | Agents as JSON configs + prompt `.md` files; only stdio MCP servers supported |
+| `openclaw` | `~/.openclaw/extensions/<plugin-name>/` | Entry-point TypeScript skill file; `openclaw-extension.json` for MCP servers |
+| `windsurf` | `~/.codeium/windsurf/` (global) or `.windsurf/` (workspace) | Agents become skills; commands become flat workflows; `mcp_config.json` merged |
+| `qwen` | `~/.qwen/extensions/<plugin-name>/` | Agents as `.yaml`; env vars with placeholders extracted as settings; colon separator for nested commands | All provider targets are experimental and may change as the formats evolve. +
+ ## Sync Personal Config -Sync your personal Claude Code config (`~/.claude/`) to other AI coding tools: +Sync your personal Claude Code config (`~/.claude/`) to other AI coding tools. Omit `--target` to sync to all detected supported tools automatically: ```bash +# Sync to all detected tools (default) +bunx @every-env/compound-plugin sync + # Sync skills and MCP servers to OpenCode bunx @every-env/compound-plugin sync --target opencode @@ -65,33 +128,75 @@ bunx @every-env/compound-plugin sync --target codex # Sync to Pi bunx @every-env/compound-plugin sync --target pi -# Sync to Droid (skills only) +# Sync to Droid bunx @every-env/compound-plugin sync --target droid -# Sync to Cursor (skills + MCP servers) -bunx @every-env/compound-plugin sync --target cursor +# Sync to GitHub Copilot (skills + MCP servers) +bunx @every-env/compound-plugin sync --target copilot + +# Sync to Gemini (skills + MCP servers) +bunx @every-env/compound-plugin sync --target gemini + +# Sync to Windsurf +bunx @every-env/compound-plugin sync --target windsurf + +# Sync to Kiro +bunx @every-env/compound-plugin sync --target kiro + +# Sync to Qwen +bunx @every-env/compound-plugin sync --target qwen + +# Sync to OpenClaw (skills only; MCP is validation-gated) +bunx @every-env/compound-plugin sync --target openclaw + +# Sync to all detected tools +bunx @every-env/compound-plugin sync --target all ``` This syncs: - Personal skills from `~/.claude/skills/` (as symlinks) +- Personal slash commands from `~/.claude/commands/` (as provider-native prompts, workflows, or converted skills where supported) - MCP servers from `~/.claude/settings.json` Skills are symlinked (not copied) so changes in Claude Code are reflected immediately. +Supported sync targets: +- `opencode` +- `codex` +- `pi` +- `droid` +- `copilot` +- `gemini` +- `windsurf` +- `kiro` +- `qwen` +- `openclaw` + +Notes: +- Codex sync preserves non-managed `config.toml` content and now includes remote MCP servers. 
+- Command sync reuses each provider's existing Claude command conversion, so some targets receive prompts or workflows while others receive converted skills. +- Copilot sync writes personal skills to `~/.copilot/skills/` and MCP config to `~/.copilot/mcp-config.json`. +- Gemini sync writes MCP config to `~/.gemini/` and avoids mirroring skills that Gemini already discovers from `~/.agents/skills`, which prevents duplicate-skill warnings. +- Droid, Windsurf, Kiro, and Qwen sync merge MCP servers into the provider's documented user config. +- OpenClaw currently syncs skills only. Personal command sync is skipped because this repo does not yet have a documented user-level OpenClaw command surface, and MCP sync is skipped because the current official OpenClaw docs do not clearly document an MCP server config contract. + ## Workflow ``` -Plan → Work → Review → Compound → Repeat +Brainstorm → Plan → Work → Review → Compound → Repeat ``` | Command | Purpose | |---------|---------| -| `/workflows:plan` | Turn feature ideas into detailed implementation plans | -| `/workflows:work` | Execute plans with worktrees and task tracking | -| `/workflows:review` | Multi-agent code review before merging | -| `/workflows:compound` | Document learnings to make future work easier | +| `/ce:brainstorm` | Explore requirements and approaches before planning | +| `/ce:plan` | Turn feature ideas into detailed implementation plans | +| `/ce:work` | Execute plans with worktrees and task tracking | +| `/ce:review` | Multi-agent code review before merging | +| `/ce:compound` | Document learnings to make future work easier | -Each cycle compounds: plans inform future plans, reviews catch more issues, patterns get documented. +The `brainstorming` skill supports `/ce:brainstorm` with collaborative dialogue to clarify requirements and compare approaches before committing to a plan. 
+ +Each cycle compounds: brainstorms sharpen plans, plans inform future plans, reviews catch more issues, patterns get documented. ## Philosophy diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..d8446f9 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,29 @@ +# Security Policy + +## Supported Versions + +Security fixes are applied to the latest version on `main`. + +## Reporting a Vulnerability + +Please do not open a public issue for undisclosed vulnerabilities. + +Instead, report privately by emailing: +- `kieran@every.to` + +Include: +- A clear description of the issue +- Reproduction steps or proof of concept +- Impact assessment (what an attacker can do) +- Any suggested mitigation + +We will acknowledge receipt as soon as possible and work with you on validation, remediation, and coordinated disclosure timing. + +## Scope Notes + +This repository primarily contains plugin instructions/configuration plus a conversion/install CLI. + +- Plugin instruction content itself does not run as a server process. +- Security/privacy behavior also depends on the host AI tool and any external integrations you explicitly invoke. + +For data-handling details, see [PRIVACY.md](PRIVACY.md). 
diff --git a/bun.lock b/bun.lock index 3a07728..02ca117 100644 --- a/bun.lock +++ b/bun.lock @@ -9,23 +9,1000 @@ "js-yaml": "^4.1.0", }, "devDependencies": { + "@semantic-release/changelog": "^6.0.3", + "@semantic-release/git": "^10.0.1", "bun-types": "^1.0.0", + "semantic-release": "^25.0.3", }, }, }, "packages": { + "@actions/core": ["@actions/core@3.0.0", "", { "dependencies": { "@actions/exec": "^3.0.0", "@actions/http-client": "^4.0.0" } }, "sha512-zYt6cz+ivnTmiT/ksRVriMBOiuoUpDCJJlZ5KPl2/FRdvwU3f7MPh9qftvbkXJThragzUZieit2nyHUyw53Seg=="], + + "@actions/exec": ["@actions/exec@3.0.0", "", { "dependencies": { "@actions/io": "^3.0.2" } }, "sha512-6xH/puSoNBXb72VPlZVm7vQ+svQpFyA96qdDBvhB8eNZOE8LtPf9L4oAsfzK/crCL8YZ+19fKYVnM63Sl+Xzlw=="], + + "@actions/http-client": ["@actions/http-client@4.0.0", "", { "dependencies": { "tunnel": "^0.0.6", "undici": "^6.23.0" } }, "sha512-QuwPsgVMsD6qaPD57GLZi9sqzAZCtiJT8kVBCDpLtxhL5MydQ4gS+DrejtZZPdIYyB1e95uCK9Luyds7ybHI3g=="], + + "@actions/io": ["@actions/io@3.0.2", "", {}, "sha512-nRBchcMM+QK1pdjO7/idu86rbJI5YHUKCvKs0KxnSYbVe3F51UfGxuZX4Qy/fWlp6l7gWFwIkrOzN+oUK03kfw=="], + + "@babel/code-frame": ["@babel/code-frame@7.29.0", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.28.5", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw=="], + + "@babel/helper-validator-identifier": ["@babel/helper-validator-identifier@7.28.5", "", {}, "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q=="], + + "@colors/colors": ["@colors/colors@1.5.0", "", {}, "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ=="], + + "@octokit/auth-token": ["@octokit/auth-token@6.0.0", "", {}, "sha512-P4YJBPdPSpWTQ1NU4XYdvHvXJJDxM6YwpS0FZHRgP7YFkdVxsWcpWGy/NVqlAA7PcPCnMacXlRm1y2PFZRWL/w=="], + + "@octokit/core": ["@octokit/core@7.0.6", "", { "dependencies": { 
"@octokit/auth-token": "^6.0.0", "@octokit/graphql": "^9.0.3", "@octokit/request": "^10.0.6", "@octokit/request-error": "^7.0.2", "@octokit/types": "^16.0.0", "before-after-hook": "^4.0.0", "universal-user-agent": "^7.0.0" } }, "sha512-DhGl4xMVFGVIyMwswXeyzdL4uXD5OGILGX5N8Y+f6W7LhC1Ze2poSNrkF/fedpVDHEEZ+PHFW0vL14I+mm8K3Q=="], + + "@octokit/endpoint": ["@octokit/endpoint@11.0.3", "", { "dependencies": { "@octokit/types": "^16.0.0", "universal-user-agent": "^7.0.2" } }, "sha512-FWFlNxghg4HrXkD3ifYbS/IdL/mDHjh9QcsNyhQjN8dplUoZbejsdpmuqdA76nxj2xoWPs7p8uX2SNr9rYu0Ag=="], + + "@octokit/graphql": ["@octokit/graphql@9.0.3", "", { "dependencies": { "@octokit/request": "^10.0.6", "@octokit/types": "^16.0.0", "universal-user-agent": "^7.0.0" } }, "sha512-grAEuupr/C1rALFnXTv6ZQhFuL1D8G5y8CN04RgrO4FIPMrtm+mcZzFG7dcBm+nq+1ppNixu+Jd78aeJOYxlGA=="], + + "@octokit/openapi-types": ["@octokit/openapi-types@27.0.0", "", {}, "sha512-whrdktVs1h6gtR+09+QsNk2+FO+49j6ga1c55YZudfEG+oKJVvJLQi3zkOm5JjiUXAagWK2tI2kTGKJ2Ys7MGA=="], + + "@octokit/plugin-paginate-rest": ["@octokit/plugin-paginate-rest@14.0.0", "", { "dependencies": { "@octokit/types": "^16.0.0" }, "peerDependencies": { "@octokit/core": ">=6" } }, "sha512-fNVRE7ufJiAA3XUrha2omTA39M6IXIc6GIZLvlbsm8QOQCYvpq/LkMNGyFlB1d8hTDzsAXa3OKtybdMAYsV/fw=="], + + "@octokit/plugin-retry": ["@octokit/plugin-retry@8.1.0", "", { "dependencies": { "@octokit/request-error": "^7.0.2", "@octokit/types": "^16.0.0", "bottleneck": "^2.15.3" }, "peerDependencies": { "@octokit/core": ">=7" } }, "sha512-O1FZgXeiGb2sowEr/hYTr6YunGdSAFWnr2fyW39Ah85H8O33ELASQxcvOFF5LE6Tjekcyu2ms4qAzJVhSaJxTw=="], + + "@octokit/plugin-throttling": ["@octokit/plugin-throttling@11.0.3", "", { "dependencies": { "@octokit/types": "^16.0.0", "bottleneck": "^2.15.3" }, "peerDependencies": { "@octokit/core": "^7.0.0" } }, "sha512-34eE0RkFCKycLl2D2kq7W+LovheM/ex3AwZCYN8udpi6bxsyjZidb2McXs69hZhLmJlDqTSP8cH+jSRpiaijBg=="], + + "@octokit/request": ["@octokit/request@10.0.8", "", { 
"dependencies": { "@octokit/endpoint": "^11.0.3", "@octokit/request-error": "^7.0.2", "@octokit/types": "^16.0.0", "fast-content-type-parse": "^3.0.0", "json-with-bigint": "^3.5.3", "universal-user-agent": "^7.0.2" } }, "sha512-SJZNwY9pur9Agf7l87ywFi14W+Hd9Jg6Ifivsd33+/bGUQIjNujdFiXII2/qSlN2ybqUHfp5xpekMEjIBTjlSw=="], + + "@octokit/request-error": ["@octokit/request-error@7.1.0", "", { "dependencies": { "@octokit/types": "^16.0.0" } }, "sha512-KMQIfq5sOPpkQYajXHwnhjCC0slzCNScLHs9JafXc4RAJI+9f+jNDlBNaIMTvazOPLgb4BnlhGJOTbnN0wIjPw=="], + + "@octokit/types": ["@octokit/types@16.0.0", "", { "dependencies": { "@octokit/openapi-types": "^27.0.0" } }, "sha512-sKq+9r1Mm4efXW1FCk7hFSeJo4QKreL/tTbR0rz/qx/r1Oa2VV83LTA/H/MuCOX7uCIJmQVRKBcbmWoySjAnSg=="], + + "@pnpm/config.env-replace": ["@pnpm/config.env-replace@1.1.0", "", {}, "sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w=="], + + "@pnpm/network.ca-file": ["@pnpm/network.ca-file@1.0.2", "", { "dependencies": { "graceful-fs": "4.2.10" } }, "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA=="], + + "@pnpm/npm-conf": ["@pnpm/npm-conf@3.0.2", "", { "dependencies": { "@pnpm/config.env-replace": "^1.1.0", "@pnpm/network.ca-file": "^1.0.1", "config-chain": "^1.1.11" } }, "sha512-h104Kh26rR8tm+a3Qkc5S4VLYint3FE48as7+/5oCEcKR2idC/pF1G6AhIXKI+eHPJa/3J9i5z0Al47IeGHPkA=="], + + "@sec-ant/readable-stream": ["@sec-ant/readable-stream@0.4.1", "", {}, "sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg=="], + + "@semantic-release/changelog": ["@semantic-release/changelog@6.0.3", "", { "dependencies": { "@semantic-release/error": "^3.0.0", "aggregate-error": "^3.0.0", "fs-extra": "^11.0.0", "lodash": "^4.17.4" }, "peerDependencies": { "semantic-release": ">=18.0.0" } }, "sha512-dZuR5qByyfe3Y03TpmCvAxCyTnp7r5XwtHRf/8vD9EAn4ZWbavUX8adMtXYzE86EVh0gyLA7lm5yW4IV30XUag=="], + + "@semantic-release/commit-analyzer": 
["@semantic-release/commit-analyzer@13.0.1", "", { "dependencies": { "conventional-changelog-angular": "^8.0.0", "conventional-changelog-writer": "^8.0.0", "conventional-commits-filter": "^5.0.0", "conventional-commits-parser": "^6.0.0", "debug": "^4.0.0", "import-from-esm": "^2.0.0", "lodash-es": "^4.17.21", "micromatch": "^4.0.2" }, "peerDependencies": { "semantic-release": ">=20.1.0" } }, "sha512-wdnBPHKkr9HhNhXOhZD5a2LNl91+hs8CC2vsAVYxtZH3y0dV3wKn+uZSN61rdJQZ8EGxzWB3inWocBHV9+u/CQ=="], + + "@semantic-release/error": ["@semantic-release/error@3.0.0", "", {}, "sha512-5hiM4Un+tpl4cKw3lV4UgzJj+SmfNIDCLLw0TepzQxz9ZGV5ixnqkzIVF+3tp0ZHgcMKE+VNGHJjEeyFG2dcSw=="], + + "@semantic-release/git": ["@semantic-release/git@10.0.1", "", { "dependencies": { "@semantic-release/error": "^3.0.0", "aggregate-error": "^3.0.0", "debug": "^4.0.0", "dir-glob": "^3.0.0", "execa": "^5.0.0", "lodash": "^4.17.4", "micromatch": "^4.0.0", "p-reduce": "^2.0.0" }, "peerDependencies": { "semantic-release": ">=18.0.0" } }, "sha512-eWrx5KguUcU2wUPaO6sfvZI0wPafUKAMNC18aXY4EnNcrZL86dEmpNVnC9uMpGZkmZJ9EfCVJBQx4pV4EMGT1w=="], + + "@semantic-release/github": ["@semantic-release/github@12.0.6", "", { "dependencies": { "@octokit/core": "^7.0.0", "@octokit/plugin-paginate-rest": "^14.0.0", "@octokit/plugin-retry": "^8.0.0", "@octokit/plugin-throttling": "^11.0.0", "@semantic-release/error": "^4.0.0", "aggregate-error": "^5.0.0", "debug": "^4.3.4", "dir-glob": "^3.0.1", "http-proxy-agent": "^7.0.0", "https-proxy-agent": "^7.0.0", "issue-parser": "^7.0.0", "lodash-es": "^4.17.21", "mime": "^4.0.0", "p-filter": "^4.0.0", "tinyglobby": "^0.2.14", "undici": "^7.0.0", "url-join": "^5.0.0" }, "peerDependencies": { "semantic-release": ">=24.1.0" } }, "sha512-aYYFkwHW3c6YtHwQF0t0+lAjlU+87NFOZuH2CvWFD0Ylivc7MwhZMiHOJ0FMpIgPpCVib/VUAcOwvrW0KnxQtA=="], + + "@semantic-release/npm": ["@semantic-release/npm@13.1.5", "", { "dependencies": { "@actions/core": "^3.0.0", "@semantic-release/error": "^4.0.0", 
"aggregate-error": "^5.0.0", "env-ci": "^11.2.0", "execa": "^9.0.0", "fs-extra": "^11.0.0", "lodash-es": "^4.17.21", "nerf-dart": "^1.0.0", "normalize-url": "^9.0.0", "npm": "^11.6.2", "rc": "^1.2.8", "read-pkg": "^10.0.0", "registry-auth-token": "^5.0.0", "semver": "^7.1.2", "tempy": "^3.0.0" }, "peerDependencies": { "semantic-release": ">=20.1.0" } }, "sha512-Hq5UxzoatN3LHiq2rTsWS54nCdqJHlsssGERCo8WlvdfFA9LoN0vO+OuKVSjtNapIc/S8C2LBj206wKLHg62mg=="], + + "@semantic-release/release-notes-generator": ["@semantic-release/release-notes-generator@14.1.0", "", { "dependencies": { "conventional-changelog-angular": "^8.0.0", "conventional-changelog-writer": "^8.0.0", "conventional-commits-filter": "^5.0.0", "conventional-commits-parser": "^6.0.0", "debug": "^4.0.0", "get-stream": "^7.0.0", "import-from-esm": "^2.0.0", "into-stream": "^7.0.0", "lodash-es": "^4.17.21", "read-package-up": "^11.0.0" }, "peerDependencies": { "semantic-release": ">=20.1.0" } }, "sha512-CcyDRk7xq+ON/20YNR+1I/jP7BYKICr1uKd1HHpROSnnTdGqOTburi4jcRiTYz0cpfhxSloQO3cGhnoot7IEkA=="], + + "@simple-libs/stream-utils": ["@simple-libs/stream-utils@1.2.0", "", {}, "sha512-KxXvfapcixpz6rVEB6HPjOUZT22yN6v0vI0urQSk1L8MlEWPDFCZkhw2xmkyoTGYeFw7tWTZd7e3lVzRZRN/EA=="], + + "@sindresorhus/is": ["@sindresorhus/is@4.6.0", "", {}, "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw=="], + + "@sindresorhus/merge-streams": ["@sindresorhus/merge-streams@4.0.0", "", {}, "sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ=="], + "@types/node": ["@types/node@25.0.9", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-/rpCXHlCWeqClNBwUhDcusJxXYDjZTyE8v5oTO7WbL8eij2nKhUeU89/6xgjU7N4/Vh3He0BtyhJdQbDyhiXAw=="], + "@types/normalize-package-data": ["@types/normalize-package-data@2.4.4", "", {}, "sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA=="], + + "agent-base": ["agent-base@7.1.4", 
"", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], + + "aggregate-error": ["aggregate-error@3.1.0", "", { "dependencies": { "clean-stack": "^2.0.0", "indent-string": "^4.0.0" } }, "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA=="], + + "ansi-escapes": ["ansi-escapes@7.3.0", "", { "dependencies": { "environment": "^1.0.0" } }, "sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg=="], + + "ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], + + "ansi-styles": ["ansi-styles@3.2.1", "", { "dependencies": { "color-convert": "^1.9.0" } }, "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA=="], + + "any-promise": ["any-promise@1.3.0", "", {}, "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A=="], + "argparse": ["argparse@2.0.1", "", {}, "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="], + "argv-formatter": ["argv-formatter@1.0.0", "", {}, "sha512-F2+Hkm9xFaRg+GkaNnbwXNDV5O6pnCFEmqyhvfC/Ic5LbgOWjJh3L+mN/s91rxVL3znE7DYVpW0GJFT+4YBgWw=="], + + "array-ify": ["array-ify@1.0.0", "", {}, "sha512-c5AMf34bKdvPhQ7tBGhqkgKNUzMr4WUs+WDtC2ZUGOUncbxKMTvqxYctiseW3+L4bA8ec+GcZ6/A/FW4m8ukng=="], + + "before-after-hook": ["before-after-hook@4.0.0", "", {}, "sha512-q6tR3RPqIB1pMiTRMFcZwuG5T8vwp+vUvEG0vuI6B+Rikh5BfPp2fQ82c925FOs+b0lcFQ8CFrL+KbilfZFhOQ=="], + + "bottleneck": ["bottleneck@2.19.5", "", {}, "sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw=="], + + "braces": ["braces@3.0.3", "", { "dependencies": { "fill-range": "^7.1.1" } }, "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA=="], + "bun-types": ["bun-types@1.3.6", "", { 
"dependencies": { "@types/node": "*" } }, "sha512-OlFwHcnNV99r//9v5IIOgQ9Uk37gZqrNMCcqEaExdkVq3Avwqok1bJFmvGMCkCE0FqzdY8VMOZpfpR3lwI+CsQ=="], + "callsites": ["callsites@3.1.0", "", {}, "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ=="], + + "chalk": ["chalk@5.6.2", "", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="], + + "char-regex": ["char-regex@1.0.2", "", {}, "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw=="], + "citty": ["citty@0.1.6", "", { "dependencies": { "consola": "^3.2.3" } }, "sha512-tskPPKEs8D2KPafUypv2gxwJP8h/OaJmC82QQGGDQcHvXX43xF2VDACcJVmZ0EuSxkpO9Kc4MlrA3q0+FG58AQ=="], + "clean-stack": ["clean-stack@2.2.0", "", {}, "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A=="], + + "cli-highlight": ["cli-highlight@2.1.11", "", { "dependencies": { "chalk": "^4.0.0", "highlight.js": "^10.7.1", "mz": "^2.4.0", "parse5": "^5.1.1", "parse5-htmlparser2-tree-adapter": "^6.0.0", "yargs": "^16.0.0" }, "bin": { "highlight": "bin/highlight" } }, "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg=="], + + "cli-table3": ["cli-table3@0.6.5", "", { "dependencies": { "string-width": "^4.2.0" }, "optionalDependencies": { "@colors/colors": "1.5.0" } }, "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ=="], + + "cliui": ["cliui@9.0.1", "", { "dependencies": { "string-width": "^7.2.0", "strip-ansi": "^7.1.0", "wrap-ansi": "^9.0.0" } }, "sha512-k7ndgKhwoQveBL+/1tqGJYNz097I7WOvwbmmU2AR5+magtbjPWQTS1C5vzGkBC8Ym8UWRzfKUzUUqFLypY4Q+w=="], + + "color-convert": ["color-convert@1.9.3", "", { "dependencies": { "color-name": "1.1.3" } }, "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg=="], + + "color-name": ["color-name@1.1.3", "", {}, 
"sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw=="], + + "compare-func": ["compare-func@2.0.0", "", { "dependencies": { "array-ify": "^1.0.0", "dot-prop": "^5.1.0" } }, "sha512-zHig5N+tPWARooBnb0Zx1MFcdfpyJrfTJ3Y5L+IFvUm8rM74hHz66z0gw0x4tijh5CorKkKUCnW82R2vmpeCRA=="], + + "config-chain": ["config-chain@1.1.13", "", { "dependencies": { "ini": "^1.3.4", "proto-list": "~1.2.1" } }, "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ=="], + "consola": ["consola@3.4.2", "", {}, "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA=="], + "conventional-changelog-angular": ["conventional-changelog-angular@8.2.0", "", { "dependencies": { "compare-func": "^2.0.0" } }, "sha512-4YB1zEXqB17oBI8yRsAs1T+ZhbdsOgJqkl6Trz+GXt/eKf1e4jnA0oW+sOd9BEENzEViuNW0DNoFFjSf3CeC5Q=="], + + "conventional-changelog-writer": ["conventional-changelog-writer@8.3.0", "", { "dependencies": { "@simple-libs/stream-utils": "^1.2.0", "conventional-commits-filter": "^5.0.0", "handlebars": "^4.7.7", "meow": "^13.0.0", "semver": "^7.5.2" }, "bin": { "conventional-changelog-writer": "dist/cli/index.js" } }, "sha512-l5hDOHjcTUVtnZJapoqXMCJ3IbyF6oV/vnxKL13AHulFH7mDp4PMJARxI7LWzob6UDDvhxIUWGTNUPW84JabQg=="], + + "conventional-commits-filter": ["conventional-commits-filter@5.0.0", "", {}, "sha512-tQMagCOC59EVgNZcC5zl7XqO30Wki9i9J3acbUvkaosCT6JX3EeFwJD7Qqp4MCikRnzS18WXV3BLIQ66ytu6+Q=="], + + "conventional-commits-parser": ["conventional-commits-parser@6.3.0", "", { "dependencies": { "@simple-libs/stream-utils": "^1.2.0", "meow": "^13.0.0" }, "bin": { "conventional-commits-parser": "dist/cli/index.js" } }, "sha512-RfOq/Cqy9xV9bOA8N+ZH6DlrDR+5S3Mi0B5kACEjESpE+AviIpAptx9a9cFpWCCvgRtWT+0BbUw+e1BZfts9jg=="], + + "convert-hrtime": ["convert-hrtime@5.0.0", "", {}, "sha512-lOETlkIeYSJWcbbcvjRKGxVMXJR+8+OQb/mTPbA4ObPMytYIsUbuOE0Jzy60hjARYszq1id0j8KgVhC+WGZVTg=="], + + "core-util-is": 
["core-util-is@1.0.3", "", {}, "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ=="], + + "cosmiconfig": ["cosmiconfig@9.0.1", "", { "dependencies": { "env-paths": "^2.2.1", "import-fresh": "^3.3.0", "js-yaml": "^4.1.0", "parse-json": "^5.2.0" }, "peerDependencies": { "typescript": ">=4.9.5" }, "optionalPeers": ["typescript"] }, "sha512-hr4ihw+DBqcvrsEDioRO31Z17x71pUYoNe/4h6Z0wB72p7MU7/9gH8Q3s12NFhHPfYBBOV3qyfUxmr/Yn3shnQ=="], + + "cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="], + + "crypto-random-string": ["crypto-random-string@4.0.0", "", { "dependencies": { "type-fest": "^1.0.1" } }, "sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA=="], + + "debug": ["debug@4.4.3", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA=="], + + "deep-extend": ["deep-extend@0.6.0", "", {}, "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA=="], + + "dir-glob": ["dir-glob@3.0.1", "", { "dependencies": { "path-type": "^4.0.0" } }, "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA=="], + + "dot-prop": ["dot-prop@5.3.0", "", { "dependencies": { "is-obj": "^2.0.0" } }, "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q=="], + + "duplexer2": ["duplexer2@0.1.4", "", { "dependencies": { "readable-stream": "^2.0.2" } }, "sha512-asLFVfWWtJ90ZyOUHMqk7/S2w2guQKxUI2itj3d92ADHhxUSbCMGi1f1cBcJ7xM1To+pE/Khbwo1yuNbMEPKeA=="], + + "emoji-regex": ["emoji-regex@10.6.0", "", {}, "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A=="], + + "emojilib": ["emojilib@2.4.0", "", 
{}, "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw=="], + + "env-ci": ["env-ci@11.2.0", "", { "dependencies": { "execa": "^8.0.0", "java-properties": "^1.0.2" } }, "sha512-D5kWfzkmaOQDioPmiviWAVtKmpPT4/iJmMVQxWxMPJTFyTkdc5JQUfc5iXEeWxcOdsYTKSAiA/Age4NUOqKsRA=="], + + "env-paths": ["env-paths@2.2.1", "", {}, "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A=="], + + "environment": ["environment@1.1.0", "", {}, "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q=="], + + "error-ex": ["error-ex@1.3.4", "", { "dependencies": { "is-arrayish": "^0.2.1" } }, "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ=="], + + "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], + + "escape-string-regexp": ["escape-string-regexp@5.0.0", "", {}, "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw=="], + + "execa": ["execa@5.1.1", "", { "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^6.0.0", "human-signals": "^2.1.0", "is-stream": "^2.0.0", "merge-stream": "^2.0.0", "npm-run-path": "^4.0.1", "onetime": "^5.1.2", "signal-exit": "^3.0.3", "strip-final-newline": "^2.0.0" } }, "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg=="], + + "fast-content-type-parse": ["fast-content-type-parse@3.0.0", "", {}, "sha512-ZvLdcY8P+N8mGQJahJV5G4U88CSvT1rP8ApL6uETe88MBXrBHAkZlSEySdUlyztF7ccb+Znos3TFqaepHxdhBg=="], + + "fdir": ["fdir@6.5.0", "", { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg=="], + + "figures": ["figures@6.1.0", "", { "dependencies": { "is-unicode-supported": "^2.0.0" } }, 
"sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg=="], + + "fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="], + + "find-up": ["find-up@2.1.0", "", { "dependencies": { "locate-path": "^2.0.0" } }, "sha512-NWzkk0jSJtTt08+FBFMvXoeZnOJD+jTtsRmBYbAIzJdX6l7dLgR7CTubCM5/eDdPUBvLCeVasP1brfVR/9/EZQ=="], + + "find-up-simple": ["find-up-simple@1.0.1", "", {}, "sha512-afd4O7zpqHeRyg4PfDQsXmlDe2PfdHtJt6Akt8jOWaApLOZk5JXs6VMR29lz03pRe9mpykrRCYIYxaJYcfpncQ=="], + + "find-versions": ["find-versions@6.0.0", "", { "dependencies": { "semver-regex": "^4.0.5", "super-regex": "^1.0.0" } }, "sha512-2kCCtc+JvcZ86IGAz3Z2Y0A1baIz9fL31pH/0S1IqZr9Iwnjq8izfPtrCyQKO6TLMPELLsQMre7VDqeIKCsHkA=="], + + "from2": ["from2@2.3.0", "", { "dependencies": { "inherits": "^2.0.1", "readable-stream": "^2.0.0" } }, "sha512-OMcX/4IC/uqEPVgGeyfN22LJk6AZrMkRZHxcHBMBvHScDGgwTm2GT2Wkgtocyd3JfZffjj2kYUDXXII0Fk9W0g=="], + + "fs-extra": ["fs-extra@11.3.3", "", { "dependencies": { "graceful-fs": "^4.2.0", "jsonfile": "^6.0.1", "universalify": "^2.0.0" } }, "sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg=="], + + "function-timeout": ["function-timeout@1.0.2", "", {}, "sha512-939eZS4gJ3htTHAldmyyuzlrD58P03fHG49v2JfFXbV6OhvZKRC9j2yAtdHw/zrp2zXHuv05zMIy40F0ge7spA=="], + + "get-caller-file": ["get-caller-file@2.0.5", "", {}, "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="], + + "get-east-asian-width": ["get-east-asian-width@1.5.0", "", {}, "sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA=="], + + "get-stream": ["get-stream@6.0.1", "", {}, "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg=="], + + "git-log-parser": ["git-log-parser@1.2.1", "", { 
"dependencies": { "argv-formatter": "~1.0.0", "spawn-error-forwarder": "~1.0.0", "split2": "~1.0.0", "stream-combiner2": "~1.1.1", "through2": "~2.0.0", "traverse": "0.6.8" } }, "sha512-PI+sPDvHXNPl5WNOErAK05s3j0lgwUzMN6o8cyQrDaKfT3qd7TmNJKeXX+SknI5I0QhG5fVPAEwSY4tRGDtYoQ=="], + + "graceful-fs": ["graceful-fs@4.2.11", "", {}, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="], + + "handlebars": ["handlebars@4.7.8", "", { "dependencies": { "minimist": "^1.2.5", "neo-async": "^2.6.2", "source-map": "^0.6.1", "wordwrap": "^1.0.0" }, "optionalDependencies": { "uglify-js": "^3.1.4" }, "bin": { "handlebars": "bin/handlebars" } }, "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ=="], + + "has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="], + + "highlight.js": ["highlight.js@10.7.3", "", {}, "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A=="], + + "hook-std": ["hook-std@4.0.0", "", {}, "sha512-IHI4bEVOt3vRUDJ+bFA9VUJlo7SzvFARPNLw75pqSmAOP2HmTWfFJtPvLBrDrlgjEYXY9zs7SFdHPQaJShkSCQ=="], + + "hosted-git-info": ["hosted-git-info@9.0.2", "", { "dependencies": { "lru-cache": "^11.1.0" } }, "sha512-M422h7o/BR3rmCQ8UHi7cyyMqKltdP9Uo+J2fXK+RSAY+wTcKOIRyhTuKv4qn+DJf3g+PL890AzId5KZpX+CBg=="], + + "http-proxy-agent": ["http-proxy-agent@7.0.2", "", { "dependencies": { "agent-base": "^7.1.0", "debug": "^4.3.4" } }, "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig=="], + + "https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], + + "human-signals": ["human-signals@2.1.0", "", {}, 
"sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw=="], + + "import-fresh": ["import-fresh@3.3.1", "", { "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" } }, "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ=="], + + "import-from-esm": ["import-from-esm@2.0.0", "", { "dependencies": { "debug": "^4.3.4", "import-meta-resolve": "^4.0.0" } }, "sha512-YVt14UZCgsX1vZQ3gKjkWVdBdHQ6eu3MPU1TBgL1H5orXe2+jWD006WCPPtOuwlQm10NuzOW5WawiF1Q9veW8g=="], + + "import-meta-resolve": ["import-meta-resolve@4.2.0", "", {}, "sha512-Iqv2fzaTQN28s/FwZAoFq0ZSs/7hMAHJVX+w8PZl3cY19Pxk6jFFalxQoIfW2826i/fDLXv8IiEZRIT0lDuWcg=="], + + "indent-string": ["indent-string@4.0.0", "", {}, "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg=="], + + "index-to-position": ["index-to-position@1.2.0", "", {}, "sha512-Yg7+ztRkqslMAS2iFaU+Oa4KTSidr63OsFGlOrJoW981kIYO3CGCS3wA95P1mUi/IVSJkn0D479KTJpVpvFNuw=="], + + "inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="], + + "ini": ["ini@1.3.8", "", {}, "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew=="], + + "into-stream": ["into-stream@7.0.0", "", { "dependencies": { "from2": "^2.3.0", "p-is-promise": "^3.0.0" } }, "sha512-2dYz766i9HprMBasCMvHMuazJ7u4WzhJwo5kb3iPSiW/iRYV6uPari3zHoqZlnuaR7V1bEiNMxikhp37rdBXbw=="], + + "is-arrayish": ["is-arrayish@0.2.1", "", {}, "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg=="], + + "is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], + + "is-number": ["is-number@7.0.0", "", {}, "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="], + + 
"is-obj": ["is-obj@2.0.0", "", {}, "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w=="], + + "is-plain-obj": ["is-plain-obj@4.1.0", "", {}, "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg=="], + + "is-stream": ["is-stream@2.0.1", "", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="], + + "is-unicode-supported": ["is-unicode-supported@2.1.0", "", {}, "sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ=="], + + "isarray": ["isarray@1.0.0", "", {}, "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ=="], + + "isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], + + "issue-parser": ["issue-parser@7.0.1", "", { "dependencies": { "lodash.capitalize": "^4.2.1", "lodash.escaperegexp": "^4.1.2", "lodash.isplainobject": "^4.0.6", "lodash.isstring": "^4.0.1", "lodash.uniqby": "^4.7.0" } }, "sha512-3YZcUUR2Wt1WsapF+S/WiA2WmlW0cWAoPccMqne7AxEBhCdFeTPjfv/Axb8V2gyCgY3nRw+ksZ3xSUX+R47iAg=="], + + "java-properties": ["java-properties@1.0.2", "", {}, "sha512-qjdpeo2yKlYTH7nFdK0vbZWuTCesk4o63v5iVOlhMQPfuIZQfW/HI35SjfhA+4qpg36rnFSvUK5b1m+ckIblQQ=="], + + "js-tokens": ["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="], + "js-yaml": ["js-yaml@4.1.1", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA=="], + "json-parse-better-errors": ["json-parse-better-errors@1.0.2", "", {}, "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw=="], + + "json-parse-even-better-errors": ["json-parse-even-better-errors@2.3.1", "", {}, 
"sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w=="], + + "json-with-bigint": ["json-with-bigint@3.5.7", "", {}, "sha512-7ei3MdAI5+fJPVnKlW77TKNKwQ5ppSzWvhPuSuINT/GYW9ZOC1eRKOuhV9yHG5aEsUPj9BBx5JIekkmoLHxZOw=="], + + "jsonfile": ["jsonfile@6.2.0", "", { "dependencies": { "universalify": "^2.0.0" }, "optionalDependencies": { "graceful-fs": "^4.1.6" } }, "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg=="], + + "lines-and-columns": ["lines-and-columns@1.2.4", "", {}, "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg=="], + + "load-json-file": ["load-json-file@4.0.0", "", { "dependencies": { "graceful-fs": "^4.1.2", "parse-json": "^4.0.0", "pify": "^3.0.0", "strip-bom": "^3.0.0" } }, "sha512-Kx8hMakjX03tiGTLAIdJ+lL0htKnXjEZN6hk/tozf/WOuYGdZBJrZ+rCJRbVCugsjB3jMLn9746NsQIf5VjBMw=="], + + "locate-path": ["locate-path@2.0.0", "", { "dependencies": { "p-locate": "^2.0.0", "path-exists": "^3.0.0" } }, "sha512-NCI2kiDkyR7VeEKm27Kda/iQHyKJe1Bu0FlTbYp3CqJu+9IFe9bLyAjMxf5ZDDbEg+iMPzB5zYyUTSm8wVTKmA=="], + + "lodash": ["lodash@4.17.23", "", {}, "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w=="], + + "lodash-es": ["lodash-es@4.17.23", "", {}, "sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg=="], + + "lodash.capitalize": ["lodash.capitalize@4.2.1", "", {}, "sha512-kZzYOKspf8XVX5AvmQF94gQW0lejFVgb80G85bU4ZWzoJ6C03PQg3coYAUpSTpQWelrZELd3XWgHzw4Ck5kaIw=="], + + "lodash.escaperegexp": ["lodash.escaperegexp@4.1.2", "", {}, "sha512-TM9YBvyC84ZxE3rgfefxUWiQKLilstD6k7PTGt6wfbtXF8ixIJLOL3VYyV/z+ZiPLsVxAsKAFVwWlWeb2Y8Yyw=="], + + "lodash.isplainobject": ["lodash.isplainobject@4.0.6", "", {}, "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA=="], + + "lodash.isstring": ["lodash.isstring@4.0.1", "", {}, 
"sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw=="], + + "lodash.uniqby": ["lodash.uniqby@4.7.0", "", {}, "sha512-e/zcLx6CSbmaEgFHCA7BnoQKyCtKMxnuWrJygbwPs/AIn+IMKl66L8/s+wBUn5LRw2pZx3bUHibiV1b6aTWIww=="], + + "lru-cache": ["lru-cache@11.2.6", "", {}, "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ=="], + + "make-asynchronous": ["make-asynchronous@1.1.0", "", { "dependencies": { "p-event": "^6.0.0", "type-fest": "^4.6.0", "web-worker": "^1.5.0" } }, "sha512-ayF7iT+44LXdxJLTrTd3TLQpFDDvPCBxXxbv+pMUSuHA5Q8zyAfwkRP6aHHwNVFBUFWtxAHqwNJxF8vMZLAbVg=="], + + "marked": ["marked@15.0.12", "", { "bin": { "marked": "bin/marked.js" } }, "sha512-8dD6FusOQSrpv9Z1rdNMdlSgQOIP880DHqnohobOmYLElGEqAL/JvxvuxZO16r4HtjTlfPRDC1hbvxC9dPN2nA=="], + + "marked-terminal": ["marked-terminal@7.3.0", "", { "dependencies": { "ansi-escapes": "^7.0.0", "ansi-regex": "^6.1.0", "chalk": "^5.4.1", "cli-highlight": "^2.1.11", "cli-table3": "^0.6.5", "node-emoji": "^2.2.0", "supports-hyperlinks": "^3.1.0" }, "peerDependencies": { "marked": ">=1 <16" } }, "sha512-t4rBvPsHc57uE/2nJOLmMbZCQ4tgAccAED3ngXQqW6g+TxA488JzJ+FK3lQkzBQOI1mRV/r/Kq+1ZlJ4D0owQw=="], + + "meow": ["meow@13.2.0", "", {}, "sha512-pxQJQzB6djGPXh08dacEloMFopsOqGVRKFPYvPOt9XDZ1HasbgDZA74CJGreSU4G3Ak7EFJGoiH2auq+yXISgA=="], + + "merge-stream": ["merge-stream@2.0.0", "", {}, "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w=="], + + "micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="], + + "mime": ["mime@4.1.0", "", { "bin": { "mime": "bin/cli.js" } }, "sha512-X5ju04+cAzsojXKes0B/S4tcYtFAJ6tTMuSPBEn9CPGlrWr8Fiw7qYeLT0XyH80HSoAoqWCaz+MWKh22P7G1cw=="], + + "mimic-fn": ["mimic-fn@2.1.0", "", {}, 
"sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg=="], + + "minimist": ["minimist@1.2.8", "", {}, "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="], + + "ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "mz": ["mz@2.7.0", "", { "dependencies": { "any-promise": "^1.0.0", "object-assign": "^4.0.1", "thenify-all": "^1.0.0" } }, "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q=="], + + "neo-async": ["neo-async@2.6.2", "", {}, "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw=="], + + "nerf-dart": ["nerf-dart@1.0.0", "", {}, "sha512-EZSPZB70jiVsivaBLYDCyntd5eH8NTSMOn3rB+HxwdmKThGELLdYv8qVIMWvZEFy9w8ZZpW9h9OB32l1rGtj7g=="], + + "node-emoji": ["node-emoji@2.2.0", "", { "dependencies": { "@sindresorhus/is": "^4.6.0", "char-regex": "^1.0.2", "emojilib": "^2.4.0", "skin-tone": "^2.0.0" } }, "sha512-Z3lTE9pLaJF47NyMhd4ww1yFTAP8YhYI8SleJiHzM46Fgpm5cnNzSl9XfzFNqbaz+VlJrIj3fXQ4DeN1Rjm6cw=="], + + "normalize-package-data": ["normalize-package-data@8.0.0", "", { "dependencies": { "hosted-git-info": "^9.0.0", "semver": "^7.3.5", "validate-npm-package-license": "^3.0.4" } }, "sha512-RWk+PI433eESQ7ounYxIp67CYuVsS1uYSonX3kA6ps/3LWfjVQa/ptEg6Y3T6uAMq1mWpX9PQ+qx+QaHpsc7gQ=="], + + "normalize-url": ["normalize-url@9.0.0", "", {}, "sha512-z9nC87iaZXXySbWWtTHfCFJyFvKaUAW6lODhikG7ILSbVgmwuFjUqkgnheHvAUcGedO29e2QGBRXMUD64aurqQ=="], + + "npm": ["npm@11.11.0", "", { "dependencies": { "@isaacs/string-locale-compare": "^1.1.0", "@npmcli/arborist": "^9.4.0", "@npmcli/config": "^10.7.1", "@npmcli/fs": "^5.0.0", "@npmcli/map-workspaces": "^5.0.3", "@npmcli/metavuln-calculator": "^9.0.3", "@npmcli/package-json": "^7.0.5", "@npmcli/promise-spawn": "^9.0.1", "@npmcli/redact": "^4.0.0", "@npmcli/run-script": "^10.0.3", "@sigstore/tuf": 
"^4.0.1", "abbrev": "^4.0.0", "archy": "~1.0.0", "cacache": "^20.0.3", "chalk": "^5.6.2", "ci-info": "^4.4.0", "fastest-levenshtein": "^1.0.16", "fs-minipass": "^3.0.3", "glob": "^13.0.6", "graceful-fs": "^4.2.11", "hosted-git-info": "^9.0.2", "ini": "^6.0.0", "init-package-json": "^8.2.5", "is-cidr": "^6.0.3", "json-parse-even-better-errors": "^5.0.0", "libnpmaccess": "^10.0.3", "libnpmdiff": "^8.1.3", "libnpmexec": "^10.2.3", "libnpmfund": "^7.0.17", "libnpmorg": "^8.0.1", "libnpmpack": "^9.1.3", "libnpmpublish": "^11.1.3", "libnpmsearch": "^9.0.1", "libnpmteam": "^8.0.2", "libnpmversion": "^8.0.3", "make-fetch-happen": "^15.0.4", "minimatch": "^10.2.2", "minipass": "^7.1.3", "minipass-pipeline": "^1.2.4", "ms": "^2.1.2", "node-gyp": "^12.2.0", "nopt": "^9.0.0", "npm-audit-report": "^7.0.0", "npm-install-checks": "^8.0.0", "npm-package-arg": "^13.0.2", "npm-pick-manifest": "^11.0.3", "npm-profile": "^12.0.1", "npm-registry-fetch": "^19.1.1", "npm-user-validate": "^4.0.0", "p-map": "^7.0.4", "pacote": "^21.4.0", "parse-conflict-json": "^5.0.1", "proc-log": "^6.1.0", "qrcode-terminal": "^0.12.0", "read": "^5.0.1", "semver": "^7.7.4", "spdx-expression-parse": "^4.0.0", "ssri": "^13.0.1", "supports-color": "^10.2.2", "tar": "^7.5.9", "text-table": "~0.2.0", "tiny-relative-date": "^2.0.2", "treeverse": "^3.0.0", "validate-npm-package-name": "^7.0.2", "which": "^6.0.1" }, "bin": { "npm": "bin/npm-cli.js", "npx": "bin/npx-cli.js" } }, "sha512-82gRxKrh/eY5UnNorkTFcdBQAGpgjWehkfGVqAGlJjejEtJZGGJUqjo3mbBTNbc5BTnPKGVtGPBZGhElujX5cw=="], + + "npm-run-path": ["npm-run-path@4.0.1", "", { "dependencies": { "path-key": "^3.0.0" } }, "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw=="], + + "object-assign": ["object-assign@4.1.1", "", {}, "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="], + + "onetime": ["onetime@5.1.2", "", { "dependencies": { "mimic-fn": "^2.1.0" } }, 
"sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg=="], + + "p-each-series": ["p-each-series@3.0.0", "", {}, "sha512-lastgtAdoH9YaLyDa5i5z64q+kzOcQHsQ5SsZJD3q0VEyI8mq872S3geuNbRUQLVAE9siMfgKrpj7MloKFHruw=="], + + "p-event": ["p-event@6.0.1", "", { "dependencies": { "p-timeout": "^6.1.2" } }, "sha512-Q6Bekk5wpzW5qIyUP4gdMEujObYstZl6DMMOSenwBvV0BlE5LkDwkjs5yHbZmdCEq2o4RJx4tE1vwxFVf2FG1w=="], + + "p-filter": ["p-filter@4.1.0", "", { "dependencies": { "p-map": "^7.0.1" } }, "sha512-37/tPdZ3oJwHaS3gNJdenCDB3Tz26i9sjhnguBtvN0vYlRIiDNnvTWkuh+0hETV9rLPdJ3rlL3yVOYPIAnM8rw=="], + + "p-is-promise": ["p-is-promise@3.0.0", "", {}, "sha512-Wo8VsW4IRQSKVXsJCn7TomUaVtyfjVDn3nUP7kE967BQk0CwFpdbZs0X0uk5sW9mkBa9eNM7hCMaG93WUAwxYQ=="], + + "p-limit": ["p-limit@1.3.0", "", { "dependencies": { "p-try": "^1.0.0" } }, "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q=="], + + "p-locate": ["p-locate@2.0.0", "", { "dependencies": { "p-limit": "^1.1.0" } }, "sha512-nQja7m7gSKuewoVRen45CtVfODR3crN3goVQ0DDZ9N3yHxgpkuBhZqsaiotSQRrADUrne346peY7kT3TSACykg=="], + + "p-map": ["p-map@7.0.4", "", {}, "sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ=="], + + "p-reduce": ["p-reduce@2.1.0", "", {}, "sha512-2USApvnsutq8uoxZBGbbWM0JIYLiEMJ9RlaN7fAzVNb9OZN0SHjjTTfIcb667XynS5Y1VhwDJVDa72TnPzAYWw=="], + + "p-timeout": ["p-timeout@6.1.4", "", {}, "sha512-MyIV3ZA/PmyBN/ud8vV9XzwTrNtR4jFrObymZYnZqMmW0zA8Z17vnT0rBgFE/TlohB+YCHqXMgZzb3Csp49vqg=="], + + "p-try": ["p-try@1.0.0", "", {}, "sha512-U1etNYuMJoIz3ZXSrrySFjsXQTWOx2/jdi86L+2pRvph/qMKL6sbcCYdH23fqsbm8TH2Gn0OybpT4eSFlCVHww=="], + + "parent-module": ["parent-module@1.0.1", "", { "dependencies": { "callsites": "^3.0.0" } }, "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g=="], + + "parse-json": ["parse-json@5.2.0", "", { "dependencies": { "@babel/code-frame": "^7.0.0", "error-ex": 
"^1.3.1", "json-parse-even-better-errors": "^2.3.0", "lines-and-columns": "^1.1.6" } }, "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg=="], + + "parse-ms": ["parse-ms@4.0.0", "", {}, "sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw=="], + + "parse5": ["parse5@5.1.1", "", {}, "sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug=="], + + "parse5-htmlparser2-tree-adapter": ["parse5-htmlparser2-tree-adapter@6.0.1", "", { "dependencies": { "parse5": "^6.0.1" } }, "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA=="], + + "path-exists": ["path-exists@3.0.0", "", {}, "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ=="], + + "path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="], + + "path-type": ["path-type@4.0.0", "", {}, "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw=="], + + "picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="], + + "picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], + + "pify": ["pify@3.0.0", "", {}, "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg=="], + + "pkg-conf": ["pkg-conf@2.1.0", "", { "dependencies": { "find-up": "^2.0.0", "load-json-file": "^4.0.0" } }, "sha512-C+VUP+8jis7EsQZIhDYmS5qlNtjv2yP4SNtjXK9AP1ZcTRlnSfuumaTnRfYZnYgUUYVIKqL0fRvmUGDV2fmp6g=="], + + "pretty-ms": ["pretty-ms@9.3.0", "", { "dependencies": { "parse-ms": "^4.0.0" } }, "sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ=="], + + "process-nextick-args": 
["process-nextick-args@2.0.1", "", {}, "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="], + + "proto-list": ["proto-list@1.2.4", "", {}, "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA=="], + + "rc": ["rc@1.2.8", "", { "dependencies": { "deep-extend": "^0.6.0", "ini": "~1.3.0", "minimist": "^1.2.0", "strip-json-comments": "~2.0.1" }, "bin": { "rc": "./cli.js" } }, "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw=="], + + "read-package-up": ["read-package-up@12.0.0", "", { "dependencies": { "find-up-simple": "^1.0.1", "read-pkg": "^10.0.0", "type-fest": "^5.2.0" } }, "sha512-Q5hMVBYur/eQNWDdbF4/Wqqr9Bjvtrw2kjGxxBbKLbx8bVCL8gcArjTy8zDUuLGQicftpMuU0riQNcAsbtOVsw=="], + + "read-pkg": ["read-pkg@10.1.0", "", { "dependencies": { "@types/normalize-package-data": "^2.4.4", "normalize-package-data": "^8.0.0", "parse-json": "^8.3.0", "type-fest": "^5.4.4", "unicorn-magic": "^0.4.0" } }, "sha512-I8g2lArQiP78ll51UeMZojewtYgIRCKCWqZEgOO8c/uefTI+XDXvCSXu3+YNUaTNvZzobrL5+SqHjBrByRRTdg=="], + + "readable-stream": ["readable-stream@2.3.8", "", { "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", "process-nextick-args": "~2.0.0", "safe-buffer": "~5.1.1", "string_decoder": "~1.1.1", "util-deprecate": "~1.0.1" } }, "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA=="], + + "registry-auth-token": ["registry-auth-token@5.1.1", "", { "dependencies": { "@pnpm/npm-conf": "^3.0.2" } }, "sha512-P7B4+jq8DeD2nMsAcdfaqHbssgHtZ7Z5+++a5ask90fvmJ8p5je4mOa+wzu+DB4vQ5tdJV/xywY+UnVFeQLV5Q=="], + + "require-directory": ["require-directory@2.1.1", "", {}, "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q=="], + + "resolve-from": ["resolve-from@5.0.0", "", {}, 
"sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw=="], + + "safe-buffer": ["safe-buffer@5.1.2", "", {}, "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="], + + "semantic-release": ["semantic-release@25.0.3", "", { "dependencies": { "@semantic-release/commit-analyzer": "^13.0.1", "@semantic-release/error": "^4.0.0", "@semantic-release/github": "^12.0.0", "@semantic-release/npm": "^13.1.1", "@semantic-release/release-notes-generator": "^14.1.0", "aggregate-error": "^5.0.0", "cosmiconfig": "^9.0.0", "debug": "^4.0.0", "env-ci": "^11.0.0", "execa": "^9.0.0", "figures": "^6.0.0", "find-versions": "^6.0.0", "get-stream": "^6.0.0", "git-log-parser": "^1.2.0", "hook-std": "^4.0.0", "hosted-git-info": "^9.0.0", "import-from-esm": "^2.0.0", "lodash-es": "^4.17.21", "marked": "^15.0.0", "marked-terminal": "^7.3.0", "micromatch": "^4.0.2", "p-each-series": "^3.0.0", "p-reduce": "^3.0.0", "read-package-up": "^12.0.0", "resolve-from": "^5.0.0", "semver": "^7.3.2", "signale": "^1.2.1", "yargs": "^18.0.0" }, "bin": { "semantic-release": "bin/semantic-release.js" } }, "sha512-WRgl5GcypwramYX4HV+eQGzUbD7UUbljVmS+5G1uMwX/wLgYuJAxGeerXJDMO2xshng4+FXqCgyB5QfClV6WjA=="], + + "semver": ["semver@7.7.4", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA=="], + + "semver-regex": ["semver-regex@4.0.5", "", {}, "sha512-hunMQrEy1T6Jr2uEVjrAIqjwWcQTgOAcIM52C8MY1EZSD3DDNft04XzvYKPqjED65bNVVko0YI38nYeEHCX3yw=="], + + "shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="], + + "shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="], + + "signal-exit": ["signal-exit@3.0.7", "", {}, 
"sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="], + + "signale": ["signale@1.4.0", "", { "dependencies": { "chalk": "^2.3.2", "figures": "^2.0.0", "pkg-conf": "^2.1.0" } }, "sha512-iuh+gPf28RkltuJC7W5MRi6XAjTDCAPC/prJUpQoG4vIP3MJZ+GTydVnodXA7pwvTKb2cA0m9OFZW/cdWy/I/w=="], + + "skin-tone": ["skin-tone@2.0.0", "", { "dependencies": { "unicode-emoji-modifier-base": "^1.0.0" } }, "sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA=="], + + "source-map": ["source-map@0.6.1", "", {}, "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="], + + "spawn-error-forwarder": ["spawn-error-forwarder@1.0.0", "", {}, "sha512-gRjMgK5uFjbCvdibeGJuy3I5OYz6VLoVdsOJdA6wV0WlfQVLFueoqMxwwYD9RODdgb6oUIvlRlsyFSiQkMKu0g=="], + + "spdx-correct": ["spdx-correct@3.2.0", "", { "dependencies": { "spdx-expression-parse": "^3.0.0", "spdx-license-ids": "^3.0.0" } }, "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA=="], + + "spdx-exceptions": ["spdx-exceptions@2.5.0", "", {}, "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w=="], + + "spdx-expression-parse": ["spdx-expression-parse@3.0.1", "", { "dependencies": { "spdx-exceptions": "^2.1.0", "spdx-license-ids": "^3.0.0" } }, "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q=="], + + "spdx-license-ids": ["spdx-license-ids@3.0.23", "", {}, "sha512-CWLcCCH7VLu13TgOH+r8p1O/Znwhqv/dbb6lqWy67G+pT1kHmeD/+V36AVb/vq8QMIQwVShJ6Ssl5FPh0fuSdw=="], + + "split2": ["split2@1.0.0", "", { "dependencies": { "through2": "~2.0.0" } }, "sha512-NKywug4u4pX/AZBB1FCPzZ6/7O+Xhz1qMVbzTvvKvikjO99oPN87SkK08mEY9P63/5lWjK+wgOOgApnTg5r6qg=="], + + "stream-combiner2": ["stream-combiner2@1.1.1", "", { "dependencies": { "duplexer2": "~0.1.0", "readable-stream": "^2.0.2" } }, 
"sha512-3PnJbYgS56AeWgtKF5jtJRT6uFJe56Z0Hc5Ngg/6sI6rIt8iiMBTa9cvdyFfpMQjaVHr8dusbNeFGIIonxOvKw=="], + + "string-width": ["string-width@7.2.0", "", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ=="], + + "string_decoder": ["string_decoder@1.1.1", "", { "dependencies": { "safe-buffer": "~5.1.0" } }, "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg=="], + + "strip-ansi": ["strip-ansi@7.2.0", "", { "dependencies": { "ansi-regex": "^6.2.2" } }, "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w=="], + + "strip-bom": ["strip-bom@3.0.0", "", {}, "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA=="], + + "strip-final-newline": ["strip-final-newline@2.0.0", "", {}, "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA=="], + + "strip-json-comments": ["strip-json-comments@2.0.1", "", {}, "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ=="], + + "super-regex": ["super-regex@1.1.0", "", { "dependencies": { "function-timeout": "^1.0.1", "make-asynchronous": "^1.0.1", "time-span": "^5.1.0" } }, "sha512-WHkws2ZflZe41zj6AolvvmaTrWds/VuyeYr9iPVv/oQeaIoVxMKaushfFWpOGDT+GuBrM/sVqF8KUCYQlSSTdQ=="], + + "supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], + + "supports-hyperlinks": ["supports-hyperlinks@3.2.0", "", { "dependencies": { "has-flag": "^4.0.0", "supports-color": "^7.0.0" } }, "sha512-zFObLMyZeEwzAoKCyu1B91U79K2t7ApXuQfo8OuxwXLDgcKxuwM+YvcbIhm6QWqz7mHUH1TVytR1PwVVjEuMig=="], + + "tagged-tag": ["tagged-tag@1.0.0", "", {}, 
"sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng=="], + + "temp-dir": ["temp-dir@3.0.0", "", {}, "sha512-nHc6S/bwIilKHNRgK/3jlhDoIHcp45YgyiwcAk46Tr0LfEqGBVpmiAyuiuxeVE44m3mXnEeVhaipLOEWmH+Njw=="], + + "tempy": ["tempy@3.2.0", "", { "dependencies": { "is-stream": "^3.0.0", "temp-dir": "^3.0.0", "type-fest": "^2.12.2", "unique-string": "^3.0.0" } }, "sha512-d79HhZya5Djd7am0q+W4RTsSU+D/aJzM+4Y4AGJGuGlgM2L6sx5ZvOYTmZjqPhrDrV6xJTtRSm1JCLj6V6LHLQ=="], + + "thenify": ["thenify@3.3.1", "", { "dependencies": { "any-promise": "^1.0.0" } }, "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw=="], + + "thenify-all": ["thenify-all@1.6.0", "", { "dependencies": { "thenify": ">= 3.1.0 < 4" } }, "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA=="], + + "through2": ["through2@2.0.5", "", { "dependencies": { "readable-stream": "~2.3.6", "xtend": "~4.0.1" } }, "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ=="], + + "time-span": ["time-span@5.1.0", "", { "dependencies": { "convert-hrtime": "^5.0.0" } }, "sha512-75voc/9G4rDIJleOo4jPvN4/YC4GRZrY8yy1uU4lwrB3XEQbWve8zXoO5No4eFrGcTAMYyoY67p8jRQdtA1HbA=="], + + "tinyglobby": ["tinyglobby@0.2.15", "", { "dependencies": { "fdir": "^6.5.0", "picomatch": "^4.0.3" } }, "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ=="], + + "to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="], + + "traverse": ["traverse@0.6.8", "", {}, "sha512-aXJDbk6SnumuaZSANd21XAo15ucCDE38H4fkqiGsc3MhCK+wOlZvLP9cB/TvpHT0mOyWgC4Z8EwRlzqYSUzdsA=="], + + "tunnel": ["tunnel@0.0.6", "", {}, "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg=="], + + "type-fest": 
["type-fest@5.4.4", "", { "dependencies": { "tagged-tag": "^1.0.0" } }, "sha512-JnTrzGu+zPV3aXIUhnyWJj4z/wigMsdYajGLIYakqyOW1nPllzXEJee0QQbHj+CTIQtXGlAjuK0UY+2xTyjVAw=="], + + "uglify-js": ["uglify-js@3.19.3", "", { "bin": { "uglifyjs": "bin/uglifyjs" } }, "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ=="], + + "undici": ["undici@7.22.0", "", {}, "sha512-RqslV2Us5BrllB+JeiZnK4peryVTndy9Dnqq62S3yYRRTj0tFQCwEniUy2167skdGOy3vqRzEvl1Dm4sV2ReDg=="], + "undici-types": ["undici-types@7.16.0", "", {}, "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw=="], + + "unicode-emoji-modifier-base": ["unicode-emoji-modifier-base@1.0.0", "", {}, "sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g=="], + + "unicorn-magic": ["unicorn-magic@0.4.0", "", {}, "sha512-wH590V9VNgYH9g3lH9wWjTrUoKsjLF6sGLjhR4sH1LWpLmCOH0Zf7PukhDA8BiS7KHe4oPNkcTHqYkj7SOGUOw=="], + + "unique-string": ["unique-string@3.0.0", "", { "dependencies": { "crypto-random-string": "^4.0.0" } }, "sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ=="], + + "universal-user-agent": ["universal-user-agent@7.0.3", "", {}, "sha512-TmnEAEAsBJVZM/AADELsK76llnwcf9vMKuPz8JflO1frO8Lchitr0fNaN9d+Ap0BjKtqWqd/J17qeDnXh8CL2A=="], + + "universalify": ["universalify@2.0.1", "", {}, "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw=="], + + "url-join": ["url-join@5.0.0", "", {}, "sha512-n2huDr9h9yzd6exQVnH/jU5mr+Pfx08LRXXZhkLLetAMESRj+anQsTAh940iMrIetKAmry9coFuZQ2jY8/p3WA=="], + + "util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="], + + "validate-npm-package-license": ["validate-npm-package-license@3.0.4", "", { "dependencies": { "spdx-correct": "^3.0.0", "spdx-expression-parse": "^3.0.0" } }, 
"sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew=="], + + "web-worker": ["web-worker@1.5.0", "", {}, "sha512-RiMReJrTAiA+mBjGONMnjVDP2u3p9R1vkcGz6gDIrOMT3oGuYwX2WRMYI9ipkphSuE5XKEhydbhNEJh4NY9mlw=="], + + "which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="], + + "wordwrap": ["wordwrap@1.0.0", "", {}, "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q=="], + + "wrap-ansi": ["wrap-ansi@9.0.2", "", { "dependencies": { "ansi-styles": "^6.2.1", "string-width": "^7.0.0", "strip-ansi": "^7.1.0" } }, "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww=="], + + "xtend": ["xtend@4.0.2", "", {}, "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="], + + "y18n": ["y18n@5.0.8", "", {}, "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA=="], + + "yargs": ["yargs@18.0.0", "", { "dependencies": { "cliui": "^9.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "string-width": "^7.2.0", "y18n": "^5.0.5", "yargs-parser": "^22.0.0" } }, "sha512-4UEqdc2RYGHZc7Doyqkrqiln3p9X2DZVxaGbwhn2pi7MrRagKaOcIKe8L3OxYcbhXLgLFUS3zAYuQjKBQgmuNg=="], + + "yargs-parser": ["yargs-parser@22.0.0", "", {}, "sha512-rwu/ClNdSMpkSrUb+d6BRsSkLUq1fmfsY6TOpYzTwvwkg1/NRG85KBy3kq++A8LKQwX6lsu+aWad+2khvuXrqw=="], + + "yoctocolors": ["yoctocolors@2.1.2", "", {}, "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug=="], + + "@actions/http-client/undici": ["undici@6.23.0", "", {}, "sha512-VfQPToRA5FZs/qJxLIinmU59u0r7LXqoJkCzinq3ckNJp3vKEh7jTWN589YQ5+aoAC/TGRLyJLCPKcLQbM8r9g=="], + + "@pnpm/network.ca-file/graceful-fs": ["graceful-fs@4.2.10", "", {}, 
"sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA=="], + + "@semantic-release/github/@semantic-release/error": ["@semantic-release/error@4.0.0", "", {}, "sha512-mgdxrHTLOjOddRVYIYDo0fR3/v61GNN1YGkfbrjuIKg/uMgCd+Qzo3UAXJ+woLQQpos4pl5Esuw5A7AoNlzjUQ=="], + + "@semantic-release/github/aggregate-error": ["aggregate-error@5.0.0", "", { "dependencies": { "clean-stack": "^5.2.0", "indent-string": "^5.0.0" } }, "sha512-gOsf2YwSlleG6IjRYG2A7k0HmBMEo6qVNk9Bp/EaLgAJT5ngH6PXbqa4ItvnEwCm/velL5jAnQgsHsWnjhGmvw=="], + + "@semantic-release/npm/@semantic-release/error": ["@semantic-release/error@4.0.0", "", {}, "sha512-mgdxrHTLOjOddRVYIYDo0fR3/v61GNN1YGkfbrjuIKg/uMgCd+Qzo3UAXJ+woLQQpos4pl5Esuw5A7AoNlzjUQ=="], + + "@semantic-release/npm/aggregate-error": ["aggregate-error@5.0.0", "", { "dependencies": { "clean-stack": "^5.2.0", "indent-string": "^5.0.0" } }, "sha512-gOsf2YwSlleG6IjRYG2A7k0HmBMEo6qVNk9Bp/EaLgAJT5ngH6PXbqa4ItvnEwCm/velL5jAnQgsHsWnjhGmvw=="], + + "@semantic-release/npm/execa": ["execa@9.6.1", "", { "dependencies": { "@sindresorhus/merge-streams": "^4.0.0", "cross-spawn": "^7.0.6", "figures": "^6.1.0", "get-stream": "^9.0.0", "human-signals": "^8.0.1", "is-plain-obj": "^4.1.0", "is-stream": "^4.0.1", "npm-run-path": "^6.0.0", "pretty-ms": "^9.2.0", "signal-exit": "^4.1.0", "strip-final-newline": "^4.0.0", "yoctocolors": "^2.1.1" } }, "sha512-9Be3ZoN4LmYR90tUoVu2te2BsbzHfhJyfEiAVfz7N5/zv+jduIfLrV2xdQXOHbaD6KgpGdO9PRPM1Y4Q9QkPkA=="], + + "@semantic-release/release-notes-generator/get-stream": ["get-stream@7.0.1", "", {}, "sha512-3M8C1EOFN6r8AMUhwUAACIoXZJEOufDU5+0gFFN5uNs6XYOralD2Pqkl7m046va6x77FwposWXbAhPPIOus7mQ=="], + + "@semantic-release/release-notes-generator/read-package-up": ["read-package-up@11.0.0", "", { "dependencies": { "find-up-simple": "^1.0.0", "read-pkg": "^9.0.0", "type-fest": "^4.6.0" } }, "sha512-MbgfoNPANMdb4oRBNg5eqLbB2t2r+o5Ua1pNt8BqGp4I0FJZhuVSOj3PaBPni4azWuSzEdNn2evevzVmEk1ohQ=="], + + 
"cli-highlight/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], + + "cli-highlight/yargs": ["yargs@16.2.0", "", { "dependencies": { "cliui": "^7.0.2", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.0", "y18n": "^5.0.5", "yargs-parser": "^20.2.2" } }, "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw=="], + + "cli-table3/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], + + "crypto-random-string/type-fest": ["type-fest@1.4.0", "", {}, "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA=="], + + "env-ci/execa": ["execa@8.0.1", "", { "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^8.0.1", "human-signals": "^5.0.0", "is-stream": "^3.0.0", "merge-stream": "^2.0.0", "npm-run-path": "^5.1.0", "onetime": "^6.0.0", "signal-exit": "^4.1.0", "strip-final-newline": "^3.0.0" } }, "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg=="], + + "fdir/picomatch": ["picomatch@4.0.3", "", {}, "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="], + + "import-fresh/resolve-from": ["resolve-from@4.0.0", "", {}, "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g=="], + + "load-json-file/parse-json": ["parse-json@4.0.0", "", { "dependencies": { "error-ex": "^1.3.1", "json-parse-better-errors": "^1.0.1" } }, "sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw=="], + + "make-asynchronous/type-fest": 
["type-fest@4.41.0", "", {}, "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA=="], + + "npm/@gar/promise-retry": ["@gar/promise-retry@1.0.2", "", { "dependencies": { "retry": "^0.13.1" } }, "sha512-Lm/ZLhDZcBECta3TmCQSngiQykFdfw+QtI1/GYMsZd4l3nG+P8WLB16XuS7WaBGLQ+9E+cOcWQsth9cayuGt8g=="], + + "npm/@isaacs/fs-minipass": ["@isaacs/fs-minipass@4.0.1", "", { "dependencies": { "minipass": "^7.0.4" } }, "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w=="], + + "npm/@isaacs/string-locale-compare": ["@isaacs/string-locale-compare@1.1.0", "", { "bundled": true }, "sha512-SQ7Kzhh9+D+ZW9MA0zkYv3VXhIDNx+LzM6EJ+/65I3QY+enU6Itte7E5XX7EWrqLW2FN4n06GWzBnPoC3th2aQ=="], + + "npm/@npmcli/agent": ["@npmcli/agent@4.0.0", "", { "dependencies": { "agent-base": "^7.1.0", "http-proxy-agent": "^7.0.0", "https-proxy-agent": "^7.0.1", "lru-cache": "^11.2.1", "socks-proxy-agent": "^8.0.3" } }, "sha512-kAQTcEN9E8ERLVg5AsGwLNoFb+oEG6engbqAU2P43gD4JEIkNGMHdVQ096FsOAAYpZPB0RSt0zgInKIAS1l5QA=="], + + "npm/@npmcli/arborist": ["@npmcli/arborist@9.4.0", "", { "dependencies": { "@isaacs/string-locale-compare": "^1.1.0", "@npmcli/fs": "^5.0.0", "@npmcli/installed-package-contents": "^4.0.0", "@npmcli/map-workspaces": "^5.0.0", "@npmcli/metavuln-calculator": "^9.0.2", "@npmcli/name-from-folder": "^4.0.0", "@npmcli/node-gyp": "^5.0.0", "@npmcli/package-json": "^7.0.0", "@npmcli/query": "^5.0.0", "@npmcli/redact": "^4.0.0", "@npmcli/run-script": "^10.0.0", "bin-links": "^6.0.0", "cacache": "^20.0.1", "common-ancestor-path": "^2.0.0", "hosted-git-info": "^9.0.0", "json-stringify-nice": "^1.1.4", "lru-cache": "^11.2.1", "minimatch": "^10.0.3", "nopt": "^9.0.0", "npm-install-checks": "^8.0.0", "npm-package-arg": "^13.0.0", "npm-pick-manifest": "^11.0.1", "npm-registry-fetch": "^19.0.0", "pacote": "^21.0.2", "parse-conflict-json": "^5.0.1", "proc-log": "^6.0.0", "proggy": "^4.0.0", "promise-all-reject-late": "^1.0.0", 
"promise-call-limit": "^3.0.1", "semver": "^7.3.7", "ssri": "^13.0.0", "treeverse": "^3.0.0", "walk-up-path": "^4.0.0" }, "bundled": true, "bin": { "arborist": "bin/index.js" } }, "sha512-4Bm8hNixJG/sii1PMnag0V9i/sGOX9VRzFrUiZMSBJpGlLR38f+Btl85d07G9GL56xO0l0OZjvrGNYsDYp0xKA=="], + + "npm/@npmcli/config": ["@npmcli/config@10.7.1", "", { "dependencies": { "@npmcli/map-workspaces": "^5.0.0", "@npmcli/package-json": "^7.0.0", "ci-info": "^4.0.0", "ini": "^6.0.0", "nopt": "^9.0.0", "proc-log": "^6.0.0", "semver": "^7.3.5", "walk-up-path": "^4.0.0" }, "bundled": true }, "sha512-lh0eZYOknIpIKYKxbQKX7xFmb4FbmrOHUD25+0iEo3djRQP6YleHwBFgjH3X7QvUVM4t+Xm7rGsjDwJp63WkAg=="], + + "npm/@npmcli/fs": ["@npmcli/fs@5.0.0", "", { "dependencies": { "semver": "^7.3.5" }, "bundled": true }, "sha512-7OsC1gNORBEawOa5+j2pXN9vsicaIOH5cPXxoR6fJOmH6/EXpJB2CajXOu1fPRFun2m1lktEFX11+P89hqO/og=="], + + "npm/@npmcli/git": ["@npmcli/git@7.0.2", "", { "dependencies": { "@gar/promise-retry": "^1.0.0", "@npmcli/promise-spawn": "^9.0.0", "ini": "^6.0.0", "lru-cache": "^11.2.1", "npm-pick-manifest": "^11.0.1", "proc-log": "^6.0.0", "semver": "^7.3.5", "which": "^6.0.0" } }, "sha512-oeolHDjExNAJAnlYP2qzNjMX/Xi9bmu78C9dIGr4xjobrSKbuMYCph8lTzn4vnW3NjIqVmw/f8BCfouqyJXlRg=="], + + "npm/@npmcli/installed-package-contents": ["@npmcli/installed-package-contents@4.0.0", "", { "dependencies": { "npm-bundled": "^5.0.0", "npm-normalize-package-bin": "^5.0.0" }, "bin": { "installed-package-contents": "bin/index.js" } }, "sha512-yNyAdkBxB72gtZ4GrwXCM0ZUedo9nIbOMKfGjt6Cu6DXf0p8y1PViZAKDC8q8kv/fufx0WTjRBdSlyrvnP7hmA=="], + + "npm/@npmcli/map-workspaces": ["@npmcli/map-workspaces@5.0.3", "", { "dependencies": { "@npmcli/name-from-folder": "^4.0.0", "@npmcli/package-json": "^7.0.0", "glob": "^13.0.0", "minimatch": "^10.0.3" }, "bundled": true }, "sha512-o2grssXo1e774E5OtEwwrgoszYRh0lqkJH+Pb9r78UcqdGJRDRfhpM8DvZPjzNLLNYeD/rNbjOKM3Ss5UABROw=="], + + "npm/@npmcli/metavuln-calculator": ["@npmcli/metavuln-calculator@9.0.3", 
"", { "dependencies": { "cacache": "^20.0.0", "json-parse-even-better-errors": "^5.0.0", "pacote": "^21.0.0", "proc-log": "^6.0.0", "semver": "^7.3.5" }, "bundled": true }, "sha512-94GLSYhLXF2t2LAC7pDwLaM4uCARzxShyAQKsirmlNcpidH89VA4/+K1LbJmRMgz5gy65E/QBBWQdUvGLe2Frg=="], + + "npm/@npmcli/name-from-folder": ["@npmcli/name-from-folder@4.0.0", "", {}, "sha512-qfrhVlOSqmKM8i6rkNdZzABj8MKEITGFAY+4teqBziksCQAOLutiAxM1wY2BKEd8KjUSpWmWCYxvXr0y4VTlPg=="], + + "npm/@npmcli/node-gyp": ["@npmcli/node-gyp@5.0.0", "", {}, "sha512-uuG5HZFXLfyFKqg8QypsmgLQW7smiRjVc45bqD/ofZZcR/uxEjgQU8qDPv0s9TEeMUiAAU/GC5bR6++UdTirIQ=="], + + "npm/@npmcli/package-json": ["@npmcli/package-json@7.0.5", "", { "dependencies": { "@npmcli/git": "^7.0.0", "glob": "^13.0.0", "hosted-git-info": "^9.0.0", "json-parse-even-better-errors": "^5.0.0", "proc-log": "^6.0.0", "semver": "^7.5.3", "spdx-expression-parse": "^4.0.0" }, "bundled": true }, "sha512-iVuTlG3ORq2iaVa1IWUxAO/jIp77tUKBhoMjuzYW2kL4MLN1bi/ofqkZ7D7OOwh8coAx1/S2ge0rMdGv8sLSOQ=="], + + "npm/@npmcli/promise-spawn": ["@npmcli/promise-spawn@9.0.1", "", { "dependencies": { "which": "^6.0.0" }, "bundled": true }, "sha512-OLUaoqBuyxeTqUvjA3FZFiXUfYC1alp3Sa99gW3EUDz3tZ3CbXDdcZ7qWKBzicrJleIgucoWamWH1saAmH/l2Q=="], + + "npm/@npmcli/query": ["@npmcli/query@5.0.0", "", { "dependencies": { "postcss-selector-parser": "^7.0.0" } }, "sha512-8TZWfTQOsODpLqo9SVhVjHovmKXNpevHU0gO9e+y4V4fRIOneiXy0u0sMP9LmS71XivrEWfZWg50ReH4WRT4aQ=="], + + "npm/@npmcli/redact": ["@npmcli/redact@4.0.0", "", { "bundled": true }, "sha512-gOBg5YHMfZy+TfHArfVogwgfBeQnKbbGo3pSUyK/gSI0AVu+pEiDVcKlQb0D8Mg1LNRZILZ6XG8I5dJ4KuAd9Q=="], + + "npm/@npmcli/run-script": ["@npmcli/run-script@10.0.4", "", { "dependencies": { "@npmcli/node-gyp": "^5.0.0", "@npmcli/package-json": "^7.0.0", "@npmcli/promise-spawn": "^9.0.0", "node-gyp": "^12.1.0", "proc-log": "^6.0.0" }, "bundled": true }, "sha512-mGUWr1uMnf0le2TwfOZY4SFxZGXGfm4Jtay/nwAa2FLNAKXUoUwaGwBMNH36UHPtinWfTSJ3nqFQr0091CxVGg=="], + + 
"npm/@sigstore/bundle": ["@sigstore/bundle@4.0.0", "", { "dependencies": { "@sigstore/protobuf-specs": "^0.5.0" } }, "sha512-NwCl5Y0V6Di0NexvkTqdoVfmjTaQwoLM236r89KEojGmq/jMls8S+zb7yOwAPdXvbwfKDlP+lmXgAL4vKSQT+A=="], + + "npm/@sigstore/core": ["@sigstore/core@3.1.0", "", {}, "sha512-o5cw1QYhNQ9IroioJxpzexmPjfCe7gzafd2RY3qnMpxr4ZEja+Jad/U8sgFpaue6bOaF+z7RVkyKVV44FN+N8A=="], + + "npm/@sigstore/protobuf-specs": ["@sigstore/protobuf-specs@0.5.0", "", {}, "sha512-MM8XIwUjN2bwvCg1QvrMtbBmpcSHrkhFSCu1D11NyPvDQ25HEc4oG5/OcQfd/Tlf/OxmKWERDj0zGE23jQaMwA=="], + + "npm/@sigstore/sign": ["@sigstore/sign@4.1.0", "", { "dependencies": { "@sigstore/bundle": "^4.0.0", "@sigstore/core": "^3.1.0", "@sigstore/protobuf-specs": "^0.5.0", "make-fetch-happen": "^15.0.3", "proc-log": "^6.1.0", "promise-retry": "^2.0.1" } }, "sha512-Vx1RmLxLGnSUqx/o5/VsCjkuN5L7y+vxEEwawvc7u+6WtX2W4GNa7b9HEjmcRWohw/d6BpATXmvOwc78m+Swdg=="], + + "npm/@sigstore/tuf": ["@sigstore/tuf@4.0.1", "", { "dependencies": { "@sigstore/protobuf-specs": "^0.5.0", "tuf-js": "^4.1.0" }, "bundled": true }, "sha512-OPZBg8y5Vc9yZjmWCHrlWPMBqW5yd8+wFNl+thMdtcWz3vjVSoJQutF8YkrzI0SLGnkuFof4HSsWUhXrf219Lw=="], + + "npm/@sigstore/verify": ["@sigstore/verify@3.1.0", "", { "dependencies": { "@sigstore/bundle": "^4.0.0", "@sigstore/core": "^3.1.0", "@sigstore/protobuf-specs": "^0.5.0" } }, "sha512-mNe0Iigql08YupSOGv197YdHpPPr+EzDZmfCgMc7RPNaZTw5aLN01nBl6CHJOh3BGtnMIj83EeN4butBchc8Ag=="], + + "npm/@tufjs/canonical-json": ["@tufjs/canonical-json@2.0.0", "", {}, "sha512-yVtV8zsdo8qFHe+/3kw81dSLyF7D576A5cCFCi4X7B39tWT7SekaEFUnvnWJHz+9qO7qJTah1JbrDjWKqFtdWA=="], + + "npm/@tufjs/models": ["@tufjs/models@4.1.0", "", { "dependencies": { "@tufjs/canonical-json": "2.0.0", "minimatch": "^10.1.1" } }, "sha512-Y8cK9aggNRsqJVaKUlEYs4s7CvQ1b1ta2DVPyAimb0I2qhzjNk+A+mxvll/klL0RlfuIUei8BF7YWiua4kQqww=="], + + "npm/abbrev": ["abbrev@4.0.0", "", { "bundled": true }, 
"sha512-a1wflyaL0tHtJSmLSOVybYhy22vRih4eduhhrkcjgrWGnRfrZtovJ2FRjxuTtkkj47O/baf0R86QU5OuYpz8fA=="], + + "npm/agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], + + "npm/aproba": ["aproba@2.1.0", "", {}, "sha512-tLIEcj5GuR2RSTnxNKdkK0dJ/GrC7P38sUkiDmDuHfsHmbagTFAxDVIBltoklXEVIQ/f14IL8IMJ5pn9Hez1Ew=="], + + "npm/archy": ["archy@1.0.0", "", { "bundled": true }, "sha512-Xg+9RwCg/0p32teKdGMPTPnVXKD0w3DfHnFTficozsAgsvq2XenPJq/MYpzzQ/v8zrOyJn6Ds39VA4JIDwFfqw=="], + + "npm/balanced-match": ["balanced-match@4.0.4", "", {}, "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA=="], + + "npm/bin-links": ["bin-links@6.0.0", "", { "dependencies": { "cmd-shim": "^8.0.0", "npm-normalize-package-bin": "^5.0.0", "proc-log": "^6.0.0", "read-cmd-shim": "^6.0.0", "write-file-atomic": "^7.0.0" } }, "sha512-X4CiKlcV2GjnCMwnKAfbVWpHa++65th9TuzAEYtZoATiOE2DQKhSp4CJlyLoTqdhBKlXjpXjCTYPNNFS33Fi6w=="], + + "npm/binary-extensions": ["binary-extensions@3.1.0", "", {}, "sha512-Jvvd9hy1w+xUad8+ckQsWA/V1AoyubOvqn0aygjMOVM4BfIaRav1NFS3LsTSDaV4n4FtcCtQXvzep1E6MboqwQ=="], + + "npm/brace-expansion": ["brace-expansion@5.0.4", "", { "dependencies": { "balanced-match": "^4.0.2" } }, "sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg=="], + + "npm/cacache": ["cacache@20.0.3", "", { "dependencies": { "@npmcli/fs": "^5.0.0", "fs-minipass": "^3.0.0", "glob": "^13.0.0", "lru-cache": "^11.1.0", "minipass": "^7.0.3", "minipass-collect": "^2.0.1", "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", "p-map": "^7.0.2", "ssri": "^13.0.0", "unique-filename": "^5.0.0" }, "bundled": true }, "sha512-3pUp4e8hv07k1QlijZu6Kn7c9+ZpWWk4j3F8N3xPuCExULobqJydKYOTj1FTq58srkJsXvO7LbGAH4C0ZU3WGw=="], + + "npm/chalk": ["chalk@5.6.2", "", { "bundled": true }, 
"sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="], + + "npm/chownr": ["chownr@3.0.0", "", {}, "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g=="], + + "npm/ci-info": ["ci-info@4.4.0", "", { "bundled": true }, "sha512-77PSwercCZU2Fc4sX94eF8k8Pxte6JAwL4/ICZLFjJLqegs7kCuAsqqj/70NQF6TvDpgFjkubQB2FW2ZZddvQg=="], + + "npm/cidr-regex": ["cidr-regex@5.0.3", "", {}, "sha512-zfPT2uurEroxXqefaL2L7/fT5ED2XTutC6UwFbSZfqSOk1vk5VFY6xa6/R6pBxB4Uc8MNPbRW5ykqutFG5P5ww=="], + + "npm/cmd-shim": ["cmd-shim@8.0.0", "", {}, "sha512-Jk/BK6NCapZ58BKUxlSI+ouKRbjH1NLZCgJkYoab+vEHUY3f6OzpNBN9u7HFSv9J6TRDGs4PLOHezoKGaFRSCA=="], + + "npm/common-ancestor-path": ["common-ancestor-path@2.0.0", "", {}, "sha512-dnN3ibLeoRf2HNC+OlCiNc5d2zxbLJXOtiZUudNFSXZrNSydxcCsSpRzXwfu7BBWCIfHPw+xTayeBvJCP/D8Ng=="], + + "npm/cssesc": ["cssesc@3.0.0", "", { "bin": { "cssesc": "bin/cssesc" } }, "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg=="], + + "npm/debug": ["debug@4.4.3", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA=="], + + "npm/diff": ["diff@8.0.3", "", {}, "sha512-qejHi7bcSD4hQAZE0tNAawRK1ZtafHDmMTMkrrIGgSLl7hTnQHmKCeB45xAcbfTqK2zowkM3j3bHt/4b/ARbYQ=="], + + "npm/env-paths": ["env-paths@2.2.1", "", {}, "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A=="], + + "npm/err-code": ["err-code@2.0.3", "", {}, "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA=="], + + "npm/exponential-backoff": ["exponential-backoff@3.1.3", "", {}, "sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA=="], + + "npm/fastest-levenshtein": ["fastest-levenshtein@1.0.16", "", { "bundled": true }, 
"sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg=="], + + "npm/fdir": ["fdir@6.5.0", "", { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg=="], + + "npm/fs-minipass": ["fs-minipass@3.0.3", "", { "dependencies": { "minipass": "^7.0.3" }, "bundled": true }, "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw=="], + + "npm/glob": ["glob@13.0.6", "", { "dependencies": { "minimatch": "^10.2.2", "minipass": "^7.1.3", "path-scurry": "^2.0.2" }, "bundled": true }, "sha512-Wjlyrolmm8uDpm/ogGyXZXb1Z+Ca2B8NbJwqBVg0axK9GbBeoS7yGV6vjXnYdGm6X53iehEuxxbyiKp8QmN4Vw=="], + + "npm/graceful-fs": ["graceful-fs@4.2.11", "", { "bundled": true }, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="], + + "npm/hosted-git-info": ["hosted-git-info@9.0.2", "", { "dependencies": { "lru-cache": "^11.1.0" }, "bundled": true }, "sha512-M422h7o/BR3rmCQ8UHi7cyyMqKltdP9Uo+J2fXK+RSAY+wTcKOIRyhTuKv4qn+DJf3g+PL890AzId5KZpX+CBg=="], + + "npm/http-cache-semantics": ["http-cache-semantics@4.2.0", "", {}, "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ=="], + + "npm/http-proxy-agent": ["http-proxy-agent@7.0.2", "", { "dependencies": { "agent-base": "^7.1.0", "debug": "^4.3.4" } }, "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig=="], + + "npm/https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], + + "npm/iconv-lite": ["iconv-lite@0.7.2", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw=="], 
+ + "npm/ignore-walk": ["ignore-walk@8.0.0", "", { "dependencies": { "minimatch": "^10.0.3" } }, "sha512-FCeMZT4NiRQGh+YkeKMtWrOmBgWjHjMJ26WQWrRQyoyzqevdaGSakUaJW5xQYmjLlUVk2qUnCjYVBax9EKKg8A=="], + + "npm/imurmurhash": ["imurmurhash@0.1.4", "", {}, "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA=="], + + "npm/ini": ["ini@6.0.0", "", { "bundled": true }, "sha512-IBTdIkzZNOpqm7q3dRqJvMaldXjDHWkEDfrwGEQTs5eaQMWV+djAhR+wahyNNMAa+qpbDUhBMVt4ZKNwpPm7xQ=="], + + "npm/init-package-json": ["init-package-json@8.2.5", "", { "dependencies": { "@npmcli/package-json": "^7.0.0", "npm-package-arg": "^13.0.0", "promzard": "^3.0.1", "read": "^5.0.1", "semver": "^7.7.2", "validate-npm-package-name": "^7.0.0" }, "bundled": true }, "sha512-IknQ+upLuJU6t3p0uo9wS3GjFD/1GtxIwcIGYOWR8zL2HxQeJwvxYTgZr9brJ8pyZ4kvpkebM8ZKcyqOeLOHSg=="], + + "npm/ip-address": ["ip-address@10.1.0", "", {}, "sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q=="], + + "npm/is-cidr": ["is-cidr@6.0.3", "", { "dependencies": { "cidr-regex": "^5.0.1" }, "bundled": true }, "sha512-tPdsizbDiISrc4PoII6ZfpmAokx0oDKeYqAUp5bXOfznauOFXfEeosKBRrl0o0SriE4xoRR05Czn4YPCFMjSHA=="], + + "npm/isexe": ["isexe@4.0.0", "", {}, "sha512-FFUtZMpoZ8RqHS3XeXEmHWLA4thH+ZxCv2lOiPIn1Xc7CxrqhWzNSDzD+/chS/zbYezmiwWLdQC09JdQKmthOw=="], + + "npm/json-parse-even-better-errors": ["json-parse-even-better-errors@5.0.0", "", { "bundled": true }, "sha512-ZF1nxZ28VhQouRWhUcVlUIN3qwSgPuswK05s/HIaoetAoE/9tngVmCHjSxmSQPav1nd+lPtTL0YZ/2AFdR/iYQ=="], + + "npm/json-stringify-nice": ["json-stringify-nice@1.1.4", "", {}, "sha512-5Z5RFW63yxReJ7vANgW6eZFGWaQvnPE3WNmZoOJrSkGju2etKA2L5rrOa1sm877TVTFt57A80BH1bArcmlLfPw=="], + + "npm/jsonparse": ["jsonparse@1.3.1", "", {}, "sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg=="], + + "npm/just-diff": ["just-diff@6.0.2", "", {}, 
"sha512-S59eriX5u3/QhMNq3v/gm8Kd0w8OS6Tz2FS1NG4blv+z0MuQcBRJyFWjdovM0Rad4/P4aUPFtnkNjMjyMlMSYA=="], + + "npm/just-diff-apply": ["just-diff-apply@5.5.0", "", {}, "sha512-OYTthRfSh55WOItVqwpefPtNt2VdKsq5AnAK6apdtR6yCH8pr0CmSr710J0Mf+WdQy7K/OzMy7K2MgAfdQURDw=="], + + "npm/libnpmaccess": ["libnpmaccess@10.0.3", "", { "dependencies": { "npm-package-arg": "^13.0.0", "npm-registry-fetch": "^19.0.0" }, "bundled": true }, "sha512-JPHTfWJxIK+NVPdNMNGnkz4XGX56iijPbe0qFWbdt68HL+kIvSzh+euBL8npLZvl2fpaxo+1eZSdoG15f5YdIQ=="], + + "npm/libnpmdiff": ["libnpmdiff@8.1.3", "", { "dependencies": { "@npmcli/arborist": "^9.4.0", "@npmcli/installed-package-contents": "^4.0.0", "binary-extensions": "^3.0.0", "diff": "^8.0.2", "minimatch": "^10.0.3", "npm-package-arg": "^13.0.0", "pacote": "^21.0.2", "tar": "^7.5.1" }, "bundled": true }, "sha512-QZ9rpchNXSzvxTRHzEqxCfYBK2h+6j4J7IbBViBGy3xSJDBl026BCMhmlZQ0a69GeQkjkbM9X1hzRV9N5cdQog=="], + + "npm/libnpmexec": ["libnpmexec@10.2.3", "", { "dependencies": { "@gar/promise-retry": "^1.0.0", "@npmcli/arborist": "^9.4.0", "@npmcli/package-json": "^7.0.0", "@npmcli/run-script": "^10.0.0", "ci-info": "^4.0.0", "npm-package-arg": "^13.0.0", "pacote": "^21.0.2", "proc-log": "^6.0.0", "read": "^5.0.1", "semver": "^7.3.7", "signal-exit": "^4.1.0", "walk-up-path": "^4.0.0" }, "bundled": true }, "sha512-tCeneLdUhmn8GTORbui7QZrr1Rv8Y2/mQRwMjUeyY8IrhCjv29RkoH3gFz+1CCPGGMp26eT8KI977G74+rXMpw=="], + + "npm/libnpmfund": ["libnpmfund@7.0.17", "", { "dependencies": { "@npmcli/arborist": "^9.4.0" }, "bundled": true }, "sha512-0VRPO+Bs21kneI3J01QqnuxiNnHn1lErTqLIbI3zGM9LvsPtc2q2/xhjACuXbkcejuHVm3T9mWaky0IjM9gQeQ=="], + + "npm/libnpmorg": ["libnpmorg@8.0.1", "", { "dependencies": { "aproba": "^2.0.0", "npm-registry-fetch": "^19.0.0" }, "bundled": true }, "sha512-/QeyXXg4hqMw0ESM7pERjIT2wbR29qtFOWIOug/xO4fRjS3jJJhoAPQNsnHtdwnCqgBdFpGQ45aIdFFZx2YhTA=="], + + "npm/libnpmpack": ["libnpmpack@9.1.3", "", { "dependencies": { "@npmcli/arborist": "^9.4.0", 
"@npmcli/run-script": "^10.0.0", "npm-package-arg": "^13.0.0", "pacote": "^21.0.2" }, "bundled": true }, "sha512-7Uvo0mDIidFCOGwZJghTuk9glaR6Es9FxmLWJobOS857/cb5SO5YPqgYLlC1TZB6L0c2jtu8XB1GfxKRf4W4GA=="], + + "npm/libnpmpublish": ["libnpmpublish@11.1.3", "", { "dependencies": { "@npmcli/package-json": "^7.0.0", "ci-info": "^4.0.0", "npm-package-arg": "^13.0.0", "npm-registry-fetch": "^19.0.0", "proc-log": "^6.0.0", "semver": "^7.3.7", "sigstore": "^4.0.0", "ssri": "^13.0.0" }, "bundled": true }, "sha512-NVPTth/71cfbdYHqypcO9Lt5WFGTzFEcx81lWd7GDJIgZ95ERdYHGUfCtFejHCyqodKsQkNEx2JCkMpreDty/A=="], + + "npm/libnpmsearch": ["libnpmsearch@9.0.1", "", { "dependencies": { "npm-registry-fetch": "^19.0.0" }, "bundled": true }, "sha512-oKw58X415ERY/BOGV3jQPVMcep8YeMRWMzuuqB0BAIM5VxicOU1tQt19ExCu4SV77SiTOEoziHxGEgJGw3FBYQ=="], + + "npm/libnpmteam": ["libnpmteam@8.0.2", "", { "dependencies": { "aproba": "^2.0.0", "npm-registry-fetch": "^19.0.0" }, "bundled": true }, "sha512-ypLrDUQoi8EhG+gzx5ENMcYq23YjPV17Mfvx4nOnQiHOi8vp47+4GvZBrMsEM4yeHPwxguF/HZoXH4rJfHdH/w=="], + + "npm/libnpmversion": ["libnpmversion@8.0.3", "", { "dependencies": { "@npmcli/git": "^7.0.0", "@npmcli/run-script": "^10.0.0", "json-parse-even-better-errors": "^5.0.0", "proc-log": "^6.0.0", "semver": "^7.3.7" }, "bundled": true }, "sha512-Avj1GG3DT6MGzWOOk3yA7rORcMDUPizkIGbI8glHCO7WoYn3NYNmskLDwxg2NMY1Tyf2vrHAqTuSG58uqd1lJg=="], + + "npm/lru-cache": ["lru-cache@11.2.6", "", {}, "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ=="], + + "npm/make-fetch-happen": ["make-fetch-happen@15.0.4", "", { "dependencies": { "@gar/promise-retry": "^1.0.0", "@npmcli/agent": "^4.0.0", "cacache": "^20.0.1", "http-cache-semantics": "^4.1.1", "minipass": "^7.0.2", "minipass-fetch": "^5.0.0", "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", "negotiator": "^1.0.0", "proc-log": "^6.0.0", "ssri": "^13.0.0" }, "bundled": true }, 
"sha512-vM2sG+wbVeVGYcCm16mM3d5fuem9oC28n436HjsGO3LcxoTI8LNVa4rwZDn3f76+cWyT4GGJDxjTYU1I2nr6zw=="], + + "npm/minimatch": ["minimatch@10.2.4", "", { "dependencies": { "brace-expansion": "^5.0.2" }, "bundled": true }, "sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg=="], + + "npm/minipass": ["minipass@7.1.3", "", { "bundled": true }, "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A=="], + + "npm/minipass-collect": ["minipass-collect@2.0.1", "", { "dependencies": { "minipass": "^7.0.3" } }, "sha512-D7V8PO9oaz7PWGLbCACuI1qEOsq7UKfLotx/C0Aet43fCUB/wfQ7DYeq2oR/svFJGYDHPr38SHATeaj/ZoKHKw=="], + + "npm/minipass-fetch": ["minipass-fetch@5.0.2", "", { "dependencies": { "minipass": "^7.0.3", "minipass-sized": "^2.0.0", "minizlib": "^3.0.1" }, "optionalDependencies": { "iconv-lite": "^0.7.2" } }, "sha512-2d0q2a8eCi2IRg/IGubCNRJoYbA1+YPXAzQVRFmB45gdGZafyivnZ5YSEfo3JikbjGxOdntGFvBQGqaSMXlAFQ=="], + + "npm/minipass-flush": ["minipass-flush@1.0.5", "", { "dependencies": { "minipass": "^3.0.0" } }, "sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw=="], + + "npm/minipass-pipeline": ["minipass-pipeline@1.2.4", "", { "dependencies": { "minipass": "^3.0.0" }, "bundled": true }, "sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A=="], + + "npm/minipass-sized": ["minipass-sized@2.0.0", "", { "dependencies": { "minipass": "^7.1.2" } }, "sha512-zSsHhto5BcUVM2m1LurnXY6M//cGhVaegT71OfOXoprxT6o780GZd792ea6FfrQkuU4usHZIUczAQMRUE2plzA=="], + + "npm/minizlib": ["minizlib@3.1.0", "", { "dependencies": { "minipass": "^7.1.2" } }, "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw=="], + + "npm/ms": ["ms@2.1.3", "", { "bundled": true }, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "npm/mute-stream": ["mute-stream@3.0.0", 
"", {}, "sha512-dkEJPVvun4FryqBmZ5KhDo0K9iDXAwn08tMLDinNdRBNPcYEDiWYysLcc6k3mjTMlbP9KyylvRpd4wFtwrT9rw=="], + + "npm/negotiator": ["negotiator@1.0.0", "", {}, "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg=="], + + "npm/node-gyp": ["node-gyp@12.2.0", "", { "dependencies": { "env-paths": "^2.2.0", "exponential-backoff": "^3.1.1", "graceful-fs": "^4.2.6", "make-fetch-happen": "^15.0.0", "nopt": "^9.0.0", "proc-log": "^6.0.0", "semver": "^7.3.5", "tar": "^7.5.4", "tinyglobby": "^0.2.12", "which": "^6.0.0" }, "bundled": true, "bin": { "node-gyp": "bin/node-gyp.js" } }, "sha512-q23WdzrQv48KozXlr0U1v9dwO/k59NHeSzn6loGcasyf0UnSrtzs8kRxM+mfwJSf0DkX0s43hcqgnSO4/VNthQ=="], + + "npm/nopt": ["nopt@9.0.0", "", { "dependencies": { "abbrev": "^4.0.0" }, "bundled": true, "bin": { "nopt": "bin/nopt.js" } }, "sha512-Zhq3a+yFKrYwSBluL4H9XP3m3y5uvQkB/09CwDruCiRmR/UJYnn9W4R48ry0uGC70aeTPKLynBtscP9efFFcPw=="], + + "npm/npm-audit-report": ["npm-audit-report@7.0.0", "", { "bundled": true }, "sha512-bluLL4xwGr/3PERYz50h2Upco0TJMDcLcymuFnfDWeGO99NqH724MNzhWi5sXXuXf2jbytFF0LyR8W+w1jTI6A=="], + + "npm/npm-bundled": ["npm-bundled@5.0.0", "", { "dependencies": { "npm-normalize-package-bin": "^5.0.0" } }, "sha512-JLSpbzh6UUXIEoqPsYBvVNVmyrjVZ1fzEFbqxKkTJQkWBO3xFzFT+KDnSKQWwOQNbuWRwt5LSD6HOTLGIWzfrw=="], + + "npm/npm-install-checks": ["npm-install-checks@8.0.0", "", { "dependencies": { "semver": "^7.1.1" }, "bundled": true }, "sha512-ScAUdMpyzkbpxoNekQ3tNRdFI8SJ86wgKZSQZdUxT+bj0wVFpsEMWnkXP0twVe1gJyNF5apBWDJhhIbgrIViRA=="], + + "npm/npm-normalize-package-bin": ["npm-normalize-package-bin@5.0.0", "", {}, "sha512-CJi3OS4JLsNMmr2u07OJlhcrPxCeOeP/4xq67aWNai6TNWWbTrlNDgl8NcFKVlcBKp18GPj+EzbNIgrBfZhsag=="], + + "npm/npm-package-arg": ["npm-package-arg@13.0.2", "", { "dependencies": { "hosted-git-info": "^9.0.0", "proc-log": "^6.0.0", "semver": "^7.3.5", "validate-npm-package-name": "^7.0.0" }, "bundled": true }, 
"sha512-IciCE3SY3uE84Ld8WZU23gAPPV9rIYod4F+rc+vJ7h7cwAJt9Vk6TVsK60ry7Uj3SRS3bqRRIGuTp9YVlk6WNA=="], + + "npm/npm-packlist": ["npm-packlist@10.0.4", "", { "dependencies": { "ignore-walk": "^8.0.0", "proc-log": "^6.0.0" } }, "sha512-uMW73iajD8hiH4ZBxEV3HC+eTnppIqwakjOYuvgddnalIw2lJguKviK1pcUJDlIWm1wSJkchpDZDSVVsZEYRng=="], + + "npm/npm-pick-manifest": ["npm-pick-manifest@11.0.3", "", { "dependencies": { "npm-install-checks": "^8.0.0", "npm-normalize-package-bin": "^5.0.0", "npm-package-arg": "^13.0.0", "semver": "^7.3.5" }, "bundled": true }, "sha512-buzyCfeoGY/PxKqmBqn1IUJrZnUi1VVJTdSSRPGI60tJdUhUoSQFhs0zycJokDdOznQentgrpf8LayEHyyYlqQ=="], + + "npm/npm-profile": ["npm-profile@12.0.1", "", { "dependencies": { "npm-registry-fetch": "^19.0.0", "proc-log": "^6.0.0" }, "bundled": true }, "sha512-Xs1mejJ1/9IKucCxdFMkiBJUre0xaxfCpbsO7DB7CadITuT4k68eI05HBlw4kj+Em1rsFMgeFNljFPYvPETbVQ=="], + + "npm/npm-registry-fetch": ["npm-registry-fetch@19.1.1", "", { "dependencies": { "@npmcli/redact": "^4.0.0", "jsonparse": "^1.3.1", "make-fetch-happen": "^15.0.0", "minipass": "^7.0.2", "minipass-fetch": "^5.0.0", "minizlib": "^3.0.1", "npm-package-arg": "^13.0.0", "proc-log": "^6.0.0" }, "bundled": true }, "sha512-TakBap6OM1w0H73VZVDf44iFXsOS3h+L4wVMXmbWOQroZgFhMch0juN6XSzBNlD965yIKvWg2dfu7NSiaYLxtw=="], + + "npm/npm-user-validate": ["npm-user-validate@4.0.0", "", { "bundled": true }, "sha512-TP+Ziq/qPi/JRdhaEhnaiMkqfMGjhDLoh/oRfW+t5aCuIfJxIUxvwk6Sg/6ZJ069N/Be6gs00r+aZeJTfS9uHQ=="], + + "npm/p-map": ["p-map@7.0.4", "", { "bundled": true }, "sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ=="], + + "npm/pacote": ["pacote@21.4.0", "", { "dependencies": { "@gar/promise-retry": "^1.0.0", "@npmcli/git": "^7.0.0", "@npmcli/installed-package-contents": "^4.0.0", "@npmcli/package-json": "^7.0.0", "@npmcli/promise-spawn": "^9.0.0", "@npmcli/run-script": "^10.0.0", "cacache": "^20.0.0", "fs-minipass": "^3.0.0", "minipass": "^7.0.2", "npm-package-arg": 
"^13.0.0", "npm-packlist": "^10.0.1", "npm-pick-manifest": "^11.0.1", "npm-registry-fetch": "^19.0.0", "proc-log": "^6.0.0", "sigstore": "^4.0.0", "ssri": "^13.0.0", "tar": "^7.4.3" }, "bundled": true, "bin": { "pacote": "bin/index.js" } }, "sha512-DR7mn7HUOomAX1BORnpYy678qVIidbvOojkBscqy27dRKN+s/hLeQT1MeYYrx1Cxh62jyKjiWiDV7RTTqB+ZEQ=="], + + "npm/parse-conflict-json": ["parse-conflict-json@5.0.1", "", { "dependencies": { "json-parse-even-better-errors": "^5.0.0", "just-diff": "^6.0.0", "just-diff-apply": "^5.2.0" }, "bundled": true }, "sha512-ZHEmNKMq1wyJXNwLxyHnluPfRAFSIliBvbK/UiOceROt4Xh9Pz0fq49NytIaeaCUf5VR86hwQ/34FCcNU5/LKQ=="], + + "npm/path-scurry": ["path-scurry@2.0.2", "", { "dependencies": { "lru-cache": "^11.0.0", "minipass": "^7.1.2" } }, "sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg=="], + + "npm/picomatch": ["picomatch@4.0.3", "", {}, "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="], + + "npm/postcss-selector-parser": ["postcss-selector-parser@7.1.1", "", { "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" } }, "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg=="], + + "npm/proc-log": ["proc-log@6.1.0", "", { "bundled": true }, "sha512-iG+GYldRf2BQ0UDUAd6JQ/RwzaQy6mXmsk/IzlYyal4A4SNFw54MeH4/tLkF4I5WoWG9SQwuqWzS99jaFQHBuQ=="], + + "npm/proggy": ["proggy@4.0.0", "", {}, "sha512-MbA4R+WQT76ZBm/5JUpV9yqcJt92175+Y0Bodg3HgiXzrmKu7Ggq+bpn6y6wHH+gN9NcyKn3yg1+d47VaKwNAQ=="], + + "npm/promise-all-reject-late": ["promise-all-reject-late@1.0.1", "", {}, "sha512-vuf0Lf0lOxyQREH7GDIOUMLS7kz+gs8i6B+Yi8dC68a2sychGrHTJYghMBD6k7eUcH0H5P73EckCA48xijWqXw=="], + + "npm/promise-call-limit": ["promise-call-limit@3.0.2", "", {}, "sha512-mRPQO2T1QQVw11E7+UdCJu7S61eJVWknzml9sC1heAdj1jxl0fWMBypIt9ZOcLFf8FkG995ZD7RnVk7HH72fZw=="], + + "npm/promise-retry": ["promise-retry@2.0.1", "", { "dependencies": { "err-code": 
"^2.0.2", "retry": "^0.12.0" } }, "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g=="], + + "npm/promzard": ["promzard@3.0.1", "", { "dependencies": { "read": "^5.0.0" } }, "sha512-M5mHhWh+Adz0BIxgSrqcc6GTCSconR7zWQV9vnOSptNtr6cSFlApLc28GbQhuN6oOWBQeV2C0bNE47JCY/zu3Q=="], + + "npm/qrcode-terminal": ["qrcode-terminal@0.12.0", "", { "bundled": true, "bin": { "qrcode-terminal": "./bin/qrcode-terminal.js" } }, "sha512-EXtzRZmC+YGmGlDFbXKxQiMZNwCLEO6BANKXG4iCtSIM0yqc/pappSx3RIKr4r0uh5JsBckOXeKrB3Iz7mdQpQ=="], + + "npm/read": ["read@5.0.1", "", { "dependencies": { "mute-stream": "^3.0.0" }, "bundled": true }, "sha512-+nsqpqYkkpet2UVPG8ZiuE8d113DK4vHYEoEhcrXBAlPiq6di7QRTuNiKQAbaRYegobuX2BpZ6QjanKOXnJdTA=="], + + "npm/read-cmd-shim": ["read-cmd-shim@6.0.0", "", {}, "sha512-1zM5HuOfagXCBWMN83fuFI/x+T/UhZ7k+KIzhrHXcQoeX5+7gmaDYjELQHmmzIodumBHeByBJT4QYS7ufAgs7A=="], + + "npm/retry": ["retry@0.13.1", "", {}, "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg=="], + + "npm/safer-buffer": ["safer-buffer@2.1.2", "", {}, "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="], + + "npm/semver": ["semver@7.7.4", "", { "bundled": true, "bin": { "semver": "bin/semver.js" } }, "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA=="], + + "npm/signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], + + "npm/sigstore": ["sigstore@4.1.0", "", { "dependencies": { "@sigstore/bundle": "^4.0.0", "@sigstore/core": "^3.1.0", "@sigstore/protobuf-specs": "^0.5.0", "@sigstore/sign": "^4.1.0", "@sigstore/tuf": "^4.0.1", "@sigstore/verify": "^3.1.0" } }, "sha512-/fUgUhYghuLzVT/gaJoeVehLCgZiUxPCPMcyVNY0lIf/cTCz58K/WTI7PefDarXxp9nUKpEwg1yyz3eSBMTtgA=="], + + "npm/smart-buffer": ["smart-buffer@4.2.0", "", {}, 
"sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg=="], + + "npm/socks": ["socks@2.8.7", "", { "dependencies": { "ip-address": "^10.0.1", "smart-buffer": "^4.2.0" } }, "sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A=="], + + "npm/socks-proxy-agent": ["socks-proxy-agent@8.0.5", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "^4.3.4", "socks": "^2.8.3" } }, "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw=="], + + "npm/spdx-exceptions": ["spdx-exceptions@2.5.0", "", {}, "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w=="], + + "npm/spdx-expression-parse": ["spdx-expression-parse@4.0.0", "", { "dependencies": { "spdx-exceptions": "^2.1.0", "spdx-license-ids": "^3.0.0" }, "bundled": true }, "sha512-Clya5JIij/7C6bRR22+tnGXbc4VKlibKSVj2iHvVeX5iMW7s1SIQlqu699JkODJJIhh/pUu8L0/VLh8xflD+LQ=="], + + "npm/spdx-license-ids": ["spdx-license-ids@3.0.23", "", {}, "sha512-CWLcCCH7VLu13TgOH+r8p1O/Znwhqv/dbb6lqWy67G+pT1kHmeD/+V36AVb/vq8QMIQwVShJ6Ssl5FPh0fuSdw=="], + + "npm/ssri": ["ssri@13.0.1", "", { "dependencies": { "minipass": "^7.0.3" }, "bundled": true }, "sha512-QUiRf1+u9wPTL/76GTYlKttDEBWV1ga9ZXW8BG6kfdeyyM8LGPix9gROyg9V2+P0xNyF3X2Go526xKFdMZrHSQ=="], + + "npm/supports-color": ["supports-color@10.2.2", "", { "bundled": true }, "sha512-SS+jx45GF1QjgEXQx4NJZV9ImqmO2NPz5FNsIHrsDjh2YsHnawpan7SNQ1o8NuhrbHZy9AZhIoCUiCeaW/C80g=="], + + "npm/tar": ["tar@7.5.9", "", { "dependencies": { "@isaacs/fs-minipass": "^4.0.0", "chownr": "^3.0.0", "minipass": "^7.1.2", "minizlib": "^3.1.0", "yallist": "^5.0.0" }, "bundled": true }, "sha512-BTLcK0xsDh2+PUe9F6c2TlRp4zOOBMTkoQHQIWSIzI0R7KG46uEwq4OPk2W7bZcprBMsuaeFsqwYr7pjh6CuHg=="], + + "npm/text-table": ["text-table@0.2.0", "", { "bundled": true }, "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw=="], + + 
"npm/tiny-relative-date": ["tiny-relative-date@2.0.2", "", { "bundled": true }, "sha512-rGxAbeL9z3J4pI2GtBEoFaavHdO4RKAU54hEuOef5kfx5aPqiQtbhYktMOTL5OA33db8BjsDcLXuNp+/v19PHw=="], + + "npm/tinyglobby": ["tinyglobby@0.2.15", "", { "dependencies": { "fdir": "^6.5.0", "picomatch": "^4.0.3" } }, "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ=="], + + "npm/treeverse": ["treeverse@3.0.0", "", { "bundled": true }, "sha512-gcANaAnd2QDZFmHFEOF4k7uc1J/6a6z3DJMd/QwEyxLoKGiptJRwid582r7QIsFlFMIZ3SnxfS52S4hm2DHkuQ=="], + + "npm/tuf-js": ["tuf-js@4.1.0", "", { "dependencies": { "@tufjs/models": "4.1.0", "debug": "^4.4.3", "make-fetch-happen": "^15.0.1" } }, "sha512-50QV99kCKH5P/Vs4E2Gzp7BopNV+KzTXqWeaxrfu5IQJBOULRsTIS9seSsOVT8ZnGXzCyx55nYWAi4qJzpZKEQ=="], + + "npm/unique-filename": ["unique-filename@5.0.0", "", { "dependencies": { "unique-slug": "^6.0.0" } }, "sha512-2RaJTAvAb4owyjllTfXzFClJ7WsGxlykkPvCr9pA//LD9goVq+m4PPAeBgNodGZ7nSrntT/auWpJ6Y5IFXcfjg=="], + + "npm/unique-slug": ["unique-slug@6.0.0", "", { "dependencies": { "imurmurhash": "^0.1.4" } }, "sha512-4Lup7Ezn8W3d52/xBhZBVdx323ckxa7DEvd9kPQHppTkLoJXw6ltrBCyj5pnrxj0qKDxYMJ56CoxNuFCscdTiw=="], + + "npm/util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="], + + "npm/validate-npm-package-name": ["validate-npm-package-name@7.0.2", "", { "bundled": true }, "sha512-hVDIBwsRruT73PbK7uP5ebUt+ezEtCmzZz3F59BSr2F6OVFnJ/6h8liuvdLrQ88Xmnk6/+xGGuq+pG9WwTuy3A=="], + + "npm/walk-up-path": ["walk-up-path@4.0.0", "", {}, "sha512-3hu+tD8YzSLGuFYtPRb48vdhKMi0KQV5sn+uWr8+7dMEq/2G/dtLrdDinkLjqq5TIbIBjYJ4Ax/n3YiaW7QM8A=="], + + "npm/which": ["which@6.0.1", "", { "dependencies": { "isexe": "^4.0.0" }, "bundled": true, "bin": { "node-which": "bin/which.js" } }, "sha512-oGLe46MIrCRqX7ytPUf66EAYvdeMIZYn3WaocqqKZAxrBpkqHfL/qvTyJ/bTk5+AqHCjXmrv3CEWgy368zhRUg=="], + + "npm/write-file-atomic": 
["write-file-atomic@7.0.1", "", { "dependencies": { "signal-exit": "^4.0.1" } }, "sha512-OTIk8iR8/aCRWBqvxrzxR0hgxWpnYBblY1S5hDWBQfk/VFmJwzmJgQFN3WsoUKHISv2eAwe+PpbUzyL1CKTLXg=="], + + "npm/yallist": ["yallist@5.0.0", "", {}, "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw=="], + + "parse5-htmlparser2-tree-adapter/parse5": ["parse5@6.0.1", "", {}, "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw=="], + + "read-pkg/parse-json": ["parse-json@8.3.0", "", { "dependencies": { "@babel/code-frame": "^7.26.2", "index-to-position": "^1.1.0", "type-fest": "^4.39.1" } }, "sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ=="], + + "semantic-release/@semantic-release/error": ["@semantic-release/error@4.0.0", "", {}, "sha512-mgdxrHTLOjOddRVYIYDo0fR3/v61GNN1YGkfbrjuIKg/uMgCd+Qzo3UAXJ+woLQQpos4pl5Esuw5A7AoNlzjUQ=="], + + "semantic-release/aggregate-error": ["aggregate-error@5.0.0", "", { "dependencies": { "clean-stack": "^5.2.0", "indent-string": "^5.0.0" } }, "sha512-gOsf2YwSlleG6IjRYG2A7k0HmBMEo6qVNk9Bp/EaLgAJT5ngH6PXbqa4ItvnEwCm/velL5jAnQgsHsWnjhGmvw=="], + + "semantic-release/execa": ["execa@9.6.1", "", { "dependencies": { "@sindresorhus/merge-streams": "^4.0.0", "cross-spawn": "^7.0.6", "figures": "^6.1.0", "get-stream": "^9.0.0", "human-signals": "^8.0.1", "is-plain-obj": "^4.1.0", "is-stream": "^4.0.1", "npm-run-path": "^6.0.0", "pretty-ms": "^9.2.0", "signal-exit": "^4.1.0", "strip-final-newline": "^4.0.0", "yoctocolors": "^2.1.1" } }, "sha512-9Be3ZoN4LmYR90tUoVu2te2BsbzHfhJyfEiAVfz7N5/zv+jduIfLrV2xdQXOHbaD6KgpGdO9PRPM1Y4Q9QkPkA=="], + + "semantic-release/p-reduce": ["p-reduce@3.0.0", "", {}, "sha512-xsrIUgI0Kn6iyDYm9StOpOeK29XM1aboGji26+QEortiFST1hGZaUQOLhtEbqHErPpGW/aSz6allwK2qcptp0Q=="], + + "signale/chalk": ["chalk@2.4.2", "", { "dependencies": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", "supports-color": "^5.3.0" } 
}, "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ=="], + + "signale/figures": ["figures@2.0.0", "", { "dependencies": { "escape-string-regexp": "^1.0.5" } }, "sha512-Oa2M9atig69ZkfwiApY8F2Yy+tzMbazyvqv21R0NsSC8floSOC09BbT1ITWAdoMGQvJ/aZnR1KMwdx9tvHnTNA=="], + + "tempy/is-stream": ["is-stream@3.0.0", "", {}, "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA=="], + + "tempy/type-fest": ["type-fest@2.19.0", "", {}, "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA=="], + + "tinyglobby/picomatch": ["picomatch@4.0.3", "", {}, "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="], + + "wrap-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], + + "@semantic-release/github/aggregate-error/clean-stack": ["clean-stack@5.3.0", "", { "dependencies": { "escape-string-regexp": "5.0.0" } }, "sha512-9ngPTOhYGQqNVSfeJkYXHmF7AGWp4/nN5D/QqNQs3Dvxd1Kk/WpjHfNujKHYUQ/5CoGyOyFNoWSPk5afzP0QVg=="], + + "@semantic-release/github/aggregate-error/indent-string": ["indent-string@5.0.0", "", {}, "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg=="], + + "@semantic-release/npm/aggregate-error/clean-stack": ["clean-stack@5.3.0", "", { "dependencies": { "escape-string-regexp": "5.0.0" } }, "sha512-9ngPTOhYGQqNVSfeJkYXHmF7AGWp4/nN5D/QqNQs3Dvxd1Kk/WpjHfNujKHYUQ/5CoGyOyFNoWSPk5afzP0QVg=="], + + "@semantic-release/npm/aggregate-error/indent-string": ["indent-string@5.0.0", "", {}, "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg=="], + + "@semantic-release/npm/execa/get-stream": ["get-stream@9.0.1", "", { "dependencies": { "@sec-ant/readable-stream": "^0.4.1", "is-stream": "^4.0.1" } }, 
"sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA=="], + + "@semantic-release/npm/execa/human-signals": ["human-signals@8.0.1", "", {}, "sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ=="], + + "@semantic-release/npm/execa/is-stream": ["is-stream@4.0.1", "", {}, "sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A=="], + + "@semantic-release/npm/execa/npm-run-path": ["npm-run-path@6.0.0", "", { "dependencies": { "path-key": "^4.0.0", "unicorn-magic": "^0.3.0" } }, "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA=="], + + "@semantic-release/npm/execa/signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], + + "@semantic-release/npm/execa/strip-final-newline": ["strip-final-newline@4.0.0", "", {}, "sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw=="], + + "@semantic-release/release-notes-generator/read-package-up/read-pkg": ["read-pkg@9.0.1", "", { "dependencies": { "@types/normalize-package-data": "^2.4.3", "normalize-package-data": "^6.0.0", "parse-json": "^8.0.0", "type-fest": "^4.6.0", "unicorn-magic": "^0.1.0" } }, "sha512-9viLL4/n1BJUCT1NXVTdS1jtm80yDEgR5T4yCelII49Mbj0v1rZdKqj7zCiYdbB0CuCgdrvHcNogAKTFPBocFA=="], + + "@semantic-release/release-notes-generator/read-package-up/type-fest": ["type-fest@4.41.0", "", {}, "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA=="], + + "cli-highlight/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], + + "cli-highlight/yargs/cliui": ["cliui@7.0.4", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.0", 
"wrap-ansi": "^7.0.0" } }, "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ=="], + + "cli-highlight/yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], + + "cli-highlight/yargs/yargs-parser": ["yargs-parser@20.2.9", "", {}, "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w=="], + + "cli-table3/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], + + "cli-table3/string-width/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + + "env-ci/execa/get-stream": ["get-stream@8.0.1", "", {}, "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA=="], + + "env-ci/execa/human-signals": ["human-signals@5.0.0", "", {}, "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ=="], + + "env-ci/execa/is-stream": ["is-stream@3.0.0", "", {}, "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA=="], + + "env-ci/execa/npm-run-path": ["npm-run-path@5.3.0", "", { "dependencies": { "path-key": "^4.0.0" } }, "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ=="], + + "env-ci/execa/onetime": ["onetime@6.0.0", "", { "dependencies": { "mimic-fn": "^4.0.0" } }, "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ=="], + + "env-ci/execa/signal-exit": ["signal-exit@4.1.0", "", {}, 
"sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], + + "env-ci/execa/strip-final-newline": ["strip-final-newline@3.0.0", "", {}, "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw=="], + + "npm/minipass-flush/minipass": ["minipass@3.3.6", "", { "dependencies": { "yallist": "^4.0.0" } }, "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw=="], + + "npm/minipass-pipeline/minipass": ["minipass@3.3.6", "", { "dependencies": { "yallist": "^4.0.0" } }, "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw=="], + + "npm/promise-retry/retry": ["retry@0.12.0", "", {}, "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow=="], + + "read-pkg/parse-json/type-fest": ["type-fest@4.41.0", "", {}, "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA=="], + + "semantic-release/aggregate-error/clean-stack": ["clean-stack@5.3.0", "", { "dependencies": { "escape-string-regexp": "5.0.0" } }, "sha512-9ngPTOhYGQqNVSfeJkYXHmF7AGWp4/nN5D/QqNQs3Dvxd1Kk/WpjHfNujKHYUQ/5CoGyOyFNoWSPk5afzP0QVg=="], + + "semantic-release/aggregate-error/indent-string": ["indent-string@5.0.0", "", {}, "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg=="], + + "semantic-release/execa/get-stream": ["get-stream@9.0.1", "", { "dependencies": { "@sec-ant/readable-stream": "^0.4.1", "is-stream": "^4.0.1" } }, "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA=="], + + "semantic-release/execa/human-signals": ["human-signals@8.0.1", "", {}, "sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ=="], + + "semantic-release/execa/is-stream": ["is-stream@4.0.1", "", {}, 
"sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A=="], + + "semantic-release/execa/npm-run-path": ["npm-run-path@6.0.0", "", { "dependencies": { "path-key": "^4.0.0", "unicorn-magic": "^0.3.0" } }, "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA=="], + + "semantic-release/execa/signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], + + "semantic-release/execa/strip-final-newline": ["strip-final-newline@4.0.0", "", {}, "sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw=="], + + "signale/chalk/escape-string-regexp": ["escape-string-regexp@1.0.5", "", {}, "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg=="], + + "signale/chalk/supports-color": ["supports-color@5.5.0", "", { "dependencies": { "has-flag": "^3.0.0" } }, "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow=="], + + "signale/figures/escape-string-regexp": ["escape-string-regexp@1.0.5", "", {}, "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg=="], + + "@semantic-release/npm/execa/npm-run-path/path-key": ["path-key@4.0.0", "", {}, "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ=="], + + "@semantic-release/npm/execa/npm-run-path/unicorn-magic": ["unicorn-magic@0.3.0", "", {}, "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA=="], + + "@semantic-release/release-notes-generator/read-package-up/read-pkg/normalize-package-data": ["normalize-package-data@6.0.2", "", { "dependencies": { "hosted-git-info": "^7.0.0", "semver": "^7.3.5", "validate-npm-package-license": "^3.0.4" } }, 
"sha512-V6gygoYb/5EmNI+MEGrWkC+e6+Rr7mTmfHrxDbLzxQogBkgzo76rkok0Am6thgSF7Mv2nLOajAJj5vDJZEFn7g=="], + + "@semantic-release/release-notes-generator/read-package-up/read-pkg/parse-json": ["parse-json@8.3.0", "", { "dependencies": { "@babel/code-frame": "^7.26.2", "index-to-position": "^1.1.0", "type-fest": "^4.39.1" } }, "sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ=="], + + "@semantic-release/release-notes-generator/read-package-up/read-pkg/unicorn-magic": ["unicorn-magic@0.1.0", "", {}, "sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ=="], + + "cli-highlight/chalk/ansi-styles/color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], + + "cli-highlight/yargs/cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + + "cli-highlight/yargs/cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="], + + "cli-highlight/yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], + + "cli-highlight/yargs/string-width/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + + "cli-table3/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "env-ci/execa/npm-run-path/path-key": 
["path-key@4.0.0", "", {}, "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ=="], + + "env-ci/execa/onetime/mimic-fn": ["mimic-fn@4.0.0", "", {}, "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw=="], + + "npm/minipass-flush/minipass/yallist": ["yallist@4.0.0", "", {}, "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="], + + "npm/minipass-pipeline/minipass/yallist": ["yallist@4.0.0", "", {}, "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="], + + "semantic-release/execa/npm-run-path/path-key": ["path-key@4.0.0", "", {}, "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ=="], + + "semantic-release/execa/npm-run-path/unicorn-magic": ["unicorn-magic@0.3.0", "", {}, "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA=="], + + "signale/chalk/supports-color/has-flag": ["has-flag@3.0.0", "", {}, "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw=="], + + "@semantic-release/release-notes-generator/read-package-up/read-pkg/normalize-package-data/hosted-git-info": ["hosted-git-info@7.0.2", "", { "dependencies": { "lru-cache": "^10.0.1" } }, "sha512-puUZAUKT5m8Zzvs72XWy3HtvVbTWljRE66cP60bxJzAqf2DgICo7lYTY2IHUmLnNpjYvw5bvmoHvPc0QO2a62w=="], + + "cli-highlight/chalk/ansi-styles/color-convert/color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], + + "cli-highlight/yargs/cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "cli-highlight/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, 
"sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], + + "cli-highlight/yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "@semantic-release/release-notes-generator/read-package-up/read-pkg/normalize-package-data/hosted-git-info/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="], + + "cli-highlight/yargs/cliui/wrap-ansi/ansi-styles/color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], + + "cli-highlight/yargs/cliui/wrap-ansi/ansi-styles/color-convert/color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], } } diff --git a/docs/brainstorms/2026-02-14-copilot-converter-target-brainstorm.md b/docs/brainstorms/2026-02-14-copilot-converter-target-brainstorm.md new file mode 100644 index 0000000..9bdec41 --- /dev/null +++ b/docs/brainstorms/2026-02-14-copilot-converter-target-brainstorm.md @@ -0,0 +1,117 @@ +--- +date: 2026-02-14 +topic: copilot-converter-target +--- + +# Add GitHub Copilot Converter Target + +## What We're Building + +A new converter target that transforms the compound-engineering Claude Code plugin into GitHub Copilot's native format. This follows the same established pattern as the existing converters (Cursor, Codex, OpenCode, Droid, Pi) and outputs files that Copilot can consume directly from `.github/` (repo-level) or `~/.copilot/` (user-wide). 
+ +Copilot's customization system (as of early 2026) supports: custom agents (`.agent.md`), agent skills (`SKILL.md`), prompt files (`.prompt.md`), custom instructions (`copilot-instructions.md`), and MCP servers (via repo settings). + +## Why This Approach + +The repository already has a robust multi-target converter infrastructure with a consistent `TargetHandler` pattern. Adding Copilot as a new target follows this proven pattern rather than inventing something new. Copilot's format is close enough to Claude Code's that the conversion is straightforward, and the SKILL.md format is already cross-compatible. + +### Approaches Considered + +1. **Full converter target (chosen)** — Follow the existing pattern with types, converter, writer, and target registration. Most consistent with codebase conventions. +2. **Minimal agent-only converter** — Only convert agents, skip commands/skills. Too limited; users would lose most of the plugin's value. +3. **Documentation-only approach** — Just document how to manually set up Copilot. Doesn't compound — every user would repeat the work. + +## Key Decisions + +### Component Mapping + +| Claude Code Component | Copilot Equivalent | Notes | +|----------------------|-------------------|-------| +| **Agents** (`.md`) | **Custom Agents** (`.agent.md`) | Full frontmatter mapping: description, tools, target, infer | +| **Commands** (`.md`) | **Agent Skills** (`SKILL.md`) | Commands become skills since Copilot has no direct command equivalent. `allowed-tools` dropped silently. 
| +| **Skills** (`SKILL.md`) | **Agent Skills** (`SKILL.md`) | Copy as-is — format is already cross-compatible | +| **MCP Servers** | **Repo settings JSON** | Generate a `copilot-mcp-config.json` users paste into GitHub repo settings | +| **Hooks** | **Skipped with warning** | Copilot doesn't have a hooks equivalent | + +### Agent Frontmatter Mapping + +| Claude Field | Copilot Field | Mapping | +|-------------|--------------|---------| +| `name` | `name` | Direct pass-through | +| `description` | `description` (required) | Direct pass-through, generate fallback if missing | +| `capabilities` | Body text | Fold into body as "## Capabilities" section (like Cursor) | +| `model` | `model` | Pass through (works in IDE, may be ignored on github.com) | +| — | `tools` | Default to `["*"]` (all tools). Claude agents have unrestricted tool access, so Copilot agents should too. | +| — | `target` | Omit (defaults to `both` — IDE + github.com) | +| — | `infer` | Set to `true` (auto-selection enabled) | + +### Output Directories + +- **Repository-level (default):** `.github/agents/`, `.github/skills/` +- **User-wide (with --personal flag):** `~/.copilot/skills/` (only skills supported at this level) + +### Content Transformation + +Apply transformations similar to Cursor converter: + +1. **Task agent calls:** `Task agent-name(args)` → `Use the agent-name skill to: args` +2. **Slash commands:** `/workflows:plan` → `/plan` (flatten namespace) +3. **Path rewriting:** `.claude/` → `.github/` (Copilot's repo-level config path) +4. **Agent references:** `@agent-name` → `the agent-name agent` + +### MCP Server Handling + +Generate a `copilot-mcp-config.json` file with the structure Copilot expects: + +```json +{ + "mcpServers": { + "server-name": { + "type": "local", + "command": "npx", + "args": ["package"], + "tools": ["*"], + "env": { + "KEY": "COPILOT_MCP_KEY" + } + } + } +} +``` + +Note: Copilot requires env vars to use the `COPILOT_MCP_` prefix. 
The converter should transform env var names accordingly and include a comment/note about this. + +## Files to Create/Modify + +### New Files + +- `src/types/copilot.ts` — Type definitions (CopilotAgent, CopilotSkill, CopilotBundle, etc.) +- `src/converters/claude-to-copilot.ts` — Converter with `transformContentForCopilot()` +- `src/targets/copilot.ts` — Writer with `writeCopilotBundle()` +- `docs/specs/copilot.md` — Format specification document + +### Modified Files + +- `src/targets/index.ts` — Register copilot target handler +- `src/commands/sync.ts` — Add "copilot" to valid sync targets + +### Test Files + +- `tests/copilot-converter.test.ts` — Converter tests following existing patterns + +### Character Limit + +Copilot imposes a 30,000 character limit on agent body content. If an agent body exceeds this after folding in capabilities, the converter should truncate with a warning to stderr. + +### Agent File Extension + +Use `.agent.md` (not plain `.md`). This is the canonical Copilot convention and makes agent files immediately identifiable. + +## Open Questions + +- Should the converter generate a `copilot-setup-steps.yml` workflow file for MCP servers that need special dependencies (e.g., `uv`, `pipx`)? +- Should `.github/copilot-instructions.md` be generated with any base instructions from the plugin? + +## Next Steps + +→ `/workflows:plan` for implementation details diff --git a/docs/brainstorms/2026-02-17-copilot-skill-naming-brainstorm.md b/docs/brainstorms/2026-02-17-copilot-skill-naming-brainstorm.md new file mode 100644 index 0000000..c04e97d --- /dev/null +++ b/docs/brainstorms/2026-02-17-copilot-skill-naming-brainstorm.md @@ -0,0 +1,30 @@ +--- +date: 2026-02-17 +topic: copilot-skill-naming +--- + +# Copilot Skill Naming: Preserve Namespace + +## What We're Building + +Change the Copilot converter to preserve command namespaces when converting commands to skills. 
Currently `workflows:plan` flattens to `plan`, which is too generic and clashes with Copilot's own features in the chat suggestion UI. + +## Why This Approach + +The `flattenCommandName` function strips everything before the last colon, producing names like `plan`, `review`, `work` that are too generic for Copilot's skill discovery UI. Replacing colons with hyphens (`workflows:plan` -> `workflows-plan`) preserves context while staying within valid filename characters. + +## Key Decisions + +- **Replace colons with hyphens** instead of stripping the prefix: `workflows:plan` -> `workflows-plan` +- **Copilot only** — other converters (Cursor, Droid, etc.) keep their current flattening behavior +- **Content transformation too** — slash command references in body text also use hyphens: `/workflows:plan` -> `/workflows-plan` + +## Changes Required + +1. `src/converters/claude-to-copilot.ts` — change `flattenCommandName` to replace colons with hyphens +2. `src/converters/claude-to-copilot.ts` — update `transformContentForCopilot` slash command rewriting +3. 
`tests/copilot-converter.test.ts` — update affected tests + +## Next Steps + +-> Implement directly (small, well-scoped change) diff --git a/docs/css/docs.css b/docs/css/docs.css deleted file mode 100644 index 2e89ca5..0000000 --- a/docs/css/docs.css +++ /dev/null @@ -1,675 +0,0 @@ -/* Documentation-specific styles */ - -/* ============================================ - Documentation Layout - ============================================ */ - -.docs-layout { - display: grid; - grid-template-columns: 1fr; - min-height: 100vh; -} - -@media (min-width: 1024px) { - .docs-layout { - grid-template-columns: 280px 1fr; - } -} - -/* ============================================ - Sidebar - ============================================ */ - -.docs-sidebar { - position: fixed; - top: 0; - left: -300px; - width: 280px; - height: 100vh; - background-color: var(--color-background); - border-right: 1px solid var(--color-border); - overflow-y: auto; - transition: left 0.3s ease; - z-index: 100; -} - -.docs-sidebar.open { - left: 0; -} - -@media (min-width: 1024px) { - .docs-sidebar { - position: sticky; - left: 0; - } -} - -.sidebar-header { - padding: var(--space-l); - border-bottom: 1px solid var(--color-border); -} - -.sidebar-header .nav-brand { - display: flex; - align-items: center; - gap: var(--space-s); - text-decoration: none; - color: var(--color-text-primary); - font-weight: 600; -} - -.sidebar-header .logo-icon { - color: var(--color-accent); - font-size: var(--font-size-l); -} - -.sidebar-header .logo-text { - display: inline; -} - -.sidebar-nav { - padding: var(--space-l); -} - -.nav-section { - margin-bottom: var(--space-xl); -} - -.nav-section h3 { - font-size: var(--font-size-xs); - font-weight: 600; - text-transform: uppercase; - letter-spacing: 0.05em; - color: var(--color-text-tertiary); - margin: 0 0 var(--space-m) 0; -} - -.nav-section ul { - list-style: none; - margin: 0; - padding: 0; -} - -.nav-section li { - margin: 0; -} - -.nav-section a { - display: 
block; - padding: var(--space-s) var(--space-m); - color: var(--color-text-secondary); - text-decoration: none; - font-size: var(--font-size-s); - border-radius: var(--radius-s); - transition: all 0.2s ease; -} - -.nav-section a:hover { - color: var(--color-text-primary); - background-color: var(--color-surface); -} - -.nav-section a.active { - color: var(--color-accent); - background-color: var(--color-accent-light); -} - -/* ============================================ - Main Content - ============================================ */ - -.docs-content { - padding: var(--space-xl); - max-width: 900px; -} - -@media (min-width: 1024px) { - .docs-content { - padding: var(--space-xxl); - } -} - -.docs-header { - display: flex; - align-items: center; - justify-content: space-between; - margin-bottom: var(--space-xl); -} - -.breadcrumb { - display: flex; - align-items: center; - gap: var(--space-s); - font-size: var(--font-size-s); - color: var(--color-text-tertiary); -} - -.breadcrumb a { - color: var(--color-text-secondary); - text-decoration: none; -} - -.breadcrumb a:hover { - color: var(--color-accent); -} - -.mobile-menu-toggle { - display: flex; - align-items: center; - justify-content: center; - width: 40px; - height: 40px; - background: none; - border: 1px solid var(--color-border); - border-radius: var(--radius-s); - color: var(--color-text-secondary); - cursor: pointer; -} - -@media (min-width: 1024px) { - .mobile-menu-toggle { - display: none; - } -} - -/* ============================================ - Article Styles - ============================================ */ - -.docs-article { - line-height: 1.7; -} - -.docs-article h1 { - font-size: var(--font-size-xl); - margin-bottom: var(--space-l); -} - -.docs-article h2 { - font-size: var(--font-size-l); - margin-top: var(--space-xxl); - margin-bottom: var(--space-l); - padding-bottom: var(--space-s); - border-bottom: 1px solid var(--color-border); - display: flex; - align-items: center; - gap: var(--space-s); -} 
- -.docs-article h2 i { - color: var(--color-accent); -} - -.docs-article h3 { - font-size: var(--font-size-m); - margin-top: var(--space-xl); - margin-bottom: var(--space-m); -} - -.docs-article h4 { - font-size: var(--font-size-s); - margin-top: var(--space-l); - margin-bottom: var(--space-s); -} - -.docs-article p { - margin-bottom: var(--space-l); -} - -.docs-article .lead { - font-size: var(--font-size-l); - color: var(--color-text-secondary); - margin-bottom: var(--space-xl); -} - -.docs-article ul, -.docs-article ol { - margin-bottom: var(--space-l); - padding-left: var(--space-xl); -} - -.docs-article li { - margin-bottom: var(--space-s); -} - -/* ============================================ - Code Blocks in Docs - ============================================ */ - -.docs-article .card-code-block { - margin: var(--space-l) 0; -} - -.docs-article code { - font-family: var(--font-mono); - font-size: 0.9em; - background-color: var(--color-surface); - padding: 2px 6px; - border-radius: var(--radius-xs); - color: var(--color-accent); -} - -.docs-article pre code { - background: none; - padding: 0; - color: var(--color-code-text); -} - -/* ============================================ - Tables - ============================================ */ - -.docs-table { - width: 100%; - border-collapse: collapse; - margin: var(--space-l) 0; - font-size: var(--font-size-s); -} - -.docs-table th, -.docs-table td { - padding: var(--space-m); - text-align: left; - border-bottom: 1px solid var(--color-border); -} - -.docs-table th { - font-weight: 600; - color: var(--color-text-primary); - background-color: var(--color-surface); -} - -.docs-table td { - color: var(--color-text-secondary); -} - -.docs-table code { - font-size: 0.85em; -} - -/* ============================================ - Callouts - ============================================ */ - -.callout { - display: flex; - gap: var(--space-m); - padding: var(--space-l); - border-radius: var(--radius-m); - margin: 
var(--space-l) 0; -} - -.callout-icon { - font-size: var(--font-size-l); - flex-shrink: 0; -} - -.callout-content h4 { - margin: 0 0 var(--space-s) 0; - font-size: var(--font-size-s); -} - -.callout-content p { - margin: 0; - font-size: var(--font-size-s); -} - -.callout-info { - background-color: rgba(99, 102, 241, 0.1); - border: 1px solid rgba(99, 102, 241, 0.2); -} - -.callout-info .callout-icon { - color: var(--color-accent); -} - -.callout-info .callout-content h4 { - color: var(--color-accent); -} - -.callout-tip { - background-color: rgba(16, 185, 129, 0.1); - border: 1px solid rgba(16, 185, 129, 0.2); -} - -.callout-tip .callout-icon { - color: var(--color-success); -} - -.callout-tip .callout-content h4 { - color: var(--color-success); -} - -.callout-warning { - background-color: rgba(245, 158, 11, 0.1); - border: 1px solid rgba(245, 158, 11, 0.2); -} - -.callout-warning .callout-icon { - color: var(--color-warning); -} - -.callout-warning .callout-content h4 { - color: var(--color-warning); -} - -/* ============================================ - Badges - ============================================ */ - -.badge { - display: inline-block; - padding: 2px 8px; - font-size: var(--font-size-xs); - font-weight: 600; - border-radius: var(--radius-s); - text-transform: uppercase; - letter-spacing: 0.03em; -} - -.badge-critical { - background-color: rgba(239, 68, 68, 0.15); - color: var(--color-error); -} - -.badge-important { - background-color: rgba(245, 158, 11, 0.15); - color: var(--color-warning); -} - -.badge-nice { - background-color: rgba(99, 102, 241, 0.15); - color: var(--color-accent); -} - -/* ============================================ - Philosophy Grid - ============================================ */ - -.philosophy-grid { - display: grid; - grid-template-columns: repeat(1, 1fr); - gap: var(--space-l); - margin: var(--space-xl) 0; -} - -@media (min-width: 640px) { - .philosophy-grid { - grid-template-columns: repeat(2, 1fr); - } -} - 
-.philosophy-card { - padding: var(--space-xl); - background-color: var(--color-surface); - border-radius: var(--radius-m); - border: 1px solid var(--color-border); -} - -.philosophy-icon { - font-size: var(--font-size-xl); - color: var(--color-accent); - margin-bottom: var(--space-m); -} - -.philosophy-card h4 { - margin: 0 0 var(--space-s) 0; - color: var(--color-text-primary); -} - -.philosophy-card p { - margin: 0; - font-size: var(--font-size-s); - color: var(--color-text-secondary); -} - -/* ============================================ - Blockquotes - ============================================ */ - -.highlight-quote { - font-size: var(--font-size-l); - font-style: italic; - color: var(--color-accent); - padding: var(--space-xl); - margin: var(--space-xl) 0; - background: linear-gradient(135deg, var(--color-accent-lighter), transparent); - border-left: 4px solid var(--color-accent); - border-radius: var(--radius-m); -} - -/* ============================================ - Navigation Footer - ============================================ */ - -.docs-nav-footer { - display: flex; - justify-content: space-between; - gap: var(--space-l); - margin-top: var(--space-xxl); - padding-top: var(--space-xl); - border-top: 1px solid var(--color-border); -} - -.nav-prev, -.nav-next { - display: flex; - flex-direction: column; - gap: var(--space-xs); - padding: var(--space-l); - background-color: var(--color-surface); - border-radius: var(--radius-m); - text-decoration: none; - transition: all 0.2s ease; - flex: 1; - max-width: 300px; -} - -.nav-prev:hover, -.nav-next:hover { - background-color: var(--color-surface-hover); - border-color: var(--color-accent); -} - -.nav-next { - text-align: right; - margin-left: auto; -} - -.nav-label { - font-size: var(--font-size-xs); - color: var(--color-text-tertiary); - text-transform: uppercase; - letter-spacing: 0.05em; -} - -.nav-title { - font-weight: 600; - color: var(--color-accent); - display: flex; - align-items: center; - gap: 
var(--space-s); -} - -.nav-next .nav-title { - justify-content: flex-end; -} - -/* ============================================ - Mobile Sidebar Overlay - ============================================ */ - -@media (max-width: 1023px) { - .docs-sidebar.open::before { - content: ''; - position: fixed; - top: 0; - left: 0; - right: 0; - bottom: 0; - background-color: rgba(0, 0, 0, 0.5); - z-index: -1; - } -} - -/* ============================================ - Changelog Styles - ============================================ */ - -.version-section { - margin-bottom: var(--space-xxl); - padding-bottom: var(--space-xl); - border-bottom: 1px solid var(--color-border); -} - -.version-section:last-child { - border-bottom: none; -} - -.version-header { - display: flex; - align-items: center; - gap: var(--space-m); - margin-bottom: var(--space-l); - flex-wrap: wrap; -} - -.version-header h2 { - margin: 0; - padding: 0; - border: none; - font-size: var(--font-size-xl); - color: var(--color-text-primary); -} - -.version-date { - font-size: var(--font-size-s); - color: var(--color-text-tertiary); - background-color: var(--color-surface); - padding: var(--space-xs) var(--space-m); - border-radius: var(--radius-s); -} - -.version-badge { - font-size: var(--font-size-xs); - font-weight: 600; - padding: var(--space-xs) var(--space-m); - border-radius: var(--radius-s); - background-color: var(--color-accent); - color: white; -} - -.version-badge.major { - background-color: var(--color-warning); -} - -.version-description { - font-size: var(--font-size-m); - color: var(--color-text-secondary); - margin-bottom: var(--space-l); - font-style: italic; -} - -.changelog-category { - margin-bottom: var(--space-l); - padding: var(--space-l); - background-color: var(--color-surface); - border-radius: var(--radius-m); - border-left: 4px solid var(--color-border); -} - -.changelog-category h3 { - margin: 0 0 var(--space-m) 0; - font-size: var(--font-size-m); - display: flex; - align-items: center; 
- gap: var(--space-s); -} - -.changelog-category h3 i { - font-size: var(--font-size-s); -} - -.changelog-category h4 { - margin: var(--space-l) 0 var(--space-s) 0; - font-size: var(--font-size-s); - color: var(--color-text-secondary); -} - -.changelog-category ul { - margin: 0; - padding-left: var(--space-xl); -} - -.changelog-category li { - margin-bottom: var(--space-s); -} - -.changelog-category.added { - border-left-color: var(--color-success); -} - -.changelog-category.added h3 { - color: var(--color-success); -} - -.changelog-category.improved { - border-left-color: var(--color-accent); -} - -.changelog-category.improved h3 { - color: var(--color-accent); -} - -.changelog-category.changed { - border-left-color: var(--color-warning); -} - -.changelog-category.changed h3 { - color: var(--color-warning); -} - -.changelog-category.fixed { - border-left-color: var(--color-error); -} - -.changelog-category.fixed h3 { - color: var(--color-error); -} - -.version-summary { - margin-top: var(--space-l); -} - -.version-summary h4 { - margin-bottom: var(--space-m); -} - -.version-summary table { - width: 100%; - max-width: 400px; - border-collapse: collapse; - font-size: var(--font-size-s); -} - -.version-summary th, -.version-summary td { - padding: var(--space-s) var(--space-m); - text-align: left; - border-bottom: 1px solid var(--color-border); -} - -.version-summary th { - font-weight: 600; - background-color: var(--color-surface); -} - -.version-summary .positive { - color: var(--color-success); - font-weight: 600; -} - -.version-summary .negative { - color: var(--color-error); - font-weight: 600; -} diff --git a/docs/css/style.css b/docs/css/style.css deleted file mode 100644 index 3bc9074..0000000 --- a/docs/css/style.css +++ /dev/null @@ -1,2886 +0,0 @@ -/* Compounding Engineering Documentation Styles */ -/* Based on LaunchKit template by Evil Martians */ - -/* ============================================ - CSS Variables & Theme Configuration - 
============================================ */ - -:root { - /* Theme configuration */ - --theme-hue: 243; - --theme-saturation: 1; - --theme-contrast: 0.71; - - /* Fonts */ - --font-text: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen, Ubuntu, sans-serif; - --font-mono: "JetBrains Mono", "Fira Code", "SF Mono", Consolas, monospace; - - /* Font sizes */ - --font-size-xs: 12px; - --font-size-s: 14px; - --font-size-m: 16px; - --font-size-l: 20px; - --font-size-xl: 32px; - --font-size-xxl: 48px; - - /* Line heights */ - --line-height-paragraph-s: 20px; - --line-height-paragraph-m: 24px; - --line-height-paragraph-l: 28px; - --line-height-h1: 56px; - --line-height-h2: 40px; - --line-height-h3: 28px; - --line-height-ui-s: 16px; - --line-height-ui-m: 22px; - - /* Spacing */ - --space-xs: 4px; - --space-s: 8px; - --space-m: 12px; - --space-l: 16px; - --space-xl: 24px; - --space-xxl: 48px; - --space-section: 80px; - --space-card: 20px; - - /* Border radii */ - --radius-xs: 4px; - --radius-s: 8px; - --radius-m: 12px; - --radius-l: 16px; - --radius-xl: 24px; - - /* UI */ - --header-font-weight: 600; - --ui-button-font-weight: 500; -} - -/* Light Theme */ -.theme-light { - --color-background: #ffffff; - --color-background-blur: rgba(255, 255, 255, 0.9); - --color-surface: rgba(0, 0, 0, 0.03); - --color-surface-hover: rgba(0, 0, 0, 0.06); - --color-text-primary: #1a1a2e; - --color-text-secondary: #64748b; - --color-text-tertiary: #94a3b8; - --color-accent: #6366f1; - --color-accent-hover: #4f46e5; - --color-accent-light: rgba(99, 102, 241, 0.1); - --color-accent-lighter: rgba(99, 102, 241, 0.05); - --color-on-accent: #ffffff; - --color-border: rgba(0, 0, 0, 0.08); - --color-border-strong: rgba(0, 0, 0, 0.15); - --color-gradient-start: rgba(99, 102, 241, 0.15); - --color-gradient-end: rgba(99, 102, 241, 0); - --color-promo-start: #a855f7; - --color-promo-end: #6366f1; - --color-success: #10b981; - --color-warning: #f59e0b; - --color-error: #ef4444; - 
--color-code-bg: #1e1e2e; - --color-code-text: #cdd6f4; -} - -/* Dark Theme */ -.theme-dark { - color-scheme: dark; - --color-background: #0f0f1a; - --color-background-blur: rgba(15, 15, 26, 0.9); - --color-surface: rgba(255, 255, 255, 0.04); - --color-surface-hover: rgba(255, 255, 255, 0.08); - --color-text-primary: #f1f5f9; - --color-text-secondary: #94a3b8; - --color-text-tertiary: #64748b; - --color-accent: #818cf8; - --color-accent-hover: #a5b4fc; - --color-accent-light: rgba(129, 140, 248, 0.15); - --color-accent-lighter: rgba(129, 140, 248, 0.08); - --color-on-accent: #0f0f1a; - --color-border: rgba(255, 255, 255, 0.08); - --color-border-strong: rgba(255, 255, 255, 0.15); - --color-gradient-start: rgba(129, 140, 248, 0.2); - --color-gradient-end: rgba(129, 140, 248, 0); - --color-promo-start: #c084fc; - --color-promo-end: #818cf8; - --color-success: #34d399; - --color-warning: #fbbf24; - --color-error: #f87171; - --color-code-bg: #1e1e2e; - --color-code-text: #cdd6f4; -} - -/* ============================================ - Base Styles - ============================================ */ - -*, *::before, *::after { - box-sizing: border-box; -} - -html, body { - margin: 0; - padding: 0; -} - -body { - background-color: var(--color-background); - font-family: var(--font-text); - color: var(--color-text-primary); - font-size: var(--font-size-m); - line-height: var(--line-height-paragraph-m); - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; -} - -/* ============================================ - Typography - ============================================ */ - -h1, h2, h3, h4, h5, h6 { - font-weight: var(--header-font-weight); - margin: var(--space-m) 0; - letter-spacing: -0.02em; -} - -h1 { - font-size: 64px; - line-height: 1.1; - letter-spacing: -0.045em; - font-weight: 750; - background: linear-gradient(135deg, var(--color-text-primary), var(--color-text-secondary)); - -webkit-background-clip: text; - -webkit-text-fill-color: transparent; 
- background-clip: text; -} - -h2 { - font-size: var(--font-size-xl); - line-height: var(--line-height-h2); -} - -h3 { - font-size: var(--font-size-l); - line-height: var(--line-height-h3); -} - -p { - margin: var(--space-m) 0; -} - -a { - color: var(--color-accent); - text-decoration: none; - transition: color 0.2s ease; -} - -a:hover { - color: var(--color-accent-hover); -} - -ul, ol { - margin: var(--space-l) 0; - padding-left: 24px; -} - -li { - margin: var(--space-s) 0; -} - -code { - font-family: var(--font-mono); - font-size: 0.9em; - background-color: var(--color-surface); - padding: 2px 6px; - border-radius: var(--radius-xs); - color: var(--color-accent); -} - -img { - max-width: 100%; - vertical-align: middle; -} - -/* Text utilities */ -.paragraph { - margin: var(--space-m) 0; -} -.paragraph.s { font-size: var(--font-size-s); line-height: var(--line-height-paragraph-s); } -.paragraph.m { font-size: 19px; line-height: 30px; opacity: 0.9; } -.paragraph.l { font-size: var(--font-size-l); line-height: var(--line-height-paragraph-l); } -.paragraph.bold { font-weight: 600; } - -.secondary { color: var(--color-text-secondary); } -.tertiary { color: var(--color-text-tertiary); } -.color-accent { color: var(--color-accent); } - -.no-top-margin { margin-top: 0; } -.balanced { text-wrap: balance; } - -/* ============================================ - Layout - ============================================ */ - -.page-container { - max-width: 1200px; - min-height: 100vh; - margin: 0 auto; - padding: 0 var(--space-xl); - display: flex; - flex-direction: column; -} - -section { - padding: var(--space-section) 0; -} - -.background-gradient { - position: fixed; - top: 0; - left: 0; - right: 0; - height: 100vh; - background: linear-gradient(180deg, var(--color-gradient-start) 0%, var(--color-gradient-end) 50%); - z-index: -10; - pointer-events: none; -} - -/* ============================================ - Navigation - ============================================ */ - 
-.nav-container { - position: sticky; - top: var(--space-l); - margin-top: var(--space-l); - display: flex; - justify-content: space-between; - align-items: center; - background-color: var(--color-background-blur); - backdrop-filter: blur(20px); - -webkit-backdrop-filter: blur(20px); - border: 1px solid var(--color-border); - border-radius: var(--radius-l); - padding: var(--space-m) var(--space-l); - z-index: 1000; -} - -.nav-brand { - display: flex; - align-items: center; - gap: var(--space-s); - text-decoration: none; - color: var(--color-text-primary); - font-weight: 600; - font-size: var(--font-size-m); -} - -.logo-icon { - color: var(--color-accent); - font-size: var(--font-size-l); -} - -.logo-text { - display: none; -} - -@media (min-width: 768px) { - .logo-text { - display: inline; - } -} - -.nav-menu { - display: none; - gap: var(--space-xs); -} - -@media (min-width: 1024px) { - .nav-menu { - display: flex; - } -} - -.nav-link { - color: var(--color-text-secondary); - font-size: var(--font-size-s); - font-weight: var(--ui-button-font-weight); - padding: var(--space-s) var(--space-m); - border-radius: var(--radius-s); - text-decoration: none; - transition: all 0.2s ease; -} - -.nav-link:hover { - color: var(--color-text-primary); - background-color: var(--color-surface); -} - -.nav-hamburger { - display: flex; -} - -@media (min-width: 1024px) { - .nav-hamburger { - display: none; - } -} - -/* Mobile nav */ -.nav-menu.open { - display: flex; - flex-direction: column; - position: absolute; - top: calc(100% + var(--space-s)); - left: 0; - right: 0; - background-color: var(--color-background); - border: 1px solid var(--color-border); - border-radius: var(--radius-l); - padding: var(--space-l); -} - -/* ============================================ - Buttons - ============================================ */ - -.button { - display: inline-flex; - align-items: center; - justify-content: center; - gap: var(--space-s); - padding: 14px 28px; - font-size: 
var(--font-size-m); - font-weight: 600; - font-family: inherit; - text-decoration: none; - border: none; - border-radius: var(--radius-m); - cursor: pointer; - transition: all 0.25s cubic-bezier(0.4, 0, 0.2, 1); -} - -.button.compact { - padding: var(--space-s) var(--space-m); - font-size: var(--font-size-s); - border-radius: var(--radius-s); -} - -.button.primary { - background: linear-gradient(135deg, var(--color-accent), var(--color-accent-hover)); - color: var(--color-on-accent); - box-shadow: 0 4px 14px rgba(99, 102, 241, 0.3), 0 2px 4px rgba(0, 0, 0, 0.1); - position: relative; - overflow: hidden; -} - -.button.primary:hover { - background: linear-gradient(135deg, var(--color-accent-hover), var(--color-accent)); - transform: translateY(-2px); - box-shadow: 0 6px 16px rgba(99, 102, 241, 0.35); -} - -.button.secondary { - background-color: var(--color-accent-light); - color: var(--color-accent); -} - -.button.secondary:hover { - background-color: var(--color-accent-lighter); -} - -.button.tertiary { - background-color: transparent; - color: var(--color-text-primary); - border: 1.5px solid var(--color-border-strong); -} - -.button.tertiary:hover { - background-color: var(--color-surface); - border-color: var(--color-accent); - color: var(--color-accent); - transform: translateY(-1px); -} - -.button.ghost { - background-color: transparent; - color: var(--color-text-secondary); -} - -.button.ghost:hover { - background-color: var(--color-surface); - color: var(--color-text-primary); -} - -.button-group { - display: flex; - flex-wrap: wrap; - gap: var(--space-m); - align-items: center; -} - -.button-group.centered { - justify-content: center; -} - -.button-group.stacked { - flex-direction: column; -} - -.button-group.margin-paragraph { - margin: var(--space-l) 0; -} - -/* ============================================ - Headings & Sections - ============================================ */ - -.heading { - max-width: 720px; - margin-bottom: var(--space-xl); -} - 
-.heading.centered { - text-align: center; - margin-left: auto; - margin-right: auto; -} - -.heading.hero { - padding: 64px 0 80px 0; -} - -.eyebrow { - display: inline-flex; - align-items: center; - gap: 10px; - padding: 8px 20px; - background: linear-gradient(135deg, var(--color-accent-light), var(--color-accent-lighter)); - color: var(--color-accent); - font-size: 14px; - font-weight: 600; - border-radius: 100px; - margin-bottom: 28px; - text-decoration: none; - border: 1px solid rgba(129, 140, 248, 0.2); - transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); - box-shadow: 0 2px 8px rgba(129, 140, 248, 0.15); -} - -.eyebrow:hover { - background: linear-gradient(135deg, var(--color-accent), var(--color-accent-light)); - transform: translateY(-2px); - box-shadow: 0 4px 16px rgba(129, 140, 248, 0.25); - border-color: var(--color-accent); -} - -/* Hero Section Enhancements */ -.hero-section { - position: relative; - overflow: hidden; -} - -.hero-decoration { - position: absolute; - top: -200px; - left: 50%; - transform: translateX(-50%); - width: 800px; - height: 800px; - background: radial-gradient(circle, var(--color-accent-lighter) 0%, transparent 70%); - opacity: 0.15; - pointer-events: none; - z-index: -1; -} - -/* ============================================ - Stats Section - ============================================ */ - -.stats-section { - padding: 64px 0; - position: relative; -} - -.stats-section::before { - content: ''; - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - width: 600px; - height: 600px; - background: radial-gradient(circle, var(--color-accent-lighter) 0%, transparent 70%); - opacity: 0.1; - pointer-events: none; - z-index: -1; -} - -.stats-container { - display: grid; - grid-template-columns: repeat(2, 1fr); - gap: 20px; - max-width: 1000px; - margin: 0 auto; -} - -@media (min-width: 768px) { - .stats-container { - grid-template-columns: repeat(4, 1fr); - gap: 24px; - } -} - -.stat-card { - text-align: 
center; - padding: 36px 24px; - background: var(--color-background); - border-radius: var(--radius-l); - border: 1.5px solid transparent; - background-image: - linear-gradient(var(--color-background), var(--color-background)), - linear-gradient(135deg, var(--color-accent-light), var(--color-border)); - background-origin: border-box; - background-clip: padding-box, border-box; - position: relative; - overflow: hidden; - transition: all 0.35s cubic-bezier(0.4, 0, 0.2, 1); - cursor: default; - box-shadow: 0 4px 12px rgba(0, 0, 0, 0.05); -} - -.stat-card::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - height: 3px; - background: linear-gradient(90deg, transparent, var(--color-accent), transparent); - opacity: 0; - transition: opacity 0.35s ease; -} - -.stat-card:hover { - transform: translateY(-2px); - border-color: var(--color-accent); - box-shadow: 0 8px 24px rgba(129, 140, 248, 0.2); -} - -.stat-card:hover::before { - opacity: 1; -} - -.stat-card:hover .stat-icon { - transform: scale(1.15) translateY(-2px); - background: linear-gradient(135deg, var(--color-accent), var(--color-accent-hover)); - color: var(--color-on-accent); - box-shadow: 0 8px 16px rgba(129, 140, 248, 0.3); -} - -.stat-card:hover .stat-number { - transform: scale(1.08); - filter: brightness(1.1); -} - -.stat-icon { - width: 56px; - height: 56px; - margin: 0 auto 20px; - display: flex; - align-items: center; - justify-content: center; - background: linear-gradient(135deg, var(--color-accent-light), var(--color-accent-lighter)); - border-radius: 14px; - font-size: 24px; - color: var(--color-accent); - transition: all 0.35s cubic-bezier(0.4, 0, 0.2, 1); - box-shadow: 0 4px 12px rgba(129, 140, 248, 0.15); -} - -.stat-number { - font-size: 64px; - font-weight: 800; - background: linear-gradient(135deg, var(--color-accent), var(--color-accent-hover)); - -webkit-background-clip: text; - -webkit-text-fill-color: transparent; - background-clip: text; - line-height: 1; - 
margin-bottom: 8px; - letter-spacing: -0.03em; - transition: all 0.35s cubic-bezier(0.4, 0, 0.2, 1); - font-variant-numeric: tabular-nums; -} - -.stat-label { - font-size: 14px; - color: var(--color-text-secondary); - font-weight: 600; - letter-spacing: 0.02em; - line-height: 1.5; - text-transform: capitalize; -} - -/* ============================================ - Cards with Icons - ============================================ */ - -.cards-with-icons-container { - display: grid; - grid-template-columns: repeat(1, 1fr); - gap: var(--space-xl); - margin-top: var(--space-xl); -} - -@media (min-width: 640px) { - .cards-with-icons-container { - grid-template-columns: repeat(2, 1fr); - } -} - -@media (min-width: 1024px) { - .cards-with-icons-container { - grid-template-columns: repeat(4, 1fr); - } -} - -.card-with-icon { - display: flex; - flex-direction: column; - gap: var(--space-m); - padding: var(--space-xl); - background-color: var(--color-surface); - border-radius: var(--radius-l); - border: 1px solid var(--color-border); - transition: all 0.2s ease; -} - -.card-with-icon:hover { - border-color: var(--color-accent); - transform: translateY(-2px); -} - -.card-with-icon .icon { - font-size: var(--font-size-xl); - color: var(--color-accent); -} - -.feature-heading { - display: flex; - flex-direction: column; - gap: var(--space-xs); -} - -.feature-heading p { - margin: 0; -} - -/* ============================================ - Grid System - ============================================ */ - -.grid { - display: grid; - gap: 24px; - margin: var(--space-xl) 0; -} - -.grid.columns-2 { - grid-template-columns: repeat(1, 1fr); -} - -.grid.columns-3 { - grid-template-columns: repeat(1, 1fr); -} - -@media (min-width: 768px) { - .grid.columns-2 { - grid-template-columns: repeat(2, 1fr); - gap: 28px; - } - .grid.columns-3 { - grid-template-columns: repeat(2, 1fr); - gap: 24px; - } -} - -@media (min-width: 1024px) { - .grid.columns-3 { - grid-template-columns: repeat(3, 1fr); - } 
- .grid.columns-2 { - gap: 32px; - } -} - -.full-width { - grid-column: 1 / -1; -} - -/* ============================================ - Agent Cards - ============================================ */ - -.agent-category { - margin-bottom: 64px; - position: relative; -} - -.agent-category::before { - content: ''; - position: absolute; - top: 0; - left: -20px; - width: 3px; - height: 100%; - background: linear-gradient(180deg, var(--color-accent), transparent); - opacity: 0.2; - border-radius: 2px; -} - -.agent-category h3 { - display: flex; - align-items: center; - gap: var(--space-m); - margin-bottom: var(--space-xl); - padding: var(--space-l) var(--space-xl); - background: linear-gradient(135deg, var(--color-accent-lighter), var(--color-surface)); - border-left: 4px solid var(--color-accent); - border-radius: var(--radius-m); - color: var(--color-text-primary); - font-size: 24px; - font-weight: 700; - letter-spacing: -0.02em; - box-shadow: 0 2px 8px rgba(129, 140, 248, 0.08); - position: relative; - overflow: hidden; -} - -.agent-category h3::after { - content: ''; - position: absolute; - right: 0; - top: 0; - bottom: 0; - width: 100px; - background: linear-gradient(90deg, transparent, rgba(129, 140, 248, 0.05)); - pointer-events: none; -} - -.agent-category h3 i { - color: var(--color-accent); - font-size: 28px; - flex-shrink: 0; - filter: drop-shadow(0 2px 4px rgba(129, 140, 248, 0.3)); -} - -.agent-card { - padding: var(--space-xl); - background-color: var(--color-surface); - border-radius: var(--radius-l); - border: 1px solid var(--color-border); - box-shadow: 0 2px 8px rgba(0, 0, 0, 0.04); - transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); - position: relative; - overflow: hidden; -} - -.agent-card::before { - content: ''; - position: absolute; - inset: 0; - background: linear-gradient(135deg, var(--color-accent-lighter), transparent); - opacity: 0; - transition: opacity 0.3s ease; - pointer-events: none; -} - -.agent-card:hover { - border-color: 
var(--color-accent); - transform: translateY(-2px); - box-shadow: 0 8px 20px rgba(129, 140, 248, 0.15); -} - -.agent-card:hover::before { - opacity: 0.5; -} - -.agent-header { - display: flex; - align-items: center; - justify-content: space-between; - gap: var(--space-m); - margin-bottom: var(--space-m); -} - -.agent-name { - font-family: var(--font-mono); - font-size: 15px; - font-weight: 700; - color: var(--color-text-primary); - letter-spacing: -0.01em; - position: relative; - z-index: 1; -} - -.agent-badge { - padding: 6px 12px; - font-size: 11px; - font-weight: 700; - background: linear-gradient(135deg, var(--color-accent-light), var(--color-accent-lighter)); - color: var(--color-accent); - border-radius: 6px; - text-transform: uppercase; - letter-spacing: 0.08em; - box-shadow: 0 2px 4px rgba(129, 140, 248, 0.2); - border: 1px solid rgba(129, 140, 248, 0.3); - white-space: nowrap; - transition: all 0.2s ease; -} - -.agent-badge.critical { - background: linear-gradient(135deg, rgba(239, 68, 68, 0.2), rgba(239, 68, 68, 0.1)); - color: var(--color-error); - box-shadow: 0 2px 4px rgba(239, 68, 68, 0.25); - border-color: rgba(239, 68, 68, 0.4); -} - -.agent-card:hover .agent-badge { - transform: scale(1.05); - box-shadow: 0 4px 8px rgba(129, 140, 248, 0.3); -} - -.agent-card:hover .agent-badge.critical { - box-shadow: 0 4px 8px rgba(239, 68, 68, 0.35); -} - -.agent-card:hover .agent-name { - color: var(--color-accent); -} - -.agent-description { - font-size: 14px; - color: var(--color-text-secondary); - margin: 0 0 var(--space-m) 0; - line-height: 1.65; - position: relative; - z-index: 1; -} - -.agent-usage { - display: block; - font-family: var(--font-mono); - font-size: 13px; - color: #a6adc8; - background: linear-gradient(135deg, #1e1e2e 0%, #181825 100%); - padding: 12px 16px; - border-radius: var(--radius-m); - border: 1px solid rgba(129, 140, 248, 0.1); - position: relative; - z-index: 1; - transition: all 0.2s ease; -} - -.agent-usage:hover { - border-color: 
rgba(129, 140, 248, 0.3); - background: linear-gradient(135deg, #242438 0%, #1e1e2e 100%); -} - -/* ============================================ - Command Cards - ============================================ */ - -.command-category { - margin-bottom: var(--space-xxl); -} - -.command-category h3 { - display: flex; - align-items: center; - gap: var(--space-m); - margin-bottom: var(--space-xl); - padding: var(--space-m) var(--space-l); - background: linear-gradient(135deg, rgba(129, 140, 248, 0.08), rgba(129, 140, 248, 0.02)); - border-left: 3px solid var(--color-accent); - border-radius: var(--radius-s); - color: var(--color-text-primary); - font-family: var(--font-mono); - font-size: 18px; - font-weight: 600; - letter-spacing: -0.01em; - position: relative; -} - -.command-category h3::before { - content: '//'; - color: rgba(129, 140, 248, 0.4); - font-weight: 400; - margin-right: var(--space-xs); -} - -.command-category h3 i { - color: var(--color-accent); - filter: drop-shadow(0 0 8px rgba(129, 140, 248, 0.4)); -} - -.command-card { - padding: var(--space-xl); - background: linear-gradient(135deg, rgba(30, 30, 46, 0.6), rgba(24, 24, 37, 0.5)); - border-radius: var(--radius-l); - border: 1.5px solid rgba(129, 140, 248, 0.2); - transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); - position: relative; - overflow: hidden; - box-shadow: - inset 0 1px 0 rgba(255, 255, 255, 0.05), - inset 0 -1px 0 rgba(0, 0, 0, 0.2), - 0 2px 8px rgba(0, 0, 0, 0.3); -} - -.command-card::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - height: 2px; - background: linear-gradient(90deg, transparent, var(--color-accent), transparent); - opacity: 0.4; -} - -.command-card:hover { - border-color: var(--color-accent); - transform: translateY(-2px); - box-shadow: 0 8px 20px rgba(129, 140, 248, 0.2); -} - -.command-card:hover .command-name { - color: rgba(129, 140, 248, 1); - text-shadow: 0 0 24px rgba(129, 140, 248, 0.5); -} - -.command-card:hover .command-type-badge 
{ - background: linear-gradient(135deg, var(--color-accent), rgba(129, 140, 248, 0.6)); - color: var(--color-on-accent); - border-color: var(--color-accent); - box-shadow: 0 2px 8px rgba(129, 140, 248, 0.4); -} - -.command-card:hover::after { - opacity: 0.15; -} - -.command-header { - margin-bottom: var(--space-m); - display: flex; - align-items: flex-start; - justify-content: space-between; - gap: var(--space-m); -} - -.command-name { - font-family: var(--font-mono); - font-size: 15px; - font-weight: 600; - color: var(--color-accent); - background: none; - padding: 0; - letter-spacing: -0.01em; - text-shadow: 0 0 20px rgba(129, 140, 248, 0.3); - display: flex; - align-items: center; - gap: 8px; - flex: 1; -} - -.command-name::before { - content: '$'; - color: rgba(129, 140, 248, 0.5); - font-weight: 400; - font-size: 14px; -} - -.command-type-badge { - padding: 3px 8px; - font-family: var(--font-mono); - font-size: 10px; - font-weight: 700; - text-transform: uppercase; - letter-spacing: 0.05em; - border-radius: 3px; - background: linear-gradient(135deg, rgba(129, 140, 248, 0.15), rgba(129, 140, 248, 0.08)); - color: rgba(129, 140, 248, 0.8); - border: 1px solid rgba(129, 140, 248, 0.25); - white-space: nowrap; - flex-shrink: 0; -} - -.command-description { - font-size: 14px; - color: rgba(203, 213, 225, 0.85); - margin: 0; - line-height: 1.7; - letter-spacing: 0.01em; -} - -/* ============================================ - Skill Cards - ============================================ */ - -.skill-category { - margin-bottom: 72px; - position: relative; - padding: 24px; - background: linear-gradient(135deg, - rgba(129, 140, 248, 0.02) 0%, - transparent 50%); - border-radius: var(--radius-l); - border: 1px solid transparent; - transition: all 0.3s ease; -} - -.skill-category::before { - content: ''; - position: absolute; - top: 0; - left: 0; - width: 4px; - height: 100%; - background: linear-gradient(180deg, var(--color-accent), transparent); - opacity: 0.3; - 
border-radius: 2px; - transition: opacity 0.3s ease; -} - -.skill-category:hover::before { - opacity: 0.6; -} - -.skill-category h3 { - display: flex; - align-items: center; - gap: var(--space-m); - margin-bottom: var(--space-xl); - padding: var(--space-l) var(--space-xl); - background: linear-gradient(135deg, - rgba(129, 140, 248, 0.1) 0%, - rgba(129, 140, 248, 0.05) 50%, - var(--color-surface) 100%); - border-left: 4px solid var(--color-accent); - border-radius: var(--radius-m); - color: var(--color-text-primary); - font-size: 22px; - font-weight: 700; - letter-spacing: -0.02em; - box-shadow: - 0 2px 8px rgba(129, 140, 248, 0.12), - inset 0 1px 0 rgba(255, 255, 255, 0.05); - transition: all 0.3s ease; - cursor: default; -} - -.skill-category h3:hover { - background: linear-gradient(135deg, - rgba(129, 140, 248, 0.15) 0%, - rgba(129, 140, 248, 0.08) 50%, - var(--color-surface) 100%); - box-shadow: - 0 4px 12px rgba(129, 140, 248, 0.18), - inset 0 1px 0 rgba(255, 255, 255, 0.08); -} - -.skill-category h3 i { - color: var(--color-accent); - font-size: 24px; -} - -.skill-card { - padding: 28px; - background: linear-gradient(135deg, var(--color-surface), var(--color-background)); - border-radius: var(--radius-l); - border: 1.5px solid var(--color-border); - transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); - position: relative; - overflow: hidden; - box-shadow: 0 2px 8px rgba(0, 0, 0, 0.04); -} - -.skill-card::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - height: 3px; - background: linear-gradient(90deg, var(--color-accent), transparent); - opacity: 0; - transition: opacity 0.3s ease; -} - -.skill-card::after { - content: ''; - position: absolute; - top: 0; - right: 0; - width: 40px; - height: 40px; - background: linear-gradient(135deg, transparent 50%, var(--color-accent) 50%); - opacity: 0.1; - pointer-events: none; - transition: opacity 0.3s ease; -} - -.skill-card:hover { - border-color: var(--color-accent); - transform: 
translateY(-2px); - box-shadow: 0 8px 20px rgba(129, 140, 248, 0.15); -} - -.skill-card:hover::before { - opacity: 1; -} - -.skill-card:hover::after { - opacity: 0.3; -} - -.skill-card.featured { - background: linear-gradient(135deg, - rgba(129, 140, 248, 0.12) 0%, - rgba(129, 140, 248, 0.06) 50%, - var(--color-surface) 100%); - border-color: var(--color-accent); - box-shadow: - 0 4px 16px rgba(129, 140, 248, 0.25), - 0 0 60px rgba(129, 140, 248, 0.08), - inset 0 1px 0 rgba(255, 255, 255, 0.1); - position: relative; - overflow: hidden; -} - -.skill-card.featured::before { - background: linear-gradient(90deg, - var(--color-accent) 0%, - rgba(129, 140, 248, 0.6) 100%); - opacity: 1; - height: 4px; -} - -.skill-card.featured:hover { - box-shadow: 0 10px 28px rgba(129, 140, 248, 0.2); - transform: translateY(-2px); -} - -.skill-header { - display: flex; - align-items: center; - justify-content: space-between; - gap: var(--space-m); - margin-bottom: var(--space-l); - padding-bottom: var(--space-m); - border-bottom: 1px solid rgba(129, 140, 248, 0.1); - position: relative; -} - -.skill-header::after { - content: ''; - position: absolute; - bottom: -1px; - left: 0; - width: 60px; - height: 2px; - background: linear-gradient(90deg, var(--color-accent), transparent); - opacity: 0; - transition: all 0.3s ease; -} - -.skill-card:hover .skill-header::after { - opacity: 1; - width: 120px; -} - -.skill-name { - font-family: var(--font-mono); - font-size: 16px; - font-weight: 700; - color: var(--color-text-primary); - letter-spacing: -0.01em; - display: flex; - align-items: center; - gap: 8px; - position: relative; - z-index: 1; -} - -.skill-name::before { - content: '◆'; - color: var(--color-accent); - font-size: 10px; - opacity: 0.6; - transition: all 0.3s ease; -} - -.skill-card:hover .skill-name { - color: var(--color-accent); -} - -.skill-card:hover .skill-name::before { - opacity: 1; - transform: scale(1.2); -} - -.skill-badge { - padding: 6px 12px; - font-size: 11px; - 
font-weight: 600; - background: var(--color-accent-light); - color: var(--color-accent); - border-radius: 6px; - text-transform: uppercase; - letter-spacing: 0.05em; - white-space: nowrap; - transition: all 0.2s ease; -} - -.skill-badge.highlight { - background: linear-gradient(135deg, var(--color-accent), var(--color-accent-hover)); - color: var(--color-on-accent); - box-shadow: 0 2px 8px rgba(129, 140, 248, 0.4); - border-color: var(--color-accent); -} - -.skill-card:hover .skill-badge { - background: var(--color-accent); - color: var(--color-on-accent); -} - -.skill-description { - font-size: 14px; - color: var(--color-text-secondary); - margin: 0 0 var(--space-l) 0; - line-height: 1.65; - position: relative; - z-index: 1; - letter-spacing: 0.01em; -} - -.skill-features { - display: flex; - flex-wrap: wrap; - gap: 10px; - margin-bottom: var(--space-l); - position: relative; - z-index: 1; -} - -.feature-item { - display: flex; - align-items: center; - gap: 6px; - font-size: 12px; - font-weight: 600; - color: var(--color-success); - background: linear-gradient(135deg, rgba(16, 185, 129, 0.15), rgba(16, 185, 129, 0.08)); - padding: 8px 14px; - border-radius: 20px; - border: 1px solid rgba(16, 185, 129, 0.3); - transition: all 0.2s ease; - letter-spacing: 0.01em; -} - -.feature-item i { - font-size: 11px; - filter: drop-shadow(0 1px 2px rgba(16, 185, 129, 0.3)); -} - -.feature-item:hover { - background: linear-gradient(135deg, rgba(16, 185, 129, 0.25), rgba(16, 185, 129, 0.15)); - border-color: rgba(16, 185, 129, 0.5); - transform: translateY(-1px); - box-shadow: 0 2px 6px rgba(16, 185, 129, 0.2); -} - -.skill-usage { - display: block; - font-family: var(--font-mono); - font-size: 13px; - color: #a6adc8; - background: linear-gradient(135deg, #1e1e2e 0%, #181825 100%); - padding: 14px 18px; - border-radius: var(--radius-m); - margin-bottom: var(--space-s); - border: 1px solid rgba(129, 140, 248, 0.15); - position: relative; - z-index: 1; - transition: all 0.2s ease; 
- box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.05); -} - -.skill-usage:hover { - border-color: rgba(129, 140, 248, 0.3); - background: linear-gradient(135deg, #242438 0%, #1e1e2e 100%); - transform: translateX(2px); -} - -.skill-note { - font-size: 12px; - color: var(--color-text-tertiary); - margin: 0; - font-style: italic; - position: relative; - z-index: 1; - padding: 8px 12px; - background: rgba(255, 165, 0, 0.08); - border-left: 3px solid rgba(255, 165, 0, 0.4); - border-radius: 4px; -} - -.skill-note::before { - content: '⚠'; - margin-right: 6px; - color: rgba(255, 165, 0, 0.8); -} - -/* ============================================ - MCP Cards - ============================================ */ - -.mcp-card { - padding: 36px; - background: linear-gradient(135deg, var(--color-surface), var(--color-background)); - border-radius: var(--radius-l); - border: 1.5px solid transparent; - background-image: - linear-gradient(135deg, var(--color-surface), var(--color-background)), - linear-gradient(135deg, var(--color-accent-light), var(--color-border)); - background-origin: border-box; - background-clip: padding-box, border-box; - box-shadow: - 0 1px 3px rgba(0, 0, 0, 0.12), - 0 4px 8px rgba(0, 0, 0, 0.08), - 0 8px 24px rgba(0, 0, 0, 0.06), - inset 0 1px 0 rgba(255, 255, 255, 0.05); - transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); - position: relative; - overflow: hidden; -} - -.mcp-card::after { - content: '● ACTIVE'; - position: absolute; - top: 16px; - right: 16px; - font-size: 9px; - font-weight: 600; - letter-spacing: 0.08em; - padding: 5px 10px; - background: rgba(16, 185, 129, 0.15); - color: var(--color-success); - border: 1px solid rgba(16, 185, 129, 0.3); - border-radius: 12px; - z-index: 2; -} - -.mcp-card:hover { - transform: translateY(-2px); - border-color: var(--color-accent); - box-shadow: 0 8px 24px rgba(129, 140, 248, 0.2); -} - -.mcp-card:hover::after { - background: var(--color-success); - color: white; - border-color: var(--color-success); -} 
- -.mcp-header { - display: flex; - align-items: center; - gap: 20px; - margin-bottom: 24px; - padding-bottom: 20px; - border-bottom: 1px solid rgba(129, 140, 248, 0.1); - position: relative; -} - -.mcp-header::after { - content: ''; - position: absolute; - bottom: -1px; - left: 0; - width: 80px; - height: 2px; - background: linear-gradient(90deg, var(--color-accent), transparent); - opacity: 0; - transition: all 0.3s ease; -} - -.mcp-card:hover .mcp-header::after { - opacity: 1; - width: 150px; -} - -.mcp-icon { - width: 64px; - height: 64px; - display: flex; - align-items: center; - justify-content: center; - font-size: 32px; - background: linear-gradient(135deg, var(--color-accent-light), var(--color-accent-lighter)); - color: var(--color-accent); - border-radius: 14px; - flex-shrink: 0; - box-shadow: 0 4px 12px rgba(129, 140, 248, 0.15); - transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); - position: relative; - z-index: 1; -} - -.mcp-card:hover .mcp-icon { - transform: scale(1.1) translateY(-2px); - background: linear-gradient(135deg, var(--color-accent), var(--color-accent-hover)); - color: var(--color-on-accent); - box-shadow: 0 8px 20px rgba(129, 140, 248, 0.3); -} - -.mcp-name { - font-size: 24px; - font-weight: 700; - color: var(--color-text-primary); - letter-spacing: -0.02em; - position: relative; - z-index: 1; - transition: all 0.3s ease; -} - -.mcp-card:hover .mcp-name { - transform: translateX(4px); - background: linear-gradient(135deg, var(--color-text-primary), var(--color-accent)); - -webkit-background-clip: text; - -webkit-text-fill-color: transparent; - background-clip: text; -} - -.mcp-description { - font-size: 15px; - color: var(--color-text-secondary); - margin-bottom: 28px; - line-height: 1.7; - position: relative; - z-index: 1; - letter-spacing: 0.01em; -} - -.mcp-description::before { - content: ''; - position: absolute; - bottom: -12px; - left: 0; - right: 0; - height: 1px; - background: linear-gradient(90deg, - transparent, - rgba(129, 
140, 248, 0.1) 20%, - rgba(129, 140, 248, 0.3) 40%, - rgba(129, 140, 248, 0.3) 60%, - rgba(129, 140, 248, 0.1) 80%, - transparent - ); - transition: all 0.4s ease; -} - -.mcp-card:hover .mcp-description::before { - box-shadow: 0 0 12px rgba(129, 140, 248, 0.6); - height: 2px; -} - -.mcp-tools h4 { - font-size: 13px; - font-weight: 700; - color: var(--color-text-primary); - text-transform: uppercase; - letter-spacing: 0.08em; - margin-bottom: 16px; - display: flex; - align-items: center; - gap: 8px; - position: relative; - z-index: 1; -} - -.mcp-tools h4::before { - content: '▸'; - color: var(--color-accent); - font-size: 14px; -} - -.tools-count { - font-size: 10px; - font-weight: 700; - text-transform: uppercase; - letter-spacing: 0.05em; - padding: 4px 10px; - background: rgba(129, 140, 248, 0.15); - color: var(--color-accent); - border-radius: 12px; - margin-left: 8px; - border: 1px solid rgba(129, 140, 248, 0.25); - display: inline-flex; - align-items: center; - gap: 4px; - box-shadow: 0 1px 3px rgba(129, 140, 248, 0.1); -} - -.mcp-card-browser .tools-count { - background: var(--server-color-lighter); - color: var(--server-color); - border-color: var(--server-color-light); -} - -.mcp-card-docs .tools-count { - background: var(--server-color-lighter); - color: var(--server-color); - border-color: var(--server-color-light); -} - -.mcp-tools ul { - margin: 0; - padding: 0; - list-style: none; - display: flex; - flex-direction: column; - gap: 8px; - position: relative; - z-index: 1; -} - -.mcp-tools li { - font-size: 14px; - color: var(--color-text-secondary); - display: flex; - align-items: center; - gap: 10px; - padding: 10px 14px; - background: rgba(129, 140, 248, 0.04); - border-left: 2px solid rgba(129, 140, 248, 0.2); - border-radius: 6px; - transition: all 0.2s ease; - line-height: 1.5; -} - -.mcp-tools li::before { - content: '◆'; - color: var(--color-accent); - font-size: 8px; - flex-shrink: 0; - opacity: 0.6; -} - -.mcp-tools li:hover { - background: 
rgba(129, 140, 248, 0.08); - border-left-color: var(--color-accent); - transform: translateX(4px); -} - -.mcp-tools li:hover::before { - opacity: 1; -} - -.mcp-tools code { - color: var(--color-accent); - background: rgba(129, 140, 248, 0.1); - padding: 3px 8px; - border-radius: 4px; - font-weight: 600; - font-size: 13px; - font-family: var(--font-mono); - transition: all 0.2s ease; -} - -.mcp-tools li:hover code { - background: rgba(129, 140, 248, 0.2); -} - -.mcp-note { - font-size: 12px; - color: var(--color-text-secondary); - margin-top: 16px; - padding: 12px 16px; - background: linear-gradient(135deg, rgba(129, 140, 248, 0.08), rgba(129, 140, 248, 0.04)); - border-left: 3px solid var(--color-accent); - border-radius: 8px; - line-height: 1.6; - position: relative; - z-index: 1; - font-weight: 500; - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.05); -} - -.mcp-note::before { - content: '→'; - color: var(--color-accent); - margin-right: 8px; - font-weight: 700; - font-size: 14px; -} - -/* Browser Server Theme (Teal/Cyan) */ -.mcp-card-browser { - --server-color: #14b8a6; - --server-color-light: rgba(20, 184, 166, 0.15); - --server-color-lighter: rgba(20, 184, 166, 0.08); - --server-color-hover: #0d9488; -} - -.mcp-card-browser .mcp-icon { - background: linear-gradient(135deg, var(--server-color-light), var(--server-color-lighter)); - color: var(--server-color); -} - -.mcp-card-browser:hover .mcp-icon { - background: linear-gradient(135deg, var(--server-color), var(--server-color-hover)); - color: white; - box-shadow: 0 8px 20px rgba(20, 184, 166, 0.4); -} - -.mcp-card-browser .mcp-header::after { - background: linear-gradient(90deg, var(--server-color), transparent); -} - -.mcp-card-browser .mcp-tools li::before { - color: var(--server-color); -} - -.mcp-card-browser .mcp-tools li:hover { - border-left-color: var(--server-color); -} - -.mcp-card-browser .mcp-tools code { - color: var(--server-color); - background: var(--server-color-lighter); -} - 
-.mcp-card-browser .mcp-tools h4::before { - color: var(--server-color); -} - -/* Docs Server Theme (Amber/Orange) */ -.mcp-card-docs { - --server-color: #f59e0b; - --server-color-light: rgba(245, 158, 11, 0.15); - --server-color-lighter: rgba(245, 158, 11, 0.08); - --server-color-hover: #d97706; -} - -.mcp-card-docs .mcp-icon { - background: linear-gradient(135deg, var(--server-color-light), var(--server-color-lighter)); - color: var(--server-color); -} - -.mcp-card-docs:hover .mcp-icon { - background: linear-gradient(135deg, var(--server-color), var(--server-color-hover)); - color: white; - box-shadow: 0 8px 20px rgba(245, 158, 11, 0.4); -} - -.mcp-card-docs .mcp-header::after { - background: linear-gradient(90deg, var(--server-color), transparent); -} - -.mcp-card-docs .mcp-tools li::before { - color: var(--server-color); -} - -.mcp-card-docs .mcp-tools li:hover { - border-left-color: var(--server-color); -} - -.mcp-card-docs .mcp-tools code { - color: var(--server-color); - background: var(--server-color-lighter); -} - -.mcp-card-docs .mcp-tools h4::before { - color: var(--server-color); -} - -.mcp-card-docs .mcp-note { - background: linear-gradient(135deg, var(--server-color-light), var(--server-color-lighter)); - border-left-color: var(--server-color); -} - -.mcp-card-docs .mcp-note::before { - color: var(--server-color); -} - -/* ============================================ - Installation Section - ============================================ */ - -.install-section { - background: linear-gradient(135deg, var(--color-surface), var(--color-background)); - border-radius: var(--radius-xl); - margin: var(--space-xxl) 0; - padding: var(--space-xxl); - border: 1.5px solid var(--color-border); - position: relative; - overflow: hidden; -} - -.install-section::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - height: 3px; - background: linear-gradient(90deg, var(--color-accent), var(--color-promo-start), var(--color-accent)); - opacity: 
0.6; -} - -.install-steps { - max-width: 750px; - margin: 0 auto; - position: relative; -} - -.install-steps::before { - content: ''; - position: absolute; - left: 26px; - top: 52px; - bottom: 52px; - width: 2px; - background: linear-gradient(180deg, - var(--color-accent) 0%, - var(--color-accent) 33%, - var(--color-accent) 66%, - var(--color-success) 100% - ); - opacity: 0.3; - z-index: 0; -} - -.install-step { - display: flex; - gap: var(--space-xl); - margin-bottom: var(--space-xxl); - padding: var(--space-xl); - background: var(--color-background); - border-radius: var(--radius-l); - border: 1.5px solid var(--color-border); - transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); - position: relative; -} - -.install-step::before { - content: ''; - position: absolute; - inset: 0; - background: linear-gradient(135deg, var(--color-accent-lighter), transparent); - opacity: 0; - transition: opacity 0.3s ease; - border-radius: var(--radius-l); - pointer-events: none; -} - -.install-step:hover { - border-color: var(--color-accent); - transform: translateX(4px); - box-shadow: 0 8px 24px rgba(129, 140, 248, 0.15); -} - -.install-step:hover::before { - opacity: 0.4; -} - -.install-step:last-child { - margin-bottom: 0; - background: linear-gradient(135deg, rgba(16, 185, 129, 0.08), var(--color-background)); - border-color: rgba(16, 185, 129, 0.3); -} - -.install-step:last-child .step-number { - background: linear-gradient(135deg, var(--color-success), #0d9488); - box-shadow: 0 4px 16px rgba(16, 185, 129, 0.4); -} - -.step-number { - flex-shrink: 0; - width: 52px; - height: 52px; - display: flex; - align-items: center; - justify-content: center; - background: linear-gradient(135deg, var(--color-accent), var(--color-accent-hover)); - color: var(--color-on-accent); - font-weight: 700; - font-size: 24px; - border-radius: 50%; - box-shadow: 0 4px 12px rgba(129, 140, 248, 0.3); - transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); - position: relative; - z-index: 1; -} - 
-.install-step:hover .step-number { - transform: scale(1.1) rotate(5deg); - box-shadow: 0 6px 20px rgba(129, 140, 248, 0.5); -} - -.step-content { - flex: 1; - position: relative; - z-index: 1; -} - -.step-content h3 { - margin-top: 8px; - margin-bottom: var(--space-m); - font-size: 22px; - color: var(--color-text-primary); -} - -/* ============================================ - Code Blocks - ============================================ */ - -.card-code-block { - background: linear-gradient(135deg, #1a1a2e 0%, #16213e 100%); - border-radius: var(--radius-m); - padding: 18px 20px; - overflow-x: auto; - border: 1px solid rgba(129, 140, 248, 0.2); - box-shadow: - inset 0 1px 0 rgba(255, 255, 255, 0.05), - 0 4px 12px rgba(0, 0, 0, 0.4); - position: relative; -} - -.card-code-block::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background: repeating-linear-gradient( - 0deg, - rgba(255, 255, 255, 0.03) 0px, - rgba(255, 255, 255, 0.03) 1px, - transparent 1px, - transparent 2px - ); - pointer-events: none; - opacity: 0.3; -} - -.card-code-block pre { - margin: 0; - position: relative; - z-index: 1; -} - -.card-code-block code { - font-family: var(--font-mono); - font-size: 14px; - color: #cdd6f4; - background: none; - padding: 0; - line-height: 1.8; - white-space: pre; - text-shadow: 0 0 8px rgba(205, 214, 244, 0.3); -} - -/* ============================================ - Accordion / FAQ - ============================================ */ -.accordion-container { - display: flex; - flex-direction: column; - gap: var(--space-m); - max-width: 900px; - margin: 0 auto; -} - -.accordion-item { - background: var(--color-surface); - border: 1.5px solid var(--color-border); - border-radius: var(--radius-l); - transition: all 0.35s cubic-bezier(0.4, 0, 0.2, 1); - overflow: hidden; - position: relative; - box-shadow: 0 2px 8px rgba(0, 0, 0, 0.04); -} - -.accordion-item::before { - content: ''; - position: absolute; - left: 0; - top: 0; - 
bottom: 0; - width: 4px; - background: linear-gradient(180deg, var(--color-accent), var(--color-accent-hover)); - opacity: 0; - transition: opacity 0.35s ease; - z-index: 2; -} - -.accordion-item[open]::before { - opacity: 1; -} - -.accordion-item:hover { - border-color: var(--color-accent); - box-shadow: 0 8px 24px rgba(129, 140, 248, 0.2); - transform: translateY(-2px); -} - -.accordion-item[open] { - background: linear-gradient(135deg, rgba(129, 140, 248, 0.08), var(--color-surface)); - border-color: var(--color-accent); - box-shadow: 0 12px 32px rgba(129, 140, 248, 0.25); -} - -.accordion-toggle { - display: flex; - align-items: center; -/* ============================================ - Promo CTA Section - Final Polish - ============================================ */ - -.promo-cta { - background: linear-gradient(135deg, var(--color-promo-start), var(--color-promo-end)); - border-radius: var(--radius-xl); - padding: 96px var(--space-xxl); - margin-bottom: var(--space-xxl); - position: relative; - overflow: hidden; - box-shadow: - 0 20px 60px rgba(129, 140, 248, 0.5), - 0 8px 24px rgba(0, 0, 0, 0.2); -} - -.promo-cta::before { - content: ''; - position: absolute; - inset: 0; - background: - repeating-linear-gradient( - 45deg, - transparent, - transparent 20px, - rgba(255, 255, 255, 0.04) 20px, - rgba(255, 255, 255, 0.04) 40px - ); - pointer-events: none; -} - -.promo-cta::after { - content: ''; - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - width: 600px; - height: 600px; - background: radial-gradient(circle, rgba(255, 255, 255, 0.15) 0%, transparent 70%); - pointer-events: none; - animation: glow-pulse 6s ease-in-out infinite; -} - -@keyframes glow-pulse { - 0%, 100% { - opacity: 0.4; - transform: translate(-50%, -50%) scale(0.95); - } - 50% { - opacity: 0.7; - transform: translate(-50%, -50%) scale(1.05); - } -} - -.cta-badge { - display: inline-flex; - align-items: center; - gap: 8px; - padding: 10px 22px; - background: 
rgba(255, 255, 255, 0.25); - backdrop-filter: blur(12px); - border: 1.5px solid rgba(255, 255, 255, 0.4); - border-radius: 50px; - color: white; - font-size: 13px; - font-weight: 700; - text-transform: uppercase; - letter-spacing: 0.1em; - margin-bottom: 28px; - position: relative; - z-index: 1; - box-shadow: - 0 4px 16px rgba(0, 0, 0, 0.2), - inset 0 1px 0 rgba(255, 255, 255, 0.3); -} - -.cta-badge i { - font-size: 16px; - animation: bolt-flash 2.5s ease-in-out infinite; - filter: drop-shadow(0 0 4px rgba(255, 255, 255, 0.8)); -} - -@keyframes bolt-flash { - 0%, 100% { opacity: 1; } - 50% { opacity: 0.7; transform: scale(1.15); } -} - -.promo-cta h2 { - color: white; - font-size: 52px; - font-weight: 850; - line-height: 1.1; - margin-bottom: 28px; - position: relative; - z-index: 1; - text-shadow: - 0 2px 4px rgba(0, 0, 0, 0.2), - 0 4px 20px rgba(0, 0, 0, 0.15); - letter-spacing: -0.03em; -} - -.cta-subheading { - color: rgba(255, 255, 255, 0.98); - font-size: 20px; - line-height: 1.65; - max-width: 720px; - margin-left: auto; - margin-right: auto; - position: relative; - z-index: 1; -} - -.cta-subheading strong { - color: white; - font-weight: 700; - text-shadow: 0 1px 2px rgba(0, 0, 0, 0.1); -} - -.promo-cta .button { - position: relative; - z-index: 1; - transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); -} - -.promo-cta .button.primary { - background: white; - color: var(--color-promo-end); - font-size: 19px; - font-weight: 700; - padding: 20px 42px; - box-shadow: - 0 8px 28px rgba(0, 0, 0, 0.3), - 0 4px 12px rgba(0, 0, 0, 0.2), - inset 0 1px 0 rgba(255, 255, 255, 0.4); -} - -.promo-cta .button.primary:hover { - background: rgba(255, 255, 255, 0.98); - transform: translateY(-2px); - box-shadow: 0 10px 28px rgba(0, 0, 0, 0.3); -} - -.promo-cta .button.primary i { - font-size: 20px; - margin-right: 4px; -} - -.button-arrow { - display: inline-block; - margin-left: 8px; - transition: transform 0.3s ease; - font-weight: 700; - font-size: 20px; -} - -.promo-cta 
.button.primary:hover .button-arrow { - transform: translateX(4px); -} - -.promo-cta .button.tertiary { - border: 2px solid rgba(255, 255, 255, 0.5); - color: white; - font-size: 18px; - padding: 18px 36px; - background: rgba(255, 255, 255, 0.08); - backdrop-filter: blur(8px); -} - -.promo-cta .button.tertiary:hover { - background: rgba(255, 255, 255, 0.2); - border-color: rgba(255, 255, 255, 0.8); - transform: translateY(-2px); -} - -.cta-trust { - margin-top: 32px; - margin-bottom: 0; - font-size: 15px; - color: rgba(255, 255, 255, 0.85); - font-weight: 500; - letter-spacing: 0.01em; - position: relative; - z-index: 1; -} - -.cta-trust::before { - content: '✓'; - display: inline-block; - margin-right: 8px; - color: rgba(255, 255, 255, 0.9); - font-weight: 700; - font-size: 18px; -} - -@media (max-width: 767px) { - .promo-cta { - padding: 64px var(--space-xl); - } - - .promo-cta h2 { - font-size: 38px; - } - - .cta-subheading { - font-size: 18px; - } - - .promo-cta .button.primary { - font-size: 17px; - padding: 18px 32px; - } - - .button-group { - flex-direction: column; - width: 100%; - } - - .button-group .button { - width: 100%; - } -} -/* Add subtle numbers */ -.accordion-item:nth-child(1) .accordion-toggle::after { content: '01'; } -.accordion-item:nth-child(2) .accordion-toggle::after { content: '02'; } -.accordion-item:nth-child(3) .accordion-toggle::after { content: '03'; } -.accordion-item:nth-child(4) .accordion-toggle::after { content: '04'; } -.accordion-item:nth-child(5) .accordion-toggle::after { content: '05'; } - -.accordion-toggle::after { - position: absolute; - right: 70px; - font-size: 10px; - font-weight: 700; - color: var(--color-text-tertiary); - opacity: 0.4; - font-family: var(--font-mono); - letter-spacing: 0.05em; - transition: all 0.3s ease; -} - -.accordion-item:hover .accordion-toggle::after { - opacity: 0.7; - color: var(--color-accent); -} - -.accordion-item[open] .accordion-toggle::after { - opacity: 0; -} - -/* 
============================================ - Promo CTA Section - ============================================ */ - -.promo-cta { - background: linear-gradient(135deg, var(--color-promo-start), var(--color-promo-end)); - border-radius: var(--radius-xl); - padding: 80px var(--space-xxl); - margin-bottom: var(--space-xxl); - position: relative; - overflow: hidden; - box-shadow: 0 20px 60px rgba(129, 140, 248, 0.4); -} - -.promo-cta::before { - content: ''; - position: absolute; - inset: 0; - background: - repeating-linear-gradient( - 45deg, - transparent, - transparent 20px, - rgba(255, 255, 255, 0.03) 20px, - rgba(255, 255, 255, 0.03) 40px - ); - pointer-events: none; -} - -.promo-cta::after { - content: ''; - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - width: 500px; - height: 500px; - background: radial-gradient(circle, rgba(255, 255, 255, 0.1) 0%, transparent 70%); - pointer-events: none; - animation: glow-pulse 4s ease-in-out infinite; -} - -@keyframes glow-pulse { - 0%, 100% { opacity: 0.5; transform: translate(-50%, -50%) scale(1); } - 50% { opacity: 0.8; transform: translate(-50%, -50%) scale(1.1); } -} - -.cta-badge { - display: inline-flex; - align-items: center; - gap: 8px; - padding: 10px 20px; - background: rgba(255, 255, 255, 0.2); - backdrop-filter: blur(10px); - border: 1.5px solid rgba(255, 255, 255, 0.3); - border-radius: 50px; - color: white; - font-size: 14px; - font-weight: 700; - text-transform: uppercase; - letter-spacing: 0.08em; - margin-bottom: 24px; - position: relative; - z-index: 1; - box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15); -} - -.cta-badge i { - font-size: 16px; -} - -.promo-cta h2 { - color: white; - font-size: 48px; - font-weight: 800; - line-height: 1.15; - margin-bottom: 24px; - position: relative; - z-index: 1; - text-shadow: 0 2px 20px rgba(0, 0, 0, 0.2); -} - -.promo-cta p { - color: rgba(255, 255, 255, 0.95); - font-size: 21px; - line-height: 1.6; - max-width: 700px; - margin-left: auto; 
- margin-right: auto; - position: relative; - z-index: 1; -} - -.promo-cta .button { - position: relative; - z-index: 1; -} - -.promo-cta .button.primary { - background-color: white; - color: var(--color-promo-end); - font-size: 18px; - padding: 18px 36px; - box-shadow: 0 8px 24px rgba(0, 0, 0, 0.25); -} - -.promo-cta .button.primary:hover { - background-color: rgba(255, 255, 255, 0.95); - transform: translateY(-2px); - box-shadow: 0 8px 24px rgba(0, 0, 0, 0.3); -} - -.promo-cta .button.primary i { - font-size: 20px; -} - -.promo-cta .button.tertiary { - border-color: rgba(255, 255, 255, 0.4); - color: white; - font-size: 18px; - padding: 18px 36px; -} - -.promo-cta .button.tertiary:hover { - background-color: rgba(255, 255, 255, 0.15); - border-color: rgba(255, 255, 255, 0.6); -} - -.accordion-content code { - background: rgba(129, 140, 248, 0.15); - color: var(--color-accent); - padding: 3px 8px; - border-radius: 4px; - font-weight: 600; -} - -.accordion-content strong { - color: var(--color-text-primary); - font-weight: 600; -} - -/* ============================================ - Promo CTA Section - ============================================ */ - -.promo-cta { - background: linear-gradient(135deg, var(--color-promo-start), var(--color-promo-end)); - border-radius: var(--radius-xl); - padding: var(--space-xxl); - margin-bottom: var(--space-xxl); -} - -.promo-cta h2 { - color: white; -} - -.promo-cta p { - color: rgba(255, 255, 255, 0.9); -} - -.promo-cta .button.primary { - background-color: white; - color: var(--color-promo-end); -} - -.promo-cta .button.primary:hover { - background-color: rgba(255, 255, 255, 0.9); -} - -.promo-cta .button.tertiary { - border-color: rgba(255, 255, 255, 0.3); - color: white; -} - -.promo-cta .button.tertiary:hover { - background-color: rgba(255, 255, 255, 0.1); -} - -/* ============================================ - Footer - ============================================ */ - -.footer { - border-top: 1px solid 
var(--color-border); - margin-top: auto; - padding: var(--space-xxl) 0 var(--space-xl); -} - -.footer-menu { - display: grid; - grid-template-columns: 1fr; - gap: var(--space-xl); - margin-bottom: var(--space-xl); -} - -@media (min-width: 768px) { - .footer-menu { - grid-template-columns: 2fr 1fr 1fr; - } -} - -.footer p { - margin: 0; - color: var(--color-text-secondary); -} - -.link-list { - display: flex; - flex-direction: column; - gap: var(--space-m); -} - -.link-list a { - color: var(--color-text-secondary); - font-size: var(--font-size-s); - text-decoration: none; -} - -.link-list a:hover { - color: var(--color-accent); -} - -.icon-link { - display: flex; - align-items: center; - gap: var(--space-s); -} - -.icon-link .icon { - color: var(--color-accent); -} - -.pseudo-link { - text-decoration: underline; - text-decoration-color: var(--color-border); - text-underline-offset: 2px; -} - -.link-list-horizontal { - display: flex; - flex-wrap: wrap; - gap: var(--space-l); -} - -.link-list-horizontal a { - color: var(--color-text-tertiary); - font-size: var(--font-size-s); -} - -.link-list-horizontal a:hover { - color: var(--color-text-secondary); -} - -/* ============================================ - Utility Classes - ============================================ */ - -.hide-on-mobile { - display: none; -} - -@media (min-width: 768px) { - .hide-on-mobile { - display: flex; - } -} - -.mobile-only { - display: flex; -} - -@media (min-width: 1024px) { - .mobile-only { - display: none; - } -} - -.margin-top-l { - margin-top: var(--space-l); -} - -.ui.s { - font-size: var(--font-size-s); - line-height: var(--line-height-ui-s); -} - -.icon { - display: inline-flex !important; - align-items: center; -} - -.icon.m { font-size: var(--font-size-m); } -.icon.l { font-size: var(--font-size-l); } -.icon.xl { font-size: var(--font-size-xl); } - -/* ============================================ - Responsive Adjustments - ============================================ */ - -@media 
(max-width: 767px) { - :root { - --font-size-xxl: 36px; - --font-size-xl: 28px; - --line-height-h1: 44px; - --line-height-h2: 36px; - --space-section: 48px; - } - - .page-container { - padding: 0 var(--space-l); - } - - .hero-section .heading.hero { - padding: var(--space-xl) 0; - } - - .install-section { - padding: var(--space-xl); - } - - .install-step { - flex-direction: column; - gap: var(--space-m); - } - - .promo-cta { - padding: var(--space-xl); - } -} - -/* ============================================ - Philosophy Section (Enhanced) - ============================================ */ - -.philosophy-section { - padding: var(--space-section) 0; - position: relative; -} - -.philosophy-section::before { - content: ''; - position: absolute; - top: 0; - left: 50%; - transform: translateX(-50%); - width: 400px; - height: 400px; - background: radial-gradient(circle, var(--color-accent-lighter) 0%, transparent 70%); - opacity: 0.08; - pointer-events: none; - z-index: 0; -} - -.philosophy-section > * { - position: relative; - z-index: 1; -} - -.philosophy-quote { - max-width: 900px; - margin: 0 auto var(--space-xxl); - text-align: center; - position: relative; -} - -.philosophy-quote::before { - content: '"'; - position: absolute; - top: -20px; - left: 50%; - transform: translateX(-50%); - font-size: 120px; - font-weight: 700; - color: var(--color-accent); - opacity: 0.1; - line-height: 1; - font-family: Georgia, serif; - z-index: 0; -} - -.philosophy-quote blockquote { - font-size: 22px; - font-style: italic; - color: var(--color-text-secondary); - line-height: 1.7; - margin: 0; - padding: var(--space-xxl); - background: linear-gradient(135deg, var(--color-accent-lighter) 0%, transparent 50%); - border-left: 5px solid var(--color-accent); - border-radius: var(--radius-l); - box-shadow: 0 8px 24px rgba(129, 140, 248, 0.12); - position: relative; - z-index: 1; - font-weight: 400; -} - -/* Philosophy Pillars */ -.philosophy-pillars { - display: grid; - 
grid-template-columns: 1fr; - gap: var(--space-xl); - margin-bottom: var(--space-xxl); -} - -@media (min-width: 768px) { - .philosophy-pillars { - grid-template-columns: repeat(2, 1fr); - } -} - -.pillar { - display: flex; - gap: var(--space-l); - padding: var(--space-xl); - background: var(--color-background); - border-radius: var(--radius-l); - border: 1.5px solid transparent; - background-image: - linear-gradient(var(--color-background), var(--color-background)), - linear-gradient(135deg, var(--color-accent-light), var(--color-border)); - background-origin: border-box; - background-clip: padding-box, border-box; - transition: all 0.35s cubic-bezier(0.4, 0, 0.2, 1); - position: relative; - overflow: hidden; - box-shadow: 0 4px 12px rgba(0, 0, 0, 0.05); -} - -.pillar::before { - content: ''; - position: absolute; - inset: 0; - background: linear-gradient(135deg, var(--color-accent-lighter), transparent); - opacity: 0; - transition: opacity 0.35s ease; - z-index: 0; -} - -.pillar:hover { - transform: translateY(-2px); - border-color: var(--color-accent); - box-shadow: 0 8px 24px rgba(129, 140, 248, 0.2); -} - -.pillar:hover::before { - opacity: 0.3; -} - -.pillar > * { - position: relative; - z-index: 1; -} - -.pillar-icon { - flex-shrink: 0; - width: 72px; - height: 72px; - display: flex; - align-items: center; - justify-content: center; - background: linear-gradient(135deg, var(--color-accent), var(--color-accent-hover)); - border-radius: var(--radius-l); - font-size: 36px; - color: var(--color-on-accent); - box-shadow: 0 8px 20px rgba(129, 140, 248, 0.3); - transition: all 0.35s cubic-bezier(0.4, 0, 0.2, 1); -} - -.pillar:hover .pillar-icon { - transform: scale(1.1) rotateY(10deg); - box-shadow: 0 12px 30px rgba(129, 140, 248, 0.5); -} - -.pillar-content h3 { - margin: 0 0 var(--space-xs) 0; - font-size: 28px; - font-weight: 700; - color: var(--color-text-primary); - letter-spacing: -0.02em; -} - -.pillar-tagline { - margin: 0 0 var(--space-m) 0; - font-size: 
16px; - background: linear-gradient(135deg, var(--color-accent), var(--color-accent-hover)); - -webkit-background-clip: text; - -webkit-text-fill-color: transparent; - background-clip: text; - font-weight: 600; - font-style: italic; -} - -.pillar-description { - margin: 0 0 var(--space-l) 0; - font-size: 15px; - color: var(--color-text-secondary); - line-height: 1.7; - letter-spacing: 0.01em; -} - -.pillar-tools { - display: flex; - flex-wrap: wrap; - gap: var(--space-s); -} - -.tool-tag { - font-family: var(--font-mono); - font-size: 11px; - padding: 6px 12px; - background-color: var(--color-surface-hover); - color: var(--color-text-secondary); - border-radius: var(--radius-s); - border: 1px solid var(--color-border); - transition: all 0.25s cubic-bezier(0.4, 0, 0.2, 1); - cursor: default; - font-weight: 500; -} - -.tool-tag:hover { - background: linear-gradient(135deg, var(--color-accent-lighter), var(--color-surface-hover)); - border-color: var(--color-accent); - color: var(--color-accent); - transform: translateY(-1px) scale(1.05); - box-shadow: 0 4px 8px rgba(129, 140, 248, 0.15); -} - -/* Compound Effect Timeline */ -.compound-effect { - background: linear-gradient(135deg, var(--color-surface), var(--color-background)); - border-radius: var(--radius-xl); - padding: var(--space-xxl); - border: 1.5px solid transparent; - background-image: - linear-gradient(135deg, var(--color-surface), var(--color-background)), - linear-gradient(135deg, var(--color-accent-light), var(--color-border)); - background-origin: border-box; - background-clip: padding-box, border-box; - position: relative; - overflow: hidden; -} - -.compound-effect::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background: radial-gradient(circle at 50% 0%, var(--color-accent-lighter), transparent 60%); - opacity: 0.3; - pointer-events: none; -} - -.compound-effect h3 { - text-align: center; - margin: 0 0 var(--space-xxl) 0; - display: flex; - align-items: 
center; - justify-content: center; - gap: var(--space-s); - font-size: 28px; - position: relative; - z-index: 1; -} - -.compound-effect h3 i { - font-size: 32px; - animation: chart-pulse 2s ease-in-out infinite; -} - -@keyframes chart-pulse { - 0%, 100% { transform: scale(1) translateY(0); } - 50% { transform: scale(1.15) translateY(-2px); } -} - -.compound-grid { - display: flex; - flex-wrap: wrap; - align-items: center; - justify-content: center; - gap: var(--space-l); - position: relative; - z-index: 1; -} - -.compound-item { - text-align: center; - padding: var(--space-xl); - background: var(--color-background); - border-radius: var(--radius-l); - border: 1.5px solid var(--color-border); - min-width: 160px; - transition: all 0.35s cubic-bezier(0.4, 0, 0.2, 1); - position: relative; - box-shadow: 0 4px 12px rgba(0, 0, 0, 0.05); -} - -.compound-item::before { - content: ''; - position: absolute; - inset: 0; - background: linear-gradient(135deg, var(--color-accent-lighter), transparent); - opacity: 0; - border-radius: var(--radius-l); - transition: opacity 0.35s ease; -} - -.compound-item:hover { - transform: translateY(-2px); - border-color: var(--color-accent); - box-shadow: 0 8px 20px rgba(129, 140, 248, 0.2); -} - -.compound-item:hover::before { - opacity: 0.2; -} - -.compound-item.highlight { - background: linear-gradient(135deg, var(--color-accent), var(--color-accent-hover)); - border-color: var(--color-accent); - box-shadow: 0 12px 32px rgba(129, 140, 248, 0.4); - transform: scale(1.08); -} - -.compound-item.highlight .compound-number, -.compound-item.highlight .compound-text { - color: var(--color-on-accent); -} - -.compound-item.highlight:hover { - transform: translateY(-2px); - box-shadow: 0 10px 28px rgba(129, 140, 248, 0.4); -} - -.compound-number { - font-size: 18px; - font-weight: 700; - color: var(--color-accent); - margin-bottom: var(--space-s); - text-transform: uppercase; - letter-spacing: 0.05em; - position: relative; - z-index: 1; -} - 
-.compound-text { - font-size: 14px; - color: var(--color-text-secondary); - line-height: 1.5; - position: relative; - z-index: 1; -} - -.compound-arrow { - color: var(--color-accent); - font-size: 24px; - opacity: 0.6; - animation: arrow-slide 2s ease-in-out infinite; -} - -@keyframes arrow-slide { - 0%, 100% { transform: translateX(0); opacity: 0.6; } - 50% { transform: translateX(4px); opacity: 1; } -} - -@media (max-width: 767px) { - .pillar { - flex-direction: column; - text-align: center; - } - - .pillar-icon { - margin: 0 auto; - } - - .pillar-tools { - justify-content: center; - } - - .compound-arrow { - transform: rotate(90deg); - } - - .compound-grid { - flex-direction: column; - } -} diff --git a/docs/index.html b/docs/index.html deleted file mode 100644 index 0802199..0000000 --- a/docs/index.html +++ /dev/null @@ -1,1046 +0,0 @@ - - - - - - Compounding Engineering - AI-Powered Development Tools for Claude Code - - - - - - - - - - - - - - - - - - - - - - - -
-
- - -
- -
-
-
- - Version 2.32.2 released! - -

- Your Code Reviews Just Got 12 Expert Opinions. In 30 Seconds. -

-

- Here's what happened when we shipped yesterday: security audit, performance analysis, architectural review, pattern detection, and eight more specialized checks—all running in parallel. No meetings. No waiting. Just answers. That's compounding engineering: 29 specialized agents, 23 workflow commands, and 18 skills that make today's work easier than yesterday's. -

- -
-
- - -
-
-
-
-
29
-
Specialized Agents
-
-
-
-
23
-
Slash Commands
-
-
-
-
18
-
Intelligent Skills
-
-
-
-
1
-
MCP Server
-
-
-
- - -
-
-

Why Your Third Code Review Should Be Easier Than Your First

-

- Think about the last time you fixed a Rails N+1 query. You found it. You fixed it. Then next month, different developer, same bug, same investigation. That's linear engineering—you solved it, but the solution evaporated. -

-
- - -
-
- "Most engineering work is amnesia. You solve a problem on Tuesday, forget the solution by Friday, and re-solve it next quarter. Compounding engineering is different: each solved problem teaches the system. The security review you run today makes tomorrow's review smarter. The pattern you codify this sprint prevents bugs in the next three." -
-
- - -
-
-
-
-

Plan

-

Stop starting over from scratch

-

- You know that moment when you open a ticket and think "how did we solve this last time?" The framework-docs-researcher already knows. The git-history-analyzer remembers what worked in March. Run /plan and three research agents work in parallel—one reading docs, one analyzing your repo's history, one finding community patterns. In 60 seconds, you have a plan built on institutional memory instead of starting cold. -

-
- framework-docs-researcher - best-practices-researcher - repo-research-analyst - git-history-analyzer -
-
-
- -
-
-
-

Delegate

-

Work with experts who never forget

-

- The security-sentinel has checked 10,000 PRs for SQL injection. The kieran-rails-reviewer never approves a controller with business logic. They don't get tired, don't skip Friday afternoon reviews, don't forget the conventions you agreed on in March. Run /work and watch your plan execute with quality gates that actually enforce your standards—every single time. -

-
- 29 specialized agents - /work - dhh-rails-style skill - git-worktree skill -
-
-
- -
-
-
-

Assess

-

Get twelve opinions without twelve meetings

-

- Type /review PR#123 and go get coffee. When you come back, you'll have a security audit (did you sanitize that user input?), performance analysis (N+1 spotted on line 47), architecture review (this breaks the pattern from v2.3), data integrity check (that migration will fail in production), and eight more specialized reviews. All running in parallel. All categorized by severity. All stored as actionable P1/P2/P3 todos you can knock out in order. -

-
- security-sentinel - performance-oracle - architecture-strategist - data-integrity-guardian -
-
-
- -
-
-
-

Compound

-

Make sure you never solve the same bug twice

-

- Remember that CORS issue you debugged for three hours last month? Neither do I. That's the problem. Run /compound right after you fix something and it captures the solution as searchable documentation with YAML frontmatter. Next time someone hits the same issue, they grep for "CORS production" and find your answer in five seconds instead of re-debugging for three hours. That's how you compound. -

-
- /compound - compound-docs skill - file-todos skill -
-
-
-
- -
- - -
-
-

- 29 Specialized Agents -

-

- Think of them as coworkers who never quit. The security-sentinel has seen every SQL injection variant. The kieran-rails-reviewer enforces conventions with zero compromise. The performance-oracle spots N+1 queries while you're still reading the PR. Run them solo or launch twelve in parallel—your choice. -

-
- - -
-

Review Agents (11)

-
-
-
- kieran-rails-reviewer - Rails -
-

Super senior Rails developer with impeccable taste. Applies strict conventions for Turbo Streams, namespacing, and the "duplication over complexity" philosophy.

- claude agent kieran-rails-reviewer -
-
-
- dhh-rails-reviewer - Rails -
-

Reviews code from DHH's perspective. Focus on Rails conventions, simplicity, and avoiding over-engineering.

- claude agent dhh-rails-reviewer -
-
-
- kieran-python-reviewer - Python -
-

Python code review with strict conventions. PEP 8 compliance, type hints, and Pythonic patterns.

- claude agent kieran-python-reviewer -
-
-
- kieran-typescript-reviewer - TypeScript -
-

TypeScript review with focus on type safety, modern patterns, and clean architecture.

- claude agent kieran-typescript-reviewer -
-
-
- security-sentinel - Security -
-

Security audits and vulnerability assessments. OWASP top 10, injection attacks, authentication flaws.

- claude agent security-sentinel -
-
-
- performance-oracle - Performance -
-

Performance analysis and optimization recommendations. N+1 queries, caching strategies, bottleneck identification.

- claude agent performance-oracle -
-
-
- architecture-strategist - Architecture -
-

Analyze architectural decisions, compliance, and system design patterns.

- claude agent architecture-strategist -
-
-
- data-integrity-guardian - Data -
-

Database migrations and data integrity review. Schema changes, foreign keys, data consistency.

- claude agent data-integrity-guardian -
-
-
- pattern-recognition-specialist - Patterns -
-

Analyze code for patterns and anti-patterns. Design patterns, code smells, refactoring opportunities.

- claude agent pattern-recognition-specialist -
-
-
- code-simplicity-reviewer - Quality -
-

Final pass for simplicity and minimalism. Remove unnecessary complexity, improve readability.

- claude agent code-simplicity-reviewer -
-
-
- julik-frontend-races-reviewer - JavaScript -
-

Review JavaScript and Stimulus code for race conditions, DOM event handling, promise management, and timer cleanup.

- claude agent julik-frontend-races-reviewer -
-
-
- - -
-

Research Agents (4)

-
-
-
- framework-docs-researcher - Research -
-

Research framework documentation and best practices. Find official guidance and community patterns.

- claude agent framework-docs-researcher -
-
-
- best-practices-researcher - Research -
-

Gather external best practices and examples from the community and industry standards.

- claude agent best-practices-researcher -
-
-
- git-history-analyzer - Git -
-

Analyze git history and code evolution. Understand how code has changed and why.

- claude agent git-history-analyzer -
-
-
- repo-research-analyst - Research -
-

Research repository structure and conventions. Understand project patterns and organization.

- claude agent repo-research-analyst -
-
-
- - -
-

Design Agents (3)

-
-
-
- design-iterator - Design -
-

Iteratively refine UI through systematic design iterations with screenshots and feedback loops.

- claude agent design-iterator -
-
-
- figma-design-sync - Figma -
-

Synchronize web implementations with Figma designs. Pixel-perfect matching.

- claude agent figma-design-sync -
-
-
- design-implementation-reviewer - Review -
-

Verify UI implementations match Figma designs. Catch visual regressions.

- claude agent design-implementation-reviewer -
-
-
- - -
-

Workflow Agents (5)

-
-
-
- bug-reproduction-validator - Bugs -
-

Systematically reproduce and validate bug reports. Create minimal reproduction cases.

- claude agent bug-reproduction-validator -
-
-
- pr-comment-resolver - PR -
-

Address PR comments and implement fixes. Batch process review feedback.

- claude agent pr-comment-resolver -
-
-
- lint - Quality -
-

Run linting and code quality checks on Ruby and ERB files.

- claude agent lint -
-
-
- spec-flow-analyzer - Testing -
-

Analyze user flows and identify gaps in specifications.

- claude agent spec-flow-analyzer -
-
-
- every-style-editor - Content -
-

Edit content to conform to Every's style guide.

- claude agent every-style-editor -
-
-
- - -
-

Documentation Agent (1)

-
-
-
- ankane-readme-writer - Docs -
-

Create READMEs following Ankane-style template for Ruby gems. Clean, concise, comprehensive documentation that gets straight to the point.

- claude agent ankane-readme-writer -
-
-
-
- - -
-
-

- 23 Powerful Commands -

-

- Slash commands that replace entire workflows. /review is your code review committee. /plan is your research team. /triage sorts 50 todos in the time it takes you to read five. Each one automates hours of work into a single line. -

-
- - -
-

Workflow Commands

-
-
-
- /plan - core -
-

Create comprehensive implementation plans with research agents and stakeholder analysis.

-
-
-
- /review - core -
-

Run exhaustive code reviews using 12 or more parallel agents, ultra-thinking, and worktrees.

-
-
-
- /work - core -
-

Execute work items systematically with progress tracking and validation.

-
-
-
- /compound - core -
-

Document solved problems to compound team knowledge. Turn learnings into reusable patterns.

-
-
-
- - -
-

Utility Commands

-
-
-
- /changelog - util -
-

Create engaging changelogs for recent merges.

-
-
-
- /create-agent-skill - util -
-

Create or edit Claude Code skills with expert guidance.

-
-
-
- /generate_command - util -
-

Generate new slash commands from templates.

-
-
-
- /heal-skill - util -
-

Fix skill documentation issues automatically.

-
-
-
- /plan_review - util -
-

Multi-agent plan review in parallel.

-
-
-
- /prime - util -
-

Prime/setup command for project initialization.

-
-
-
- /report-bug - util -
-

Report bugs in the plugin with structured templates.

-
-
-
- /reproduce-bug - util -
-

Reproduce bugs using logs and console output.

-
-
-
- /triage - util -
-

Triage and prioritize issues interactively.

-
-
-
- /resolve_parallel - util -
-

Resolve TODO comments in parallel.

-
-
-
- /resolve_pr_parallel - util -
-

Resolve PR comments in parallel.

-
-
-
- /resolve_todo_parallel - util -
-

Resolve file-based todos in parallel.

-
-
-
- /release-docs - util -
-

Build and update the documentation site with current plugin components.

-
-
-
- /deploy-docs - util -
-

Validate and prepare documentation for GitHub Pages deployment.

-
-
-
-
- - -
-
-

- 18 Intelligent Skills -

-

- Domain expertise on tap. Need to write a Ruby gem? The andrew-kane-gem-writer knows the patterns Andrew uses in 50+ popular gems. Building a Rails app? The dhh-rails-style enforces 37signals conventions. Generating images? The gemini-imagegen has Google's AI on speed dial. Just invoke the skill and watch it work. -

-
- - -
-

Development Tools

-
-
-
- andrew-kane-gem-writer - Ruby -
-

Write Ruby gems following Andrew Kane's patterns. Clean APIs, smart defaults, comprehensive testing.

- skill: andrew-kane-gem-writer -
-
-
- dhh-rails-style - Rails -
-

Write Ruby/Rails code in DHH's 37signals style. REST purity, fat models, thin controllers, Hotwire patterns.

- skill: dhh-rails-style -
-
-
- dspy-ruby - AI -
-

Build type-safe LLM applications with DSPy.rb. Structured prompting, optimization, providers.

- skill: dspy-ruby -
-
-
- frontend-design - Design -
-

Create production-grade frontend interfaces with modern CSS, responsive design, accessibility.

- skill: frontend-design -
-
-
- create-agent-skills - Meta -
-

Expert guidance for creating Claude Code skills. Templates, best practices, validation.

- skill: create-agent-skills -
-
-
- skill-creator - Meta -
-

Guide for creating effective Claude Code skills with structured workflows.

- skill: skill-creator -
-
-
- compound-docs - Docs -
-

Capture solved problems as categorized documentation with YAML schema.

- skill: compound-docs -
-
-
- - -
-

Content & Workflow

-
-
-
- every-style-editor - Content -
-

Review copy for Every's style guide compliance.

- skill: every-style-editor -
-
-
- file-todos - Workflow -
-

File-based todo tracking system with priorities and status.

- skill: file-todos -
-
-
- git-worktree - Git -
-

Manage Git worktrees for parallel development on multiple branches.

- skill: git-worktree -
-
-
- - -
-

Image Generation

-
- -
-
-
- - -
-
-

- 1 MCP Server -

-

- Playwright gives Claude a browser—it can click buttons, take screenshots, fill forms, and validate what your users actually see. Context7 gives it instant access to docs for 100+ frameworks. Need to know how Next.js handles dynamic routes? Context7 fetches the answer in real-time instead of hallucinating from outdated training data. -

-
- -
-
-
- - Playwright -
-

Your AI can now see and click like a user. Test flows, grab screenshots, debug what's actually rendering.

-
-

Tools Provided: 6 tools

-
    -
  • browser_navigate - Navigate to URLs
  • -
  • browser_take_screenshot - Take screenshots
  • -
  • browser_click - Click elements
  • -
  • browser_fill_form - Fill form fields
  • -
  • browser_snapshot - Get accessibility snapshot
  • -
  • browser_evaluate - Execute JavaScript
  • -
-
-
-
-
- - Context7 -
-

Stop getting outdated answers. Context7 fetches current docs from 100+ frameworks in real-time.

-
-

Tools Provided: 2 tools

-
    -
  • resolve-library-id - Find library ID
  • -
  • get-library-docs - Get documentation
  • -
-

Supports: Rails, React, Next.js, Vue, Django, Laravel, and more than 100 others

-
-
-
-
- - -
-
-

Three Commands. Zero Configuration.

-

- You're literally 30 seconds from running your first 12-agent code review. No config files. No API keys (except for image generation). Just copy, paste, go. -

-
- -
-
-
1
-
-

Add the Marketplace

-
-
claude /plugin marketplace add https://github.com/EveryInc/compound-engineering-plugin
-
-
-
-
-
2
-
-

Install the Plugin

-
-
claude /plugin install compound-engineering
-
-
-
-
-
3
-
-

Ship Faster

-
-
# Run a 12-agent code review
-/review PR#123
-
-# Get a security audit
-claude agent security-sentinel
-
-# Generate an image
-skill: gemini-imagegen
-
-
-
-
-
- - -
-
-

Frequently Asked Questions

-
-
-
- -

What is Compounding Engineering?

- -
-
-

- It's the opposite of how most teams work. Normally, you fix a bug, ship it, and forget it. Next month someone hits the same bug and re-solves it from scratch. Compounding engineering means each fix teaches the system. Your third code review is faster than your first because the agents learned patterns. Your tenth security audit catches issues you missed in audit #2. The work accumulates instead of evaporating. -

-
-
-
- -

How do agents differ from skills?

- -
-
-

- Agents are coworkers with specific jobs. The security-sentinel does security reviews. The kieran-rails-reviewer enforces Rails conventions. You call them directly: claude agent security-sentinel. -

-

- Skills are expertise Claude can tap into when needed. The dhh-rails-style knows 37signals Rails patterns. The gemini-imagegen knows how to generate images. Claude invokes them automatically when relevant, or you can explicitly call them: skill: dhh-rails-style. -

-
-
-
- -

Why aren't MCP servers loading automatically?

- -
-
-

- Yeah, we know. It's a current limitation. The workaround is simple: manually add the MCP servers to your .claude/settings.json file. Check the README for copy-paste config. Takes 30 seconds and you're done. -

-
-
-
- -

Can I use this with languages other than Ruby/Rails?

- -
-
-

- Absolutely. We've got Python and TypeScript reviewers alongside the Rails ones. And the workflow commands, research agents, and skills like gemini-imagegen don't care what language you write. The security-sentinel finds SQL injection whether it's in Rails, Django, or Laravel. -

-
-
-
- -

How do I create my own agents or skills?

- -
-
-

- Run /create-agent-skill or invoke the create-agent-skills skill. Both give you templates, enforce best practices, and walk you through the structure. You'll have a working agent or skill in minutes instead of reverse-engineering from examples. -

-
-
-
-
- - -
-
- Free & Open Source -

Install Once. Compound Forever.

-

- Your next code review takes 30 seconds. The one after that? Even faster. That's compounding. Get 29 expert agents, 23 workflow commands, and 18 specialized skills working for you right now. -

- -

Join developers who ship faster because yesterday's work makes today easier

-
-
-
- - -
- - - diff --git a/docs/js/main.js b/docs/js/main.js deleted file mode 100644 index bc71913..0000000 --- a/docs/js/main.js +++ /dev/null @@ -1,225 +0,0 @@ -/** - * Compounding Engineering Documentation - * Main JavaScript functionality - */ - -document.addEventListener('DOMContentLoaded', () => { - initMobileNav(); - initSmoothScroll(); - initCopyCode(); - initThemeToggle(); -}); - -/** - * Mobile Navigation Toggle - */ -function initMobileNav() { - const mobileToggle = document.querySelector('[data-mobile-toggle]'); - const navigation = document.querySelector('[data-navigation]'); - - if (!mobileToggle || !navigation) return; - - mobileToggle.addEventListener('click', () => { - navigation.classList.toggle('open'); - mobileToggle.classList.toggle('active'); - - // Update aria-expanded - const isOpen = navigation.classList.contains('open'); - mobileToggle.setAttribute('aria-expanded', isOpen); - }); - - // Close menu when clicking outside - document.addEventListener('click', (event) => { - if (!mobileToggle.contains(event.target) && !navigation.contains(event.target)) { - navigation.classList.remove('open'); - mobileToggle.classList.remove('active'); - mobileToggle.setAttribute('aria-expanded', 'false'); - } - }); - - // Close menu when clicking a nav link - navigation.querySelectorAll('.nav-link').forEach(link => { - link.addEventListener('click', () => { - navigation.classList.remove('open'); - mobileToggle.classList.remove('active'); - mobileToggle.setAttribute('aria-expanded', 'false'); - }); - }); -} - -/** - * Smooth Scroll for Anchor Links - */ -function initSmoothScroll() { - document.querySelectorAll('a[href^="#"]').forEach(anchor => { - anchor.addEventListener('click', function(e) { - const targetId = this.getAttribute('href'); - if (targetId === '#') return; - - const targetElement = document.querySelector(targetId); - if (!targetElement) return; - - e.preventDefault(); - - const navHeight = document.querySelector('.nav-container')?.offsetHeight || 0; - 
const targetPosition = targetElement.getBoundingClientRect().top + window.pageYOffset - navHeight - 24; - - window.scrollTo({ - top: targetPosition, - behavior: 'smooth' - }); - - // Update URL without jumping - history.pushState(null, null, targetId); - }); - }); -} - -/** - * Copy Code Functionality - */ -function initCopyCode() { - document.querySelectorAll('.card-code-block').forEach(block => { - // Create copy button - const copyBtn = document.createElement('button'); - copyBtn.className = 'copy-btn'; - copyBtn.innerHTML = ''; - copyBtn.setAttribute('aria-label', 'Copy code'); - copyBtn.setAttribute('title', 'Copy to clipboard'); - - // Style the button - copyBtn.style.cssText = ` - position: absolute; - top: 8px; - right: 8px; - padding: 6px 10px; - background: rgba(255, 255, 255, 0.1); - border: none; - border-radius: 6px; - color: #94a3b8; - cursor: pointer; - opacity: 0; - transition: all 0.2s ease; - font-size: 14px; - `; - - // Make parent relative for positioning - block.style.position = 'relative'; - block.appendChild(copyBtn); - - // Show/hide on hover - block.addEventListener('mouseenter', () => { - copyBtn.style.opacity = '1'; - }); - - block.addEventListener('mouseleave', () => { - copyBtn.style.opacity = '0'; - }); - - // Copy functionality - copyBtn.addEventListener('click', async () => { - const code = block.querySelector('code'); - if (!code) return; - - try { - await navigator.clipboard.writeText(code.textContent); - copyBtn.innerHTML = ''; - copyBtn.style.color = '#34d399'; - - setTimeout(() => { - copyBtn.innerHTML = ''; - copyBtn.style.color = '#94a3b8'; - }, 2000); - } catch (err) { - console.error('Failed to copy:', err); - copyBtn.innerHTML = ''; - copyBtn.style.color = '#f87171'; - - setTimeout(() => { - copyBtn.innerHTML = ''; - copyBtn.style.color = '#94a3b8'; - }, 2000); - } - }); - }); -} - -/** - * Theme Toggle (Light/Dark) - */ -function initThemeToggle() { - // Check for saved theme preference or default to dark - const 
savedTheme = localStorage.getItem('theme') || 'dark'; - document.documentElement.className = `theme-${savedTheme}`; - - // Create theme toggle button if it doesn't exist - const existingToggle = document.querySelector('[data-theme-toggle]'); - if (existingToggle) { - existingToggle.addEventListener('click', toggleTheme); - updateThemeToggleIcon(existingToggle, savedTheme); - } -} - -function toggleTheme() { - const html = document.documentElement; - const currentTheme = html.classList.contains('theme-dark') ? 'dark' : 'light'; - const newTheme = currentTheme === 'dark' ? 'light' : 'dark'; - - html.className = `theme-${newTheme}`; - localStorage.setItem('theme', newTheme); - - const toggle = document.querySelector('[data-theme-toggle]'); - if (toggle) { - updateThemeToggleIcon(toggle, newTheme); - } -} - -function updateThemeToggleIcon(toggle, theme) { - const icon = toggle.querySelector('i'); - if (icon) { - icon.className = theme === 'dark' ? 'fa-solid fa-sun' : 'fa-solid fa-moon'; - } -} - -/** - * Intersection Observer for Animation on Scroll - */ -function initScrollAnimations() { - const observerOptions = { - threshold: 0.1, - rootMargin: '0px 0px -50px 0px' - }; - - const observer = new IntersectionObserver((entries) => { - entries.forEach(entry => { - if (entry.isIntersecting) { - entry.target.classList.add('visible'); - observer.unobserve(entry.target); - } - }); - }, observerOptions); - - document.querySelectorAll('.agent-card, .command-card, .skill-card, .mcp-card, .stat-card').forEach(card => { - card.style.opacity = '0'; - card.style.transform = 'translateY(20px)'; - card.style.transition = 'opacity 0.5s ease, transform 0.5s ease'; - observer.observe(card); - }); -} - -// Add visible class styles -const style = document.createElement('style'); -style.textContent = ` - .agent-card.visible, - .command-card.visible, - .skill-card.visible, - .mcp-card.visible, - .stat-card.visible { - opacity: 1 !important; - transform: translateY(0) !important; - } -`; 
-document.head.appendChild(style); - -// Initialize scroll animations after a short delay -setTimeout(initScrollAnimations, 100); diff --git a/docs/pages/agents.html b/docs/pages/agents.html deleted file mode 100644 index eb39b75..0000000 --- a/docs/pages/agents.html +++ /dev/null @@ -1,649 +0,0 @@ - - - - - - Agent Reference - Compounding Engineering - - - - - - - - - - -
-
- - -
-
- - -
- -
-

Agent Reference

-

- Think of agents as your expert teammates who never sleep. You've got 23 specialists here—each one obsessed with a single domain. Call them individually when you need focused expertise, or orchestrate them together for multi-angle analysis. They're opinionated, they're fast, and they remember your codebase better than you do. -

- -
-

How to Use Agents

-
-
# Basic invocation
-claude agent [agent-name]
-
-# With a specific message
-claude agent [agent-name] "Your message here"
-
-# Examples
-claude agent kieran-rails-reviewer
-claude agent security-sentinel "Audit the payment flow"
-
-
- - -
-

Review Agents (10)

-

Your code review dream team. These agents catch what humans miss at 2am—security holes, performance cliffs, architectural drift, and those "it works but I hate it" moments. They're picky. They disagree with each other. That's the point.

- -
-
-

kieran-rails-reviewer

- Rails -
-

- Your senior Rails developer who's seen too many "clever" solutions fail in production. Obsessed with code that's boring, predictable, and maintainable. Strict on existing code (because touching it risks everything), pragmatic on new isolated features (because shipping matters). If you've ever thought "this works but feels wrong," this reviewer will tell you why. -

-

Key Principles

-
    -
  • Existing Code Modifications - Very strict. Added complexity needs strong justification.
  • -
  • New Code - Pragmatic. If it's isolated and works, it's acceptable.
  • -
  • Turbo Streams - Simple turbo streams MUST be inline arrays in controllers.
  • -
  • Testing as Quality - Hard-to-test code = poor structure that needs refactoring.
  • -
  • Naming (5-Second Rule) - Must understand what a view/component does in 5 seconds from its name.
  • -
  • Namespacing - Always use class Module::ClassName pattern.
  • -
  • Duplication > Complexity - Simple duplicated code is better than complex DRY abstractions.
  • -
-
-
claude agent kieran-rails-reviewer "Review the UserController"
-
-
- -
-
-

dhh-rails-reviewer

- Rails -
-

- What if DHH reviewed your Rails PR? He'd ask why you're building React inside Rails, why you need six layers of abstraction for a form, and whether you've forgotten that Rails already solved this problem. This agent channels that energy—blunt, opinionated, allergic to complexity. -

-

Key Focus Areas

-
    -
  • Identifies deviations from Rails conventions
  • -
  • Spots JavaScript framework patterns infiltrating Rails
  • -
  • Tears apart unnecessary abstractions
  • -
  • Challenges overengineering and microservices mentality
  • -
-
-
claude agent dhh-rails-reviewer
-
-
- -
-
-

kieran-python-reviewer

- Python -
-

- Your Pythonic perfectionist who believes type hints aren't optional and dict.get() beats try/except KeyError. Expects modern Python 3.10+ patterns—no legacy syntax, no typing.List when list works natively. If your code looks like Java translated to Python, prepare for rewrites. -

-

Key Focus Areas

-
    -
  • Type hints for all functions
  • -
  • Pythonic patterns and idioms
  • -
  • Modern Python syntax
  • -
  • Import organization
  • -
  • Module extraction signals
  • -
-
-
claude agent kieran-python-reviewer
-
-
- -
-
-

kieran-typescript-reviewer

- TypeScript -
-

- TypeScript's type system is a gift—don't throw it away with any. This reviewer treats any like a code smell that needs justification. Expects proper types, clean imports, and code that doesn't need comments because the types explain everything. You added TypeScript for safety; this agent makes sure you actually get it. -

-

Key Focus Areas

-
    -
  • No any without justification
  • -
  • Component/module extraction signals
  • -
  • Import organization
  • -
  • Modern TypeScript patterns
  • -
  • Testability assessment
  • -
-
-
claude agent kieran-typescript-reviewer
-
-
- -
-
-

security-sentinel

- Security -
-

- Security vulnerabilities hide in boring code—the "just grab the user ID from params" line that ships a privilege escalation bug to production. This agent thinks like an attacker: SQL injection, XSS, auth bypass, leaked secrets. Run it before touching authentication, payments, or anything with PII. Your users' data depends on paranoia. -

-

Security Checks

-
    -
  • Input validation analysis
  • -
  • SQL injection risk assessment
  • -
  • XSS vulnerability detection
  • -
  • Authentication/authorization audit
  • -
  • Sensitive data exposure scanning
  • -
  • OWASP Top 10 compliance
  • -
  • Hardcoded secrets search
  • -
-
-
claude agent security-sentinel "Audit the payment flow"
-
-
- -
-
-

performance-oracle

- Performance -
-

- Your code works fine with 10 users. What happens at 10,000? This agent time-travels to your future scaling problems—N+1 queries that murder your database, O(n²) algorithms hiding in loops, missing indexes, memory leaks. It thinks in Big O notation and asks uncomfortable questions about what breaks first when traffic spikes. -

-

Analysis Areas

-
    -
  • Algorithmic complexity (Big O notation)
  • -
  • N+1 query pattern detection
  • -
  • Proper index usage verification
  • -
  • Memory management review
  • -
  • Caching opportunity identification
  • -
  • Network usage optimization
  • -
  • Frontend bundle impact
  • -
-
-
claude agent performance-oracle
-
-
- -
-
-

architecture-strategist

- Architecture -
-

- Every "small change" either reinforces your architecture or starts eroding it. This agent zooms out to see if your fix actually fits the system's design—or if you're bolting duct tape onto a crumbling foundation. It speaks SOLID principles, microservice boundaries, and API contracts. Call it when you're about to make a change that "feels weird." -

-

Analysis Areas

-
    -
  • Overall system structure understanding
  • -
  • Change context within architecture
  • -
  • Architectural violation identification
  • -
  • SOLID principles compliance
  • -
  • Microservice boundary assessment
  • -
  • API contract evaluation
  • -
-
-
claude agent architecture-strategist
-
-
- -
-
-

data-integrity-guardian

- Data -
-

- Migrations can't be rolled back once they're run on production. This agent is your last line of defense before you accidentally drop a column with user data, create a race condition in transactions, or violate GDPR. It obsesses over referential integrity, rollback safety, and data constraints. Your database is forever; migrations should be paranoid. -

-

Review Areas

-
    -
  • Migration safety and reversibility
  • -
  • Data constraint validation
  • -
  • Transaction boundary review
  • -
  • Referential integrity preservation
  • -
  • Privacy compliance (GDPR, CCPA)
  • -
  • Data corruption scenario checking
  • -
-
-
claude agent data-integrity-guardian
-
-
- -
-
-

pattern-recognition-specialist

- Patterns -
-

- Patterns tell stories—Factory, Observer, God Object, Copy-Paste Programming. This agent reads your code like an archaeologist reading artifacts. It spots the good patterns (intentional design), the anti-patterns (accumulated tech debt), and the duplicated blocks you swore you'd refactor later. Runs tools like jscpd because humans miss repetition that machines catch instantly. -

-

Detection Areas

-
    -
  • Design patterns (Factory, Singleton, Observer, etc.)
  • -
  • Anti-patterns and code smells
  • -
  • TODO/FIXME comments
  • -
  • God objects and circular dependencies
  • -
  • Naming consistency
  • -
  • Code duplication
  • -
-
-
claude agent pattern-recognition-specialist
-
-
- -
-
-

code-simplicity-reviewer

- Quality -
-

- Simplicity is violent discipline. This agent asks "do you actually need this?" about every line, every abstraction, every dependency. YAGNI isn't a suggestion—it's the law. Your 200-line feature with three layers of indirection? This agent will show you the 50-line version that does the same thing. Complexity is a liability; simplicity compounds. -

-

Simplification Checks

-
    -
  • Analyze every line for necessity
  • -
  • Simplify complex logic
  • -
  • Remove redundancy and duplication
  • -
  • Challenge abstractions
  • -
  • Optimize for readability
  • -
  • Eliminate premature generalization
  • -
-
-
claude agent code-simplicity-reviewer
-
-
-
- - -
-

Research Agents (4)

-

Stop guessing. These agents dig through documentation, GitHub repos, git history, and real-world examples to give you answers backed by evidence. They read faster than you, remember more than you, and synthesize patterns you'd miss. Perfect for "how should I actually do this?" questions.

- -
-
-

framework-docs-researcher

- Research -
-

- Official docs are scattered. GitHub examples are inconsistent. Deprecations hide in changelogs. This agent pulls it all together—docs, source code, version constraints, real-world examples. Ask "how do I use Hotwire Turbo?" and get back patterns that actually work in production, not toy tutorials. -

-

Capabilities

-
    -
  • Fetch official framework and library documentation
  • -
  • Identify version-specific constraints and deprecations
  • -
  • Search GitHub for real-world usage examples
  • -
  • Analyze gem/library source code using bundle show
  • -
  • Synthesize findings with practical examples
  • -
-
-
claude agent framework-docs-researcher "Research Hotwire Turbo patterns"
-
-
- -
-
-

best-practices-researcher

- Research -
-

- "Best practices" are everywhere and contradictory. This agent cuts through the noise by evaluating sources (official docs, trusted blogs, real GitHub repos), checking recency, and synthesizing actionable guidance. You get code templates, patterns that scale, and answers you can trust—not StackOverflow copy-paste roulette. -

-

Capabilities

-
    -
  • Leverage multiple sources (Context7 MCP, web search, GitHub)
  • -
  • Evaluate information quality and recency
  • -
  • Synthesize into actionable guidance
  • -
  • Provide code examples and templates
  • -
  • Research issue templates and community engagement
  • -
-
-
claude agent best-practices-researcher "Find pagination patterns"
-
-
- -
-
-

git-history-analyzer

- Git -
-

- Your codebase has a history—decisions, patterns, mistakes. This agent does archaeology with git tools: file evolution, blame analysis, contributor expertise mapping. Ask "why does this code exist?" and get the commit that explains it. Spot patterns in how bugs appear. Understand the design decisions buried in history. -

-

Analysis Techniques

-
    -
  • Trace file evolution using git log --follow
  • -
  • Determine code origins using git blame -w -C -C -C
  • -
  • Identify patterns from commit history
  • -
  • Map key contributors and expertise areas
  • -
  • Extract historical patterns of issues and fixes
  • -
-
-
claude agent git-history-analyzer "Analyze changes to User model"
-
-
- -
-
-

repo-research-analyst

- Research -
-

- Every repo has conventions—some documented, most tribal knowledge. This agent reads ARCHITECTURE.md, issue templates, PR patterns, and actual code to reverse-engineer the standards. Perfect for joining a new project or ensuring your PR matches the team's implicit style. Finds the rules nobody wrote down. -

-

Analysis Areas

-
    -
  • Architecture and documentation files (ARCHITECTURE.md, README.md, CLAUDE.md)
  • -
  • GitHub issues for patterns and conventions
  • -
  • Issue/PR templates and guidelines
  • -
  • Implementation patterns using ast-grep or rg
  • -
  • Project-specific conventions
  • -
-
-
claude agent repo-research-analyst
-
-
-
- - -
-

Workflow Agents (5)

-

Tedious work you hate doing. These agents handle the grind—reproducing bugs, resolving PR comments, running linters, analyzing specs. They're fast, they don't complain, and they free you up to solve interesting problems instead of mechanical ones.

- -
-
-

bug-reproduction-validator

- Bugs -
-

- Half of bug reports aren't bugs—they're user errors, environment issues, or misunderstood features. This agent systematically reproduces the reported behavior, classifies what it finds (Confirmed, Can't Reproduce, Not a Bug, etc.), and assesses severity. Saves you from chasing ghosts or missing real issues. -

-

Classification Types

-
    -
  • Confirmed - Bug reproduced successfully
  • -
  • Cannot Reproduce - Unable to reproduce
  • -
  • Not a Bug - Expected behavior
  • -
  • Environmental - Environment-specific issue
  • -
  • Data - Data-related issue
  • -
  • User Error - User misunderstanding
  • -
-
-
claude agent bug-reproduction-validator
-
-
- -
-
-

pr-comment-resolver

- PR -
-

- Code review comments pile up. This agent reads them, plans fixes, implements changes, and reports back what it did. It doesn't argue with reviewers or skip hard feedback—it just resolves the work systematically. Great for burning through a dozen "change this variable name" comments in seconds. -

-

Workflow

-
    -
  • Analyze code review comments
  • -
  • Plan the resolution before implementation
  • -
  • Implement requested modifications
  • -
  • Verify resolution doesn't break functionality
  • -
  • Provide clear resolution reports
  • -
-
-
claude agent pr-comment-resolver
-
-
- -
-
-

lint

- Quality -
-

- Linters are pedantic robots that enforce consistency. This agent runs StandardRB, ERBLint, and Brakeman for you—checking Ruby style, ERB templates, and security issues. It's fast (uses the Haiku model) and catches the formatting noise before CI does. -

-

Tools Run

-
    -
  • bundle exec standardrb - Ruby file checking/fixing
  • -
  • bundle exec erblint --lint-all - ERB templates
  • -
  • bin/brakeman - Security scanning
  • -
-
-
claude agent lint
-
-
- -
-
-

spec-flow-analyzer

- Testing -
-

- Specs always have gaps—edge cases nobody thought about, ambiguous requirements, missing error states. This agent maps all possible user flows, identifies what's unclear or missing, and generates the questions you need to ask stakeholders. Runs before you code to avoid building the wrong thing. -

-

Analysis Areas

-
    -
  • Map all possible user flows and permutations
  • -
  • Identify gaps, ambiguities, and missing specifications
  • -
  • Consider different user types, roles, permissions
  • -
  • Analyze error states and edge cases
  • -
  • Generate critical questions requiring clarification
  • -
-
-
claude agent spec-flow-analyzer
-
-
- -
-
-

every-style-editor

- Content -
-

- Style guides are arbitrary rules that make writing consistent. This agent enforces Every's particular quirks—title case in headlines, no overused filler words ("actually," "very"), active voice, Oxford commas. It's a line-by-line grammar cop for content that needs to match the brand. -

-

Style Checks

-
    -
  • Title case in headlines, sentence case elsewhere
  • -
  • Company singular/plural usage
  • -
  • Remove overused words (actually, very, just)
  • -
  • Enforce active voice
  • -
  • Apply formatting rules (Oxford commas, em dashes)
  • -
-
-
claude agent every-style-editor
-
-
-
- - -
-

Design Agents (3)

-

Design is iteration. These agents take screenshots, compare them to Figma, make targeted improvements, and repeat. They fix spacing, alignment, colors, typography—the visual details that compound into polish. Perfect for closing the gap between "it works" and "it looks right."

- -
-
-

design-iterator

- Design -
-

- Design doesn't happen in one pass. This agent runs a loop: screenshot the UI, analyze what's off (spacing, colors, alignment), implement 3-5 targeted fixes, repeat. Run it for 10 iterations and watch rough interfaces transform into polished designs through systematic refinement. -

-

Process

-
    -
  • Take focused screenshots of target elements
  • -
  • Analyze current state and identify 3-5 improvements
  • -
  • Implement targeted CSS/design changes
  • -
  • Document changes made
  • -
  • Repeat for specified iterations (default 10)
  • -
-
-
claude agent design-iterator
-
-
- -
-
-

figma-design-sync

- Figma -
-

- Designers hand you a Figma file. You build it. Then: "the spacing is wrong, the font is off, the colors don't match." This agent compares your implementation to the Figma spec, identifies every visual discrepancy, and fixes them automatically. Designers stay happy. You stay sane. -

-

Workflow

-
    -
  • Extract design specifications from Figma
  • -
  • Capture implementation screenshots
  • -
  • Conduct systematic visual comparison
  • -
  • Make precise code changes to fix discrepancies
  • -
  • Verify implementation matches design
  • -
-
-
claude agent figma-design-sync
-
-
- -
-
-

design-implementation-reviewer

- Review -
-

- Before you ship UI changes, run this agent. It compares your implementation against Figma at a pixel level—layouts, typography, colors, spacing, responsive behavior. Uses the Opus model for detailed visual analysis. Catches the "close enough" mistakes that users notice but you don't. -

-

Comparison Areas

-
    -
  • Layouts and structure
  • -
  • Typography (fonts, sizes, weights)
  • -
  • Colors and themes
  • -
  • Spacing and alignment
  • -
  • Different viewport sizes
  • -
-
-
claude agent design-implementation-reviewer
-
-
-
- - -
-

Documentation Agent (1)

- -
-
-

ankane-readme-writer

- Docs -
-

- Andrew Kane writes READMEs that are models of clarity—concise, scannable, zero fluff. This agent generates gem documentation in that style: 15 words max per sentence, imperative voice, single-purpose code examples. If your README rambles, this agent will fix it. -

-

Section Order

-
    -
  1. Header (title + description)
  2. -
  3. Installation
  4. -
  5. Quick Start
  6. -
  7. Usage
  8. -
  9. Options
  10. -
  11. Upgrading
  12. -
  13. Contributing
  14. -
  15. License
  16. -
-

Style Guidelines

-
    -
  • Imperative voice throughout
  • -
  • 15 words max per sentence
  • -
  • Single-purpose code fences
  • -
  • Up to 4 badges maximum
  • -
  • No HTML comments
  • -
-
-
claude agent ankane-readme-writer
-
-
-
- - - -
-
-
- - - - - diff --git a/docs/pages/changelog.html b/docs/pages/changelog.html deleted file mode 100644 index dd0c72d..0000000 --- a/docs/pages/changelog.html +++ /dev/null @@ -1,534 +0,0 @@ - - - - - - Changelog - Compounding Engineering - - - - - - - - - - -
-
- - -
-
- - -
- -
-

Changelog

-

- All notable changes to the compound-engineering plugin. This project follows Semantic Versioning and Keep a Changelog conventions. -

- - -
-
-

v2.32.2

- 2026-02-12 -
- -
-

Changed

-
    -
  • - /release-docs command - Moved from plugin to local .claude/commands/. This is a repository maintenance command and should not be distributed to users. Command count reduced from 24 to 23. -
  • -
-
-
- - -
-
-

v2.32.1

- 2026-02-12 -
- -
-

Changed

-
    -
  • - /workflows:review command - Added the learnings-researcher agent to the parallel review phase. The review now searches docs/solutions/ for past issues related to the PR's modules and patterns, surfacing "Known Pattern" findings during synthesis. -
  • -
-
-
- - -
-
-

v2.6.0

- 2024-11-26 -
- -
-

Removed

-
    -
  • - feedback-codifier agent - Removed from workflow agents. Agent count reduced from 24 to 23. -
  • -
-
-
- - -
-
-

v2.5.0

- 2024-11-25 -
- -
-

Added

-
    -
  • - /report-bug command - New slash command for reporting bugs in the compound-engineering plugin. Provides a structured workflow that gathers bug information through guided questions, collects environment details automatically, and creates a GitHub issue in the EveryInc/compound-engineering-plugin repository. -
  • -
-
-
- - -
-
-

v2.4.1

- 2024-11-24 -
- -
-

Improved

-
    -
  • - design-iterator agent - Added focused screenshot guidance: always capture only the target element/area instead of full-page screenshots. Includes browser_resize recommendations, an element-targeted screenshot workflow using browser_snapshot refs, and an explicit instruction to never use fullPage mode. -
  • -
-
-
- - -
-
-

v2.4.0

- 2024-11-24 -
- -
-

Fixed

-
    -
  • - MCP Configuration - Moved MCP servers back to plugin.json, following working examples from anthropics/life-sciences plugins. -
  • -
  • - Context7 URL - Updated to use HTTP type with correct endpoint URL. -
  • -
-
-
- - -
-
-

v2.3.0

- 2024-11-24 -
- -
-

Changed

-
    -
  • - MCP Configuration - Moved MCP servers from inline plugin.json to a separate .mcp.json file per Claude Code best practices. -
  • -
-
-
- - -
-
-

v2.2.1

- 2024-11-24 -
- -
-

Fixed

-
    -
  • - Playwright MCP Server - Added the missing "type": "stdio" field required for the MCP server configuration to load properly. -
  • -
-
-
- - -
-
-

v2.2.0

- 2024-11-24 -
- -
-

Added

-
    -
  • - Context7 MCP Server - Bundled Context7 for instant framework documentation lookup. Provides up-to-date docs for Rails, React, Next.js, and more than 100 other frameworks. -
  • -
-
-
- - -
-
-

v2.1.0

- 2024-11-24 -
- -
-

Added

-
    -
  • - Playwright MCP Server - Bundled @playwright/mcp for browser automation across all projects. Provides screenshot, navigation, click, fill, and evaluate tools. -
  • -
-
- -
-

Changed

-
    -
  • Replaced all Puppeteer references with Playwright across agents and commands: -
      -
    • bug-reproduction-validator agent
    • -
    • design-iterator agent
    • -
    • design-implementation-reviewer agent
    • -
    • figma-design-sync agent
    • -
    • generate_command command
    • -
    -
  • -
-
-
- - -
-
-

v2.0.2

- 2024-11-24 -
- -
-

Changed

-
    -
  • - design-iterator agent - Updated description to emphasize proactive usage when design work isn't coming together on the first attempt. -
  • -
-
-
- - -
-
-

v2.0.1

- 2024-11-24 -
- -
-

Added

-
    -
  • CLAUDE.md - Project instructions with versioning requirements
  • -
  • docs/solutions/plugin-versioning-requirements.md - Workflow documentation
  • -
-
-
- - -
-
-

v2.0.0

- 2024-11-24 - Major Release -
- -

- Major reorganization consolidating agents, commands, and skills from multiple sources into a single, well-organized plugin. -

- -
-

Added

- -

New Agents (seven)

-
    -
  • design-iterator - Iteratively refine UI components through systematic design iterations
  • -
  • design-implementation-reviewer - Verify UI implementations match Figma design specifications
  • -
  • figma-design-sync - Synchronize web implementations with Figma designs
  • -
  • bug-reproduction-validator - Systematically reproduce and validate bug reports
  • -
  • spec-flow-analyzer - Analyze user flows and identify gaps in specifications
  • -
  • lint - Run linting and code quality checks on Ruby and ERB files
  • -
  • ankane-readme-writer - Create READMEs following Ankane-style template for Ruby gems
  • -
- -

New Commands (nine)

-
    -
  • /changelog - Create engaging changelogs for recent merges
  • -
  • /plan_review - Multi-agent plan review in parallel
  • -
  • /resolve_parallel - Resolve TODO comments in parallel
  • -
  • /resolve_pr_parallel - Resolve PR comments in parallel
  • -
  • /reproduce-bug - Reproduce bugs using logs and console
  • -
  • /prime - Prime/setup command
  • -
  • /create-agent-skill - Create or edit Claude Code skills
  • -
  • /heal-skill - Fix skill documentation issues
  • -
  • /codify - Document solved problems for knowledge base
  • -
- -

New Skills (10)

-
    -
  • andrew-kane-gem-writer - Write Ruby gems following Andrew Kane's patterns
  • -
  • codify-docs - Capture solved problems as categorized documentation
  • -
  • create-agent-skills - Expert guidance for creating Claude Code skills
  • -
  • dhh-ruby-style - Write Ruby/Rails code in DHH's 37signals style
  • -
  • dspy-ruby - Build type-safe LLM applications with DSPy.rb
  • -
  • every-style-editor - Review copy for Every's style guide compliance
  • -
  • file-todos - File-based todo tracking system
  • -
  • frontend-design - Create production-grade frontend interfaces
  • -
  • git-worktree - Manage Git worktrees for parallel development
  • -
  • skill-creator - Guide for creating effective Claude Code skills
  • -
-
- -
-

Changed

-

Agents Reorganized by Category

-
    -
  • review/ (10 agents) - Code quality, security, performance reviewers
  • -
  • research/ (four agents) - Documentation, patterns, history analysis
  • -
  • design/ (three agents) - UI/design review and iteration
  • -
  • workflow/ (six agents) - PR resolution, bug validation, linting
  • -
  • docs/ (one agent) - README generation
  • -
-
- -
-

Summary

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Componentv1.1.0v2.0.0Change
Agents1724+7
Commands615+9
Skills111+10
-
-
- - -
-
-

v1.1.0

- 2024-11-22 -
- -
-

Added

-
    -
  • - gemini-imagegen Skill -
      -
    • Text-to-image generation with Google's Gemini API
    • -
    • Image editing and manipulation
    • -
    • Multi-turn refinement via chat interface
    • -
    • Multiple reference image composition (up to 14 images)
    • -
    • Model support: gemini-2.5-flash-image and gemini-3-pro-image-preview
    • -
    -
  • -
-
- -
-

Fixed

-
    -
  • Corrected component counts in documentation (17 agents, not 15)
  • -
-
-
- - -
-
-

v1.0.0

- 2024-10-09 - Initial Release -
- -

- Initial release of the compound-engineering plugin. -

- -
-

Added

- -

17 Specialized Agents

- -

Code Review (five)

-
    -
  • kieran-rails-reviewer - Rails code review with strict conventions
  • -
  • kieran-python-reviewer - Python code review with quality standards
  • -
  • kieran-typescript-reviewer - TypeScript code review
  • -
  • dhh-rails-reviewer - Rails review from DHH's perspective
  • -
  • code-simplicity-reviewer - Final pass for simplicity and minimalism
  • -
- -

Analysis & Architecture (four)

-
    -
  • architecture-strategist - Architectural decisions and compliance
  • -
  • pattern-recognition-specialist - Design pattern analysis
  • -
  • security-sentinel - Security audits and vulnerability assessments
  • -
  • performance-oracle - Performance analysis and optimization
  • -
- -

Research (four)

-
    -
  • framework-docs-researcher - Framework documentation research
  • -
  • best-practices-researcher - External best practices gathering
  • -
  • git-history-analyzer - Git history and code evolution analysis
  • -
  • repo-research-analyst - Repository structure and conventions
  • -
- -

Workflow (three)

-
    -
  • every-style-editor - Every's style guide compliance
  • -
  • pr-comment-resolver - PR comment resolution
  • -
  • feedback-codifier - Feedback pattern codification
  • -
- -

Six Slash Commands

-
    -
  • /plan - Create implementation plans
  • -
  • /review - Comprehensive code reviews
  • -
  • /work - Execute work items systematically
  • -
  • /triage - Triage and prioritize issues
  • -
  • /resolve_todo_parallel - Resolve TODOs in parallel
  • -
  • /generate_command - Generate new slash commands
  • -
- -

Infrastructure

-
    -
  • MIT license
  • -
  • Plugin manifest (plugin.json)
  • -
  • Pre-configured permissions for Rails development
  • -
-
-
- -
-
-
- - - diff --git a/docs/pages/commands.html b/docs/pages/commands.html deleted file mode 100644 index c5be692..0000000 --- a/docs/pages/commands.html +++ /dev/null @@ -1,523 +0,0 @@ - - - - - - Command Reference - Compounding Engineering - - - - - - - - - - -
-
- - -
-
- - -
- -
-

Command Reference

-

- Here's the thing about slash commands: they're workflows you'd spend 20 minutes doing manually, compressed into one line. Type /plan and watch three agents launch in parallel to research your codebase while you grab coffee. That's the point—automation that actually saves time, not busywork dressed up as productivity. -

- - -
-

Workflow Commands (four)

-

These are the big four: Plan your feature, Review your code, Work through the implementation, and Codify what you learned. Every professional developer does this cycle—these commands just make you faster at it.

- -
-
- /plan -
-

- You've got a feature request and a blank page. This command turns "we need OAuth" into a structured plan that actually tells you what to build—researched, reviewed, and ready to execute. -

-

Arguments

-

[feature description, bug report, or improvement idea]

-

Workflow

-
    -
  1. Repository Research (Parallel) - Launch three agents simultaneously: -
      -
    • repo-research-analyst - Project patterns
    • -
    • best-practices-researcher - Industry standards
    • -
    • framework-docs-researcher - Framework documentation
    • -
    -
  2. -
  3. SpecFlow Analysis - Run spec-flow-analyzer for user flows
  4. -
  5. Choose Detail Level: -
      -
    • MINIMAL - Simple bugs/small improvements
    • -
    • MORE - Standard features
    • -
    • A LOT - Major features with phases
    • -
    -
  6. -
  7. Write Plan - Save as plans/<issue_title>.md
  8. -
  9. Review - Call /plan_review for multi-agent feedback
  10. -
-
-
-
-

This command does NOT write code. It only researches and creates the plan.

-
-
-
-
/plan Add OAuth integration for third-party auth
-/plan Fix N+1 query in user dashboard
-
-
- -
-
- /review -
-

- Twelve specialized reviewers examine your PR in parallel—security, performance, architecture, patterns. It's like code review by committee, except the committee finishes in two minutes instead of two days. -

-

Arguments

-

[PR number, GitHub URL, branch name, or "latest"]

-

Workflow

-
    -
  1. Setup - Detect review target, optionally use git-worktree for isolation
  2. -
  3. Launch 12 Parallel Review Agents: -
      -
    • kieran-rails-reviewer, dhh-rails-reviewer
    • -
    • security-sentinel, performance-oracle
    • -
    • architecture-strategist, data-integrity-guardian
    • -
    • pattern-recognition-specialist, git-history-analyzer
    • -
    • And more...
    • -
    -
  4. -
  5. Ultra-Thinking Analysis - Stakeholder perspectives, scenario exploration
  6. -
  7. Simplification Review - Run code-simplicity-reviewer
  8. -
  9. Synthesize Findings - Categorize by severity (P1/P2/P3)
  10. -
  11. Create Todo Files - Using file-todos skill for all findings
  12. -
-
-
-
-

P1 (Critical) findings BLOCK MERGE. Address these before merging.

-
-
-
-
/review 42
-/review https://github.com/owner/repo/pull/42
-/review feature-branch-name
-/review latest
-
-
- -
-
- /work -
-

- Point this at a plan file and watch it execute—reading requirements, setting up environment, running tests, creating commits, opening PRs. It's the "just build the thing" button you wish you always had. -

-

Arguments

-

[plan file, specification, or todo file path]

-

Phases

-
    -
  1. Quick Start -
      -
    • Read plan & clarify requirements
    • -
    • Setup environment (live or worktree)
    • -
    • Create TodoWrite task list
    • -
    -
  2. -
  3. Execute -
      -
    • Task execution loop with progress tracking
    • -
    • Follow existing patterns
    • -
    • Test continuously
    • -
    • Figma sync if applicable
    • -
    -
  4. -
  5. Quality Check -
      -
    • Run test suite
    • -
    • Run linting
    • -
    • Optional reviewer agents for complex changes
    • -
    -
  6. -
  7. Ship It -
      -
    • Create commit with conventional format
    • -
    • Create pull request
    • -
    • Notify with summary
    • -
    -
  8. -
-
-
/work plans/user-authentication.md
-/work todos/042-ready-p1-performance-issue.md
-
-
- -
-
- /compound -
-

- Just fixed a gnarly bug? This captures the solution before you forget it. Seven agents analyze what you did, why it worked, and how to prevent it next time. Each documented solution compounds your team's knowledge. -

-

Arguments

-

[optional: brief context about the fix]

-

Workflow

-
    -
  1. Preconditions - Verify problem is solved and verified working
  2. -
  3. Launch seven parallel subagents: -
      -
    • Context Analyzer - Extract YAML frontmatter skeleton
    • -
    • Solution Extractor - Identify root cause and solution
    • -
    • Related Docs Finder - Find cross-references
    • -
    • Prevention Strategist - Develop prevention strategies
    • -
    • Category Classifier - Determine docs category
    • -
    • Documentation Writer - Create the file
    • -
    • Optional Specialized Agent - Based on problem type
    • -
    -
  4. -
  5. Create Documentation - File in docs/solutions/[category]/
  6. -
-

Auto-Triggers

-

Phrases: "that worked", "it's fixed", "working now", "problem solved"

-
-
/compound
-/compound N+1 query optimization
-
-
-
- - -
-

Utility Commands (12)

-

The supporting cast—commands that do one specific thing really well. Generate changelogs, resolve todos in parallel, triage findings, create new commands. The utilities you reach for daily.

- -
-
- /changelog -
-

- Turn your git history into a changelog people actually want to read. Breaking changes at the top, fun facts at the bottom, everything organized by what matters to your users. -

-

Arguments

-

[optional: daily|weekly, or time period in days]

-

Output Sections

-
    -
  • Breaking Changes (top priority)
  • -
  • New Features
  • -
  • Bug Fixes
  • -
  • Other Improvements
  • -
  • Shoutouts
  • -
  • Fun Fact
  • -
-
-
/changelog daily
-/changelog weekly
-/changelog 7
-
-
- -
-
- /create-agent-skill -
-

- Need a new skill? This walks you through creating one that actually works—proper frontmatter, clear documentation, all the conventions baked in. Think of it as scaffolding for skills. -

-

Arguments

-

[skill description or requirements]

-
-
/create-agent-skill PDF processing for document analysis
-/create-agent-skill Web scraping with error handling
-
-
- -
-
- /generate_command -
-

- Same idea, but for commands instead of skills. Tell it what workflow you're tired of doing manually, and it generates a proper slash command with all the right patterns. -

-

Arguments

-

[command purpose and requirements]

-
-
/generate_command Security audit for codebase
-/generate_command Automated performance testing
-
-
- -
-
- /heal-skill -
-

- Skills drift—APIs change, URLs break, parameters get renamed. When a skill stops working, this figures out what's wrong and fixes the documentation. You approve the changes before anything commits. -

-

Arguments

-

[optional: specific issue to fix]

-

Approval Options

-
    -
  1. Apply and commit
  2. -
  3. Apply without commit
  4. -
  5. Revise changes
  6. -
  7. Cancel
  8. -
-
-
/heal-skill API endpoint URL changed
-/heal-skill parameter validation error
-
-
- -
-
- /plan_review -
-

- Before you execute a plan, have three reviewers tear it apart—Rails conventions, best practices, simplicity. Better to find the problems in the plan than in production. -

-

Arguments

-

[plan file path or plan content]

-

Review Agents

-
    -
  • dhh-rails-reviewer - Rails conventions
  • -
  • kieran-rails-reviewer - Rails best practices
  • -
  • code-simplicity-reviewer - Simplicity and clarity
  • -
-
-
/plan_review plans/user-authentication.md
-
-
- -
-
- /report-bug -
-

- Something broken? This collects all the context—what broke, what you expected, error messages, environment—and files a proper bug report. No more "it doesn't work" issues. -

-

Arguments

-

[optional: brief description of the bug]

-

Information Collected

-
    -
  • Bug category (Agent/Command/Skill/MCP/Installation)
  • -
  • Specific component name
  • -
  • Actual vs expected behavior
  • -
  • Steps to reproduce
  • -
  • Error messages
  • -
  • Environment info (auto-gathered)
  • -
-
-
/report-bug Agent not working
-/report-bug Command failing with timeout
-
-
- -
-
- /reproduce-bug -
-

- Give it a GitHub issue number and it tries to actually reproduce the bug—reading the issue, analyzing code paths, iterating until it finds the root cause. Then it posts findings back to the issue. -

-

Arguments

-

[GitHub issue number]

-

Investigation Process

-
    -
  1. Read GitHub issue details
  2. -
  3. Launch parallel investigation agents
  4. -
  5. Analyze code for failure points
  6. -
  7. Iterate until root cause found
  8. -
  9. Post findings to GitHub issue
  10. -
-
-
/reproduce-bug 142
-
-
- -
-
- /triage -
-

- Got a pile of code review findings or security audit results? This turns them into actionable todos—one at a time, you decide: create the todo, skip it, or modify and re-present. -

-

Arguments

-

[findings list or source type]

-

User Decisions

-
    -
  • "yes" - Create/update todo file, change status to ready
  • -
  • "next" - Skip and delete from todos
  • -
  • "custom" - Modify and re-present
  • -
-
-
-
-

This command does NOT write code. It only categorizes and creates todo files.

-
-
-
-
/triage code-review-findings.txt
-/triage security-audit-results
-
-
- -
-
- /resolve_parallel -
-

- All those TODO comments scattered through your codebase? This finds them, builds a dependency graph, and spawns parallel agents to resolve them all at once. Clears the backlog in minutes. -

-

Arguments

-

[optional: specific TODO pattern or file]

-

Process

-
    -
  1. Analyze TODO comments from codebase
  2. -
  3. Create dependency graph (mermaid diagram)
  4. -
  5. Spawn parallel pr-comment-resolver agents
  6. -
  7. Commit and push after completion
  8. -
-
-
/resolve_parallel
-/resolve_parallel authentication
-/resolve_parallel src/auth/
-
-
- -
-
- /resolve_pr_parallel -
-

- Same deal, but for PR review comments. Fetch unresolved threads, spawn parallel resolver agents, commit the fixes, and mark threads as resolved. Your reviewers will wonder how you're so fast. -

-

Arguments

-

[optional: PR number or current PR]

-

Process

-
    -
  1. Get all unresolved PR comments
  2. -
  3. Create TodoWrite list
  4. -
  5. Launch parallel pr-comment-resolver agents
  6. -
  7. Commit, resolve threads, and push
  8. -
-
-
/resolve_pr_parallel
-/resolve_pr_parallel 123
-
-
- -
-
- /resolve_todo_parallel -
-

- Those todo files in your /todos directory? Point this at them and watch parallel agents knock them out—analyzing dependencies, executing in the right order, marking resolved as they finish. -

-

Arguments

-

[optional: specific todo ID or pattern]

-

Process

-
    -
  1. Get unresolved TODOs from /todos/*.md
  2. -
  3. Analyze dependencies
  4. -
  5. Spawn parallel agents
  6. -
  7. Commit, mark as resolved, push
  8. -
-
-
/resolve_todo_parallel
-/resolve_todo_parallel 042
-/resolve_todo_parallel p1
-
-
- -
-
- /prime -
-

- Your project initialization command. What exactly it does depends on your project setup—think of it as the "get everything ready" button before you start coding. -

-
-
/prime
-
-
-
- - - -
-
-
- - - - - diff --git a/docs/pages/getting-started.html b/docs/pages/getting-started.html deleted file mode 100644 index be37682..0000000 --- a/docs/pages/getting-started.html +++ /dev/null @@ -1,582 +0,0 @@ - - - - - - Getting Started - Compounding Engineering - - - - - - - - - - - -
-
- - - - -
-
- - -
- -
-

Getting Started with Compounding Engineering

-

- Five minutes from now, you'll run a single command that spins up 10 AI agents—each with a different specialty—to review your pull request in parallel. Security, performance, architecture, accessibility, all happening at once. That's the plugin. Let's get you set up. -

- - -
-

Installation

- -

Prerequisites

-
    -
  • Claude Code installed and configured
  • -
  • A GitHub account (for marketplace access)
  • -
  • Node.js 18+ (for MCP servers)
  • -
- -

Step 1: Add the Marketplace

-

Think of the marketplace as an app store. You're adding it to Claude Code's list of places to look for plugins:

-
-
claude /plugin marketplace add https://github.com/EveryInc/compound-engineering-plugin
-
- -

Step 2: Install the Plugin

-

Now grab the plugin itself:

-
-
claude /plugin install compound-engineering
-
- -

Step 3: Verify Installation

-

Check that it worked:

-
-
claude /plugin list
-
-

You'll see compound-engineering in the list. If you do, you're ready.

- -
-
-
-

Known Issue: MCP Servers

-

- The bundled MCP servers (Playwright for browser automation, Context7 for docs) don't always auto-load. If you need them, there's a manual config step below. Otherwise, ignore this—everything else works fine. -

-
-
-
- - -
-

Quick Start

- -

Let's see what this thing can actually do. I'll show you three workflows you'll use constantly:

- -

Run a Code Review

-

This is the big one. Type /review and watch it spawn 10+ specialized reviewers:

-
-
# Review a PR by number
-/review 123
-
-# Review the current branch
-/review
-
-# Review a specific branch
-/review feature/my-feature
-
- -

Use a Specialized Agent

-

Sometimes you just need one expert. Call them directly:

-
-
# Rails code review with Kieran's conventions
-claude agent kieran-rails-reviewer "Review the UserController"
-
-# Security audit
-claude agent security-sentinel "Audit authentication flow"
-
-# Research best practices
-claude agent best-practices-researcher "Find pagination patterns for Rails"
-
- -

Invoke a Skill

-

Skills are like loading a reference book into Claude's brain. When you need deep knowledge in a specific domain:

-
-
# Generate images with Gemini
-skill: gemini-imagegen
-
-# Write Ruby in DHH's style
-skill: dhh-ruby-style
-
-# Create a new Claude Code skill
-skill: create-agent-skills
-
-
- - -
-

Configuration

- -

MCP Server Configuration

-

- If the MCP servers didn't load automatically, paste this into .claude/settings.json: -

-
-
{
-  "mcpServers": {
-    "playwright": {
-      "type": "stdio",
-      "command": "npx",
-      "args": ["-y", "@playwright/mcp@latest"],
-      "env": {}
-    },
-    "context7": {
-      "type": "http",
-      "url": "https://mcp.context7.com/mcp"
-    }
-  }
-}
-
- -

Environment Variables

-

Right now, only one skill needs an API key. If you use Gemini's image generation:

- - - - - - - - - - - - - - - -
Variable — Required For — Description
GEMINI_API_KEY — gemini-imagegen — Google Gemini API key for image generation
-
- - -
-

The Compounding Engineering Philosophy

- -
- Every unit of engineering work should make subsequent units of work easier—not harder. -
- -

Here's how it works in practice—the four-step loop you'll run over and over:

- -
-
-
-

1. Plan

-

- Before you write a single line, figure out what you're building and why. Use research agents to gather examples, patterns, and context. Think of it as Google Search meets expert consultation. -

-
-
-
-

2. Delegate

-

- Now build it—with help. Each agent specializes in something (Rails, security, design). You stay in the driver's seat, but you've got a team of specialists riding shotgun. -

-
-
-
-

3. Assess

-

- Before you ship, run the gauntlet. Security agent checks for vulnerabilities. Performance agent flags N+1 queries. Architecture agent questions your design choices. All at once, all in parallel. -

-
-
-
-

4. Codify

-

- You just solved a problem. Write it down. Next time you (or your teammate) face this, you'll have a runbook. That's the "compounding" part—each solution makes the next one faster. -

-
-
-
- - -
-

Using Agents

- -

- Think of agents as coworkers with different job titles. You wouldn't ask your security engineer to design your UI, right? Same concept here—each agent has a specialty, and you call the one you need. -

- -

Invoking Agents

-
-
# Basic syntax
-claude agent [agent-name] "[optional message]"
-
-# Examples
-claude agent kieran-rails-reviewer
-claude agent security-sentinel "Audit the payment flow"
-claude agent git-history-analyzer "Show changes to user model"
-
- -

Agent Categories

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Category — Count — Purpose
Review — 10 — Code review, security audits, performance analysis
Research — 4 — Best practices, documentation, git history
Design — 3 — UI iteration, Figma sync, design review
Workflow — 5 — Bug reproduction, PR resolution, linting
Docs — 1 — README generation
- -

- - View All Agents - -

-
- - -
-

Using Commands

- -

- Commands are macros that run entire workflows for you. One command can spin up a dozen agents, coordinate their work, collect results, and hand you a summary. It's automation all the way down. -

- -

Running Commands

-
-
# Workflow commands
-/plan
-/review 123
-/work
-/compound
-
-# Utility commands
-/changelog
-/triage
-/reproduce-bug
-
- -

The Review Workflow

-

Let me show you what happens when you run /review. Here's the sequence:

-
    -
  1. Detection - Figures out what you want reviewed (PR number, branch name, or current changes)
  2. -
  3. Isolation - Spins up a git worktree so the review doesn't mess with your working directory
  4. -
  5. Parallel execution - Launches 10+ agents simultaneously (security, performance, architecture, accessibility...)
  6. -
  7. Synthesis - Sorts findings by severity (P1 = blocks merge, P2 = should fix, P3 = nice-to-have)
  8. -
  9. Persistence - Creates todo files so you don't lose track of issues
  10. -
  11. Summary - Hands you a readable report with action items
  12. -
- -

- - View All Commands - -

-
- - -
-

Using Skills

- -

- Here's the difference: agents are who does the work, skills are what they know. When you invoke a skill, you're loading a reference library into Claude's context—patterns, templates, examples, workflows. It's like handing Claude a technical manual. -

- -

Invoking Skills

-
-
# In your prompt, reference the skill
-skill: gemini-imagegen
-
-# Or ask Claude to use it
-"Use the dhh-rails-style skill to refactor this code"
-
- -

Skill Structure

-

Peek inside a skill directory and you'll usually find:

-
    -
  • SKILL.md - The main instructions (what Claude reads first)
  • -
  • references/ - Deep dives on concepts and patterns
  • -
  • templates/ - Copy-paste code snippets
  • -
  • workflows/ - Step-by-step "how to" guides
  • -
  • scripts/ - Actual executable code (when words aren't enough)
  • -
- -

- - View All Skills - -

-
- - -
-

Code Review Workflow Guide

- -

- You'll spend most of your time here. This workflow is why the plugin exists—to turn code review from a bottleneck into a superpower. -

- -

Basic Review

-
-
# Review a PR
-/review 123
-
-# Review current branch
-/review
-
- -

Understanding Findings

-

Every finding gets a priority label. Here's what they mean:

-
    -
  • P1 Critical - Don't merge until this is fixed. Think: SQL injection, data loss, crashes in production.
  • -
  • P2 Important - Fix before shipping. Performance regressions, N+1 queries, shaky architecture.
  • -
  • P3 Nice-to-Have - Would be better, but ship without it if you need to. Documentation, minor cleanup, style issues.
  • -
- -

Working with Todo Files

-

After a review, you'll have a todos/ directory full of markdown files. Each one is a single issue to fix:

-
-
# List all pending todos
-ls todos/*-pending-*.md
-
-# Triage findings
-/triage
-
-# Resolve todos in parallel
-/resolve_todo_parallel
-
-
- - -
-

Creating Custom Agents

- -

- The built-in agents cover a lot of ground, but every team has unique needs. Maybe you want a "rails-api-reviewer" that enforces your company's API standards. That's 10 minutes of work. -

- -

Agent File Structure

-
-
---
-name: my-custom-agent
-description: Brief description of what this agent does
----
-
-# Agent Instructions
-
-You are [role description].
-
-## Your Responsibilities
-1. First responsibility
-2. Second responsibility
-
-## Guidelines
-- Guideline one
-- Guideline two
-
- -

Agent Location

-

Drop your agent file in one of these directories:

-
    -
  • .claude/agents/ - Just for this project (committed to git)
  • -
  • ~/.claude/agents/ - Available in all your projects (stays on your machine)
  • -
- -
-
-
-

The Easy Way

-

- Don't write the YAML by hand. Just run /create-agent-skill and answer a few questions. The command generates the file, validates the format, and puts it in the right place. -

-
-
-
- - -
-

Creating Custom Skills

- -

- Skills are heavier than agents—they're knowledge bases, not just prompts. You're building a mini library that Claude can reference. Worth the effort for things you do repeatedly. -

- -

Skill Directory Structure

-
-
my-skill/
-  SKILL.md           # Main skill file (required)
-  references/        # Supporting documentation
-    concept-one.md
-    concept-two.md
-  templates/         # Code templates
-    basic-template.md
-  workflows/         # Step-by-step procedures
-    workflow-one.md
-  scripts/           # Executable scripts
-    helper.py
-
- -

SKILL.md Format

-
-
---
-name: my-skill
-description: Brief description shown when skill is invoked
----
-
-# Skill Title
-
-Detailed instructions for using this skill.
-
-## Quick Start
-...
-
-## Reference Materials
-The skill includes references in the `references/` directory.
-
-## Templates
-Use templates from the `templates/` directory.
-
- -
-
-
-

Get Help Building Skills

-

- Type skill: create-agent-skills and Claude loads expert guidance on skill architecture, best practices, file organization, and validation. It's like having a senior engineer walk you through it. -

-
-
-
- - - -
-
-
- - - - - diff --git a/docs/pages/mcp-servers.html b/docs/pages/mcp-servers.html deleted file mode 100644 index ecc2f3c..0000000 --- a/docs/pages/mcp-servers.html +++ /dev/null @@ -1,409 +0,0 @@ - - - - - - MCP Servers Reference - Compounding Engineering - - - - - - - - - - -
-
- - -
-
- - -
- -
-

MCP Servers Reference

-

- Think of MCP servers as power tools that plug into Claude Code. Want Claude to actually open a browser and click around your app? That's Playwright. Need the latest Rails docs without leaving your terminal? That's Context7. The plugin bundles both servers—they're meant to load automatically when you install (see the auto-loading note below). -

- -
-
-
-

Known Issue: Auto-Loading

-

- Sometimes MCP servers don't wake up automatically. If Claude can't take screenshots or look up docs, you'll need to add them manually. See Manual Configuration for the fix. -

-
-
- - -
-

Playwright

-

- You know how you can tell a junior developer "open Chrome and click the login button"? Now you can tell Claude the same thing. Playwright gives Claude hands to control a real browser—clicking buttons, filling forms, taking screenshots, running JavaScript. It's like pair programming with someone who has a browser open next to you. -

- -

Tools Provided

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Tool — Description
browser_navigate — Go to any URL—your localhost dev server, production, staging, that competitor's site you're studying
browser_take_screenshot — Capture what you're seeing right now. Perfect for "does this look right?" design reviews
browser_click — Click buttons, links, whatever. Claude finds it by text or CSS selector, just like you would
browser_fill_form — Type into forms faster than you can. Great for testing signup flows without manual clicking
browser_snapshot — Get the page's accessibility tree—how screen readers see it. Useful for understanding structure without HTML noise
browser_evaluate — Run any JavaScript in the page. Check localStorage, trigger functions, read variables—full console access
- -

When You'll Use This

-
    -
  • Design reviews without leaving the terminal - "Take a screenshot of the new navbar on mobile" gets you a PNG in seconds
  • -
  • Testing signup flows while you code - "Fill in the registration form with test@example.com and click submit" runs the test for you
  • -
  • Debugging production issues - "Navigate to the error page and show me what's in localStorage" gives you the state without opening DevTools
  • -
  • Competitive research - "Go to competitor.com and screenshot their pricing page" builds your swipe file automatically
  • -
- -

Example Usage

-
-
# Just talk to Claude naturally—it knows when to use Playwright
-
-# Design review
-"Take a screenshot of the login page"
-
-# Testing a form
-"Navigate to /signup and fill in the email field with test@example.com"
-
-# Debug JavaScript state
-"Go to localhost:3000 and run console.log(window.currentUser)"
-
-# The browser runs in the background. You'll get results without switching windows.
-
- -

Configuration

-
-
{
-  "playwright": {
-    "type": "stdio",
-    "command": "npx",
-    "args": ["-y", "@playwright/mcp@latest"],
-    "env": {}
-  }
-}
-
-
- - -
-

Context7

-

- Ever ask Claude about a framework and get an answer from 2023? Context7 fixes that. It's a documentation service that keeps Claude current with 100+ frameworks—Rails, React, Next.js, Django, whatever you're using. Think of it as having the official docs piped directly into Claude's brain. -

- -

Tools Provided

- - - - - - - - - - - - - - - - - -
Tool — Description
resolve-library-id — Maps "Rails" to the actual library identifier Context7 uses. You don't call this—Claude does it automatically
get-library-docs — Fetches the actual documentation pages. Ask "How does useEffect work?" and this grabs the latest React docs
- -

What's Covered

-

Over 100 frameworks and libraries. Here's a taste of what you can look up:

-
-
-

Backend

-
    -
  • Ruby on Rails
  • -
  • Django
  • -
  • Laravel
  • -
  • Express
  • -
  • FastAPI
  • -
  • Spring Boot
  • -
-
-
-

Frontend

-
    -
  • React
  • -
  • Vue.js
  • -
  • Angular
  • -
  • Svelte
  • -
  • Next.js
  • -
  • Nuxt
  • -
-
-
-

Mobile

-
    -
  • React Native
  • -
  • Flutter
  • -
  • SwiftUI
  • -
  • Kotlin
  • -
-
-
-

Tools & Libraries

-
    -
  • Tailwind CSS
  • -
  • PostgreSQL
  • -
  • Redis
  • -
  • GraphQL
  • -
  • Prisma
  • -
  • And many more...
  • -
-
-
- -

Example Usage

-
-
# Just ask about the framework—Claude fetches current docs automatically
-
-"Look up the Rails ActionCable documentation"
-
-"How does the useEffect hook work in React?"
-
-"What are the best practices for PostgreSQL indexes?"
-
-# You get answers based on the latest docs, not Claude's training cutoff
-
- -

Configuration

-
-
{
-  "context7": {
-    "type": "http",
-    "url": "https://mcp.context7.com/mcp"
-  }
-}
-
-
- - -
-

Manual Configuration

-

- If the servers don't load automatically (you'll know because Claude can't take screenshots or fetch docs), you need to wire them up yourself. It's a two-minute copy-paste job. -

- -

Project-Level Configuration

-

To enable for just this project, add this to .claude/settings.json in your project root:

-
-
{
-  "mcpServers": {
-    "playwright": {
-      "type": "stdio",
-      "command": "npx",
-      "args": ["-y", "@playwright/mcp@latest"],
-      "env": {}
-    },
-    "context7": {
-      "type": "http",
-      "url": "https://mcp.context7.com/mcp"
-    }
-  }
-}
-
- -

Global Configuration

-

Or enable everywhere—every project on your machine gets these servers. Add to ~/.claude/settings.json:

-
-
{
-  "mcpServers": {
-    "playwright": {
-      "type": "stdio",
-      "command": "npx",
-      "args": ["-y", "@playwright/mcp@latest"],
-      "env": {}
-    },
-    "context7": {
-      "type": "http",
-      "url": "https://mcp.context7.com/mcp"
-    }
-  }
-}
-
- -

Requirements

- - - - - - - - - - - - - - - - - -
Server — Requirement
Playwright — Node.js 18+ and npx
Context7 — Internet connection (HTTP endpoint)
- -

Verifying MCP Servers

-

After you add the config, restart Claude Code. Then test that everything works:

-
-
# Ask Claude what it has
-"What MCP tools do you have access to?"
-
-# Test Playwright (should work now)
-"Navigate to example.com and take a screenshot"
-
-# Test Context7 (should fetch real docs)
-"Look up Rails Active Record documentation"
-
-# If either fails, double-check your JSON syntax and file paths
-
-
- - - -
-
-
- - - - - - - diff --git a/docs/pages/skills.html b/docs/pages/skills.html deleted file mode 100644 index a86ae91..0000000 --- a/docs/pages/skills.html +++ /dev/null @@ -1,611 +0,0 @@ - - - - - - Skill Reference - Compounding Engineering - - - - - - - - - - -
-
- - -
-
- - -
- -
-

Skill Reference

-

- Think of skills as reference manuals that Claude Code can read mid-conversation. When you're writing Rails code and want DHH's style, or building a gem like Andrew Kane would, you don't need to paste documentation—just invoke the skill. Claude reads it, absorbs the patterns, and writes code that way. -

- -
-

How to Use Skills

-
-
# In your prompt, reference the skill
-skill: [skill-name]
-
-# Examples
-skill: gemini-imagegen
-skill: dhh-rails-style
-skill: create-agent-skills
-
-
- -
-
-
-

Skills vs Agents

-

- Agents are personas—they do things. Skills are knowledge—they teach Claude how to do things. Use claude agent [name] when you want someone to review your code. Use skill: [name] when you want to write code in a particular style yourself. -

-
-
- - -
-

Development Tools (8)

-

These skills teach Claude specific coding styles and architectural patterns. Use them when you want code that follows a particular philosophy—not just any working code, but code that looks like it was written by a specific person or framework.

- -
-
-

create-agent-skills

- Meta -
-

- You're writing a skill right now, but you're not sure if you're structuring the SKILL.md file correctly. Should the examples go before the theory? How do you organize workflows vs. references? This skill is the answer—it's the master template for building skills themselves. -

-

Capabilities

-
    -
  • Skill architecture and best practices
  • -
  • Router pattern for complex multi-step skills
  • -
  • Progressive disclosure design principles
  • -
  • SKILL.md structure guidance
  • -
  • Asset management (workflows, references, templates, scripts)
  • -
  • XML structure patterns
  • -
-

Workflows Included

-
    -
  • create-new-skill - Start from scratch
  • -
  • add-reference - Add reference documentation
  • -
  • add-template - Add code templates
  • -
  • add-workflow - Add step-by-step procedures
  • -
  • add-script - Add executable scripts
  • -
  • audit-skill - Validate skill structure
  • -
  • verify-skill - Test skill functionality
  • -
-
-
skill: create-agent-skills
-
-
- -
-
-

skill-creator

- Meta -
-

- The simpler, step-by-step version of create-agent-skills. When you just want a checklist to follow from blank file to packaged skill, use this. It's less about theory, more about "do step 1, then step 2." -

-

6-Step Process

-
    -
  1. Understand skill usage patterns with examples
  2. -
  3. Plan reusable skill contents
  4. -
  5. Initialize skill using template
  6. -
  7. Edit skill with clear instructions
  8. -
  9. Package skill into distributable zip
  10. -
  11. Iterate based on testing feedback
  12. -
-
-
skill: skill-creator
-
-
- -
-
-

dhh-rails-style

- Rails -
-

- Comprehensive 37signals Rails conventions based on Marc Köhlbrugge's analysis of 265 PRs from the Fizzy codebase. Covers everything from REST mapping to state-as-records, Turbo/Stimulus patterns, CSS with OKLCH colors, Minitest with fixtures, and Solid Queue/Cache/Cable patterns. -

-

Key Patterns

-
    -
  • REST Purity - Verbs become nouns (close → closure)
  • -
  • State as Records - Boolean columns → separate records
  • -
  • Fat Models - Business logic, authorization, broadcasting
  • -
  • Thin Controllers - 1-5 line actions with concerns
  • -
  • Current Attributes - Request context everywhere
  • -
  • Hotwire/Turbo - Model-level broadcasting, morphing
  • -
-

Reference Files (6)

-
    -
  • controllers.md - REST mapping, concerns, Turbo responses
  • -
  • models.md - Concerns, state records, callbacks, POROs
  • -
  • frontend.md - Turbo, Stimulus, CSS layers, OKLCH
  • -
  • architecture.md - Routing, auth, jobs, caching
  • -
  • testing.md - Minitest, fixtures, integration tests
  • -
  • gems.md - What to use vs avoid, decision framework
  • -
-
-
skill: dhh-rails-style
-
-
- -
-
-

andrew-kane-gem-writer

- Ruby -
-

- Andrew Kane has written 100+ Ruby gems with 374 million downloads. Every gem follows the same patterns: minimal dependencies, class macro DSLs, Rails integration without Rails coupling. When you're building a gem and want it to feel production-ready from day one, this is how you do it. -

-

Philosophy

-
    -
  • Simplicity over cleverness
  • -
  • Zero or minimal dependencies
  • -
  • Explicit code over metaprogramming
  • -
  • Rails integration without Rails coupling
  • -
-

Key Patterns

-
    -
  • Class macro DSL for configuration
  • -
  • ActiveSupport.on_load for Rails integration
  • -
  • class << self with attr_accessor
  • -
  • Railtie pattern for hooks
  • -
  • Minitest (no RSpec)
  • -
-

Reference Files

-
    -
  • references/module-organization.md
  • -
  • references/rails-integration.md
  • -
  • references/database-adapters.md
  • -
  • references/testing-patterns.md
  • -
-
-
skill: andrew-kane-gem-writer
-
-
- -
-
-

dspy-ruby

- AI -
-

- You're adding AI features to your Rails app, but you don't want brittle prompt strings scattered everywhere. DSPy.rb gives you type-safe signatures, composable predictors, and tool-using agents. This skill shows you how to use it—from basic inference to ReAct agents that iterate until they get the answer right. -

-

Predictor Types

-
    -
  • Predict - Basic inference
  • -
  • ChainOfThought - Reasoning with explanations
  • -
  • ReAct - Tool-using agents with iteration
  • -
  • CodeAct - Dynamic code generation
  • -
-

Supported Providers

-
    -
  • OpenAI (GPT-4, GPT-4o-mini)
  • -
  • Anthropic Claude
  • -
  • Google Gemini
  • -
  • Ollama (free, local)
  • -
  • OpenRouter
  • -
-

Requirements

- - - - - - - - - - - - - -
OPENAI_API_KEY — For OpenAI provider
ANTHROPIC_API_KEY — For Anthropic provider
GOOGLE_API_KEY — For Gemini provider
-
-
skill: dspy-ruby
-
-
- -
-
-

frontend-design

- Design -
-

- You've seen what AI usually generates: Inter font, purple gradients, rounded corners on everything. This skill teaches Claude to design interfaces that don't look like every other AI-generated site. It's about purposeful typography, unexpected color palettes, and interfaces with personality. -

-

Design Thinking

-
    -
  • Purpose - What is the interface for?
  • -
  • Tone - What feeling should it evoke?
  • -
  • Constraints - Technical and brand limitations
  • -
  • Differentiation - How to stand out
  • -
-

Focus Areas

-
    -
  • Typography with distinctive font choices
  • -
  • Color & theme coherence with CSS variables
  • -
  • Motion and animation patterns
  • -
  • Spatial composition with asymmetry
  • -
  • Backgrounds (gradients, textures, patterns)
  • -
-
-
-
-

Avoids generic AI aesthetics like Inter fonts, purple gradients, and rounded corners everywhere.

-
-
-
-
skill: frontend-design
-
-
- -
-
-

compound-docs

- Docs -
-

- You just fixed a weird build error after an hour of debugging. Tomorrow you'll forget how you fixed it. This skill automatically detects when you solve something (phrases like "that worked" or "it's fixed") and documents it with YAML frontmatter so you can find it again. Each documented solution compounds your team's knowledge. -

-

Auto-Triggers

-

Phrases: "that worked", "it's fixed", "working now", "problem solved"

-

7-Step Process

-
    -
  1. Detect confirmation phrase
  2. -
  3. Gather context (module, symptom, investigation, root cause)
  4. -
  5. Check existing docs for similar issues
  6. -
  7. Generate filename
  8. -
  9. Validate YAML frontmatter
  10. -
  11. Create documentation in category directory
  12. -
  13. Cross-reference related issues
  14. -
-

Categories

-
    -
  • build-errors/
  • -
  • test-failures/
  • -
  • runtime-errors/
  • -
  • performance-issues/
  • -
  • database-issues/
  • -
  • security-issues/
  • -
-
-
skill: compound-docs
-
-
- -
-
-

agent-native-architecture

- AI -
-

- Build AI agents using prompt-native architecture where features are defined in prompts, not code. When creating autonomous agents, designing MCP servers, or implementing self-modifying systems, this skill guides the "trust the agent's intelligence" philosophy. -

-

Key Patterns

-
    -
  • Prompt-Native Features - Define features in prompts, not code
  • -
  • MCP Tool Design - Build tools agents can use effectively
  • -
  • System Prompts - Write instructions that guide agent behavior
  • -
  • Self-Modification - Allow agents to improve their own prompts
  • -
-

Core Principle

-

Whatever the user can do, the agent can do. Whatever the user can see, the agent can see.

-
-
skill: agent-native-architecture
-
-
-
- - -
-

Content & Workflow (3)

-

Writing, editing, and organizing work. These skills handle everything from style guide compliance to git worktree management—the meta-work that makes the real work easier.

- -
-
-

every-style-editor

- Content -
-

- You wrote a draft, but you're not sure if it matches Every's style guide. Should "internet" be capitalized? Is this comma splice allowed? This skill does a four-phase line-by-line review: context, detailed edits, mechanical checks, and actionable recommendations. It's like having a copy editor who never gets tired. -

-

Four-Phase Review

-
    -
  1. Initial Assessment - Context, type, audience, tone
  2. -
  3. Detailed Line Edit - Sentence structure, punctuation, capitalization
  4. -
  5. Mechanical Review - Spacing, formatting, consistency
  6. -
  7. Recommendations - Actionable improvement suggestions
  8. -
-

Style Checks

-
    -
  • Grammar and punctuation
  • -
  • Style guide compliance
  • -
  • Capitalization rules
  • -
  • Word choice optimization
  • -
  • Formatting consistency
  • -
-
-
skill: every-style-editor
-
-
- -
-
-

file-todos

- Workflow -
-

- Your todo list is a bunch of markdown files in a todos/ directory. Each filename encodes status, priority, and description. No database, no UI, just files with YAML frontmatter. When you need to track work without setting up Jira, this is the system. -

-

File Format

-
-
# Naming convention
-{issue_id}-{status}-{priority}-{description}.md
-
-# Examples
-001-pending-p1-security-vulnerability.md
-002-ready-p2-performance-optimization.md
-003-complete-p3-code-cleanup.md
-
-

Status Values

-
    -
  • pending - Needs triage
  • -
  • ready - Approved for work
  • -
  • complete - Done
  • -
-

Priority Values

-
    -
  • p1 - Critical
  • -
  • p2 - Important
  • -
  • p3 - Nice-to-have
  • -
-

YAML Frontmatter

-
-
---
-status: pending
-priority: p1
-issue_id: "001"
-tags: [security, authentication]
-dependencies: []
----
-
-
-
skill: file-todos
-
-
- -
-
-

git-worktree

- Git -
-

- You're working on a feature branch, but you need to review a PR without losing your current work. Git worktrees let you have multiple branches checked out simultaneously in separate directories. This skill manages them—create, switch, cleanup—so you can context-switch without stashing or committing half-finished code. -

-

Commands

-
-
# Create new worktree
-bash scripts/worktree-manager.sh create feature-login
-
-# List worktrees
-bash scripts/worktree-manager.sh list
-
-# Switch to worktree
-bash scripts/worktree-manager.sh switch feature-login
-
-# Clean up completed worktrees
-bash scripts/worktree-manager.sh cleanup
-
-

Integration

-
    -
  • Works with /review for isolated PR analysis
  • -
  • Works with /work for parallel feature development
  • -
-

Requirements

-
    -
  • Git 2.8+ (for worktree support)
  • -
  • Worktrees stored in .worktrees/ directory
  • -
-
-
skill: git-worktree
-
-
-
- - -
-

Image Generation (1)

-

Generate images with AI. Not stock photos you found on Unsplash—images you describe and the model creates.

- - -
- - - -
-
-
- - - - - diff --git a/docs/plans/2026-02-14-feat-add-copilot-converter-target-plan.md b/docs/plans/2026-02-14-feat-add-copilot-converter-target-plan.md new file mode 100644 index 0000000..a87d0bd --- /dev/null +++ b/docs/plans/2026-02-14-feat-add-copilot-converter-target-plan.md @@ -0,0 +1,328 @@ +--- +title: "feat: Add GitHub Copilot converter target" +type: feat +date: 2026-02-14 +status: complete +--- + +# feat: Add GitHub Copilot Converter Target + +## Overview + +Add GitHub Copilot as a converter target following the established `TargetHandler` pattern. This converts the compound-engineering Claude Code plugin into Copilot's native format: custom agents (`.agent.md`), agent skills (`SKILL.md`), and MCP server configuration JSON. + +**Brainstorm:** `docs/brainstorms/2026-02-14-copilot-converter-target-brainstorm.md` + +## Problem Statement + +The CLI tool (`compound`) already supports converting Claude Code plugins to 5 target formats (OpenCode, Codex, Droid, Cursor, Pi). GitHub Copilot is a widely-used AI coding assistant that now supports custom agents, skills, and MCP servers — but there's no converter target for it. + +## Proposed Solution + +Follow the existing converter pattern exactly: + +1. Define types (`src/types/copilot.ts`) +2. Implement converter (`src/converters/claude-to-copilot.ts`) +3. Implement writer (`src/targets/copilot.ts`) +4. Register target (`src/targets/index.ts`) +5. Add sync support (`src/sync/copilot.ts`, `src/commands/sync.ts`) +6. 
Write tests and documentation + +### Component Mapping + +| Claude Code | Copilot | Output Path | +|-------------|---------|-------------| +| Agents (`.md`) | Custom Agents (`.agent.md`) | `.github/agents/{name}.agent.md` | +| Commands (`.md`) | Agent Skills (`SKILL.md`) | `.github/skills/{name}/SKILL.md` | +| Skills (`SKILL.md`) | Agent Skills (`SKILL.md`) | `.github/skills/{name}/SKILL.md` | +| MCP Servers | Config JSON | `.github/copilot-mcp-config.json` | +| Hooks | Skipped | Warning to stderr | + +## Technical Approach + +### Phase 1: Types + +**File:** `src/types/copilot.ts` + +```typescript +export type CopilotAgent = { + name: string + content: string // Full .agent.md content with frontmatter +} + +export type CopilotGeneratedSkill = { + name: string + content: string // SKILL.md content with frontmatter +} + +export type CopilotSkillDir = { + name: string + sourceDir: string +} + +export type CopilotMcpServer = { + type: string + command?: string + args?: string[] + url?: string + tools: string[] + env?: Record + headers?: Record +} + +export type CopilotBundle = { + agents: CopilotAgent[] + generatedSkills: CopilotGeneratedSkill[] + skillDirs: CopilotSkillDir[] + mcpConfig?: Record +} +``` + +### Phase 2: Converter + +**File:** `src/converters/claude-to-copilot.ts` + +**Agent conversion:** +- Frontmatter: `description` (required, fallback to `"Converted from Claude agent {name}"`), `tools: ["*"]`, `infer: true` +- Pass through `model` if present +- Fold `capabilities` into body as `## Capabilities` section (same as Cursor) +- Use `formatFrontmatter()` utility +- Warn if body exceeds 30,000 characters (`.length`) + +**Command → Skill conversion:** +- Convert to SKILL.md format with frontmatter: `name`, `description` +- Flatten namespaced names: `workflows:plan` → `plan` +- Drop `allowed-tools`, `model`, `disable-model-invocation` silently +- Include `argument-hint` as `## Arguments` section in body + +**Skill pass-through:** +- Map to `CopilotSkillDir` 
as-is (same as Cursor) + +**MCP server conversion:** +- Transform env var names: `API_KEY` → `COPILOT_MCP_API_KEY` +- Skip vars already prefixed with `COPILOT_MCP_` +- Add `type: "local"` for command-based servers, `type: "sse"` for URL-based +- Set `tools: ["*"]` for all servers + +**Content transformation (`transformContentForCopilot`):** + +| Pattern | Input | Output | +|---------|-------|--------| +| Task calls | `Task repo-research-analyst(desc)` | `Use the repo-research-analyst skill to: desc` | +| Slash commands | `/workflows:plan` | `/plan` | +| Path rewriting | `.claude/` | `.github/` | +| Home path rewriting | `~/.claude/` | `~/.copilot/` | +| Agent references | `@security-sentinel` | `the security-sentinel agent` | + +**Hooks:** Warn to stderr if present, skip. + +### Phase 3: Writer + +**File:** `src/targets/copilot.ts` + +**Path resolution:** +- If `outputRoot` basename is `.github`, write directly into it (avoid `.github/.github/` double-nesting) +- Otherwise, nest under `.github/` + +**Write operations:** +- Agents → `.github/agents/{name}.agent.md` (note: `.agent.md` extension) +- Generated skills (from commands) → `.github/skills/{name}/SKILL.md` +- Skill dirs → `.github/skills/{name}/` (copy via `copyDir`) +- MCP config → `.github/copilot-mcp-config.json` (backup existing with `backupFile`) + +### Phase 4: Target Registration + +**File:** `src/targets/index.ts` + +Add import and register: + +```typescript +import { convertClaudeToCopilot } from "../converters/claude-to-copilot" +import { writeCopilotBundle } from "./copilot" + +// In targets record: +copilot: { + name: "copilot", + implemented: true, + convert: convertClaudeToCopilot as TargetHandler["convert"], + write: writeCopilotBundle as TargetHandler["write"], +}, +``` + +### Phase 5: Sync Support + +**File:** `src/sync/copilot.ts` + +Follow the Cursor sync pattern (`src/sync/cursor.ts`): +- Symlink skills to `.github/skills/` using `forceSymlink` +- Validate skill names with 
`isValidSkillName` +- Convert MCP servers with `COPILOT_MCP_` prefix transformation +- Merge MCP config into existing `.github/copilot-mcp-config.json` + +**File:** `src/commands/sync.ts` + +- Add `"copilot"` to `validTargets` array +- Add case in `resolveOutputRoot()`: `case "copilot": return path.join(process.cwd(), ".github")` +- Add import and switch case for `syncToCopilot` +- Update meta description to include "Copilot" + +### Phase 6: Tests + +**File:** `tests/copilot-converter.test.ts` + +Test cases (following `tests/cursor-converter.test.ts` pattern): + +``` +describe("convertClaudeToCopilot") + ✓ converts agents to .agent.md with Copilot frontmatter + ✓ agent description is required, fallback generated if missing + ✓ agent with empty body gets default body + ✓ agent capabilities are prepended to body + ✓ agent model field is passed through + ✓ agent tools defaults to ["*"] + ✓ agent infer defaults to true + ✓ warns when agent body exceeds 30k characters + ✓ converts commands to skills with SKILL.md format + ✓ flattens namespaced command names + ✓ command name collision after flattening is deduplicated + ✓ command allowedTools is silently dropped + ✓ command with argument-hint gets Arguments section + ✓ passes through skill directories + ✓ skill and generated skill name collision is deduplicated + ✓ converts MCP servers with COPILOT_MCP_ prefix + ✓ MCP env vars already prefixed are not double-prefixed + ✓ MCP servers get type field (local vs sse) + ✓ warns when hooks are present + ✓ no warning when hooks are absent + ✓ plugin with zero agents produces empty agents array + ✓ plugin with only skills works + +describe("transformContentForCopilot") + ✓ rewrites .claude/ paths to .github/ + ✓ rewrites ~/.claude/ paths to ~/.copilot/ + ✓ transforms Task agent calls to skill references + ✓ flattens slash commands + ✓ transforms @agent references to agent references +``` + +**File:** `tests/copilot-writer.test.ts` + +Test cases (following 
`tests/cursor-writer.test.ts` pattern): + +``` +describe("writeCopilotBundle") + ✓ writes agents, generated skills, copied skills, and MCP config + ✓ agents use .agent.md file extension + ✓ writes directly into .github output root without double-nesting + ✓ handles empty bundles gracefully + ✓ writes multiple agents as separate .agent.md files + ✓ backs up existing copilot-mcp-config.json before overwriting + ✓ creates skill directories with SKILL.md +``` + +**File:** `tests/sync-copilot.test.ts` + +Test cases (following `tests/sync-cursor.test.ts` pattern): + +``` +describe("syncToCopilot") + ✓ symlinks skills to .github/skills/ + ✓ skips skills with invalid names + ✓ merges MCP config with existing file + ✓ transforms MCP env var names to COPILOT_MCP_ prefix + ✓ writes MCP config with restricted permissions (0o600) +``` + +### Phase 7: Documentation + +**File:** `docs/specs/copilot.md` + +Follow `docs/specs/cursor.md` format: +- Last verified date +- Primary sources (GitHub Docs URLs) +- Config locations table +- Agents section (`.agent.md` format, frontmatter fields) +- Skills section (`SKILL.md` format) +- MCP section (config structure, env var prefix requirement) +- Character limits (30k agent body) + +**File:** `README.md` + +- Add "copilot" to the list of supported targets +- Add usage example: `compound convert --to copilot ./plugins/compound-engineering` +- Add sync example: `compound sync copilot` + +## Acceptance Criteria + +### Converter +- [x] Agents convert to `.agent.md` with `description`, `tools: ["*"]`, `infer: true` +- [x] Agent `model` passes through when present +- [x] Agent `capabilities` fold into body as `## Capabilities` +- [x] Missing description generates fallback +- [x] Empty body generates fallback +- [x] Body exceeding 30k chars triggers stderr warning +- [x] Commands convert to SKILL.md format +- [x] Command names flatten (`workflows:plan` → `plan`) +- [x] Name collisions deduplicated with `-2`, `-3` suffix +- [x] Command 
`allowed-tools` dropped silently +- [x] Skills pass through as `CopilotSkillDir` +- [x] MCP env vars prefixed with `COPILOT_MCP_` +- [x] Already-prefixed env vars not double-prefixed +- [x] MCP servers get `type` field (`local` or `sse`) +- [x] Hooks trigger warning, skip conversion +- [x] Content transformation: Task calls, slash commands, paths, @agent refs + +### Writer +- [x] Agents written to `.github/agents/{name}.agent.md` +- [x] Generated skills written to `.github/skills/{name}/SKILL.md` +- [x] Skill dirs copied to `.github/skills/{name}/` +- [x] MCP config written to `.github/copilot-mcp-config.json` +- [x] Existing MCP config backed up before overwrite +- [x] No double-nesting when outputRoot is `.github` +- [x] Empty bundles handled gracefully + +### CLI Integration +- [x] `compound convert --to copilot` works +- [x] `compound sync copilot` works +- [x] Copilot registered in `src/targets/index.ts` +- [x] Sync resolves output to `.github/` in current directory + +### Tests +- [x] `tests/copilot-converter.test.ts` — all converter tests pass +- [x] `tests/copilot-writer.test.ts` — all writer tests pass +- [x] `tests/sync-copilot.test.ts` — all sync tests pass + +### Documentation +- [x] `docs/specs/copilot.md` — format specification +- [x] `README.md` — updated with copilot target + +## Files to Create + +| File | Purpose | +|------|---------| +| `src/types/copilot.ts` | Type definitions | +| `src/converters/claude-to-copilot.ts` | Converter logic | +| `src/targets/copilot.ts` | Writer logic | +| `src/sync/copilot.ts` | Sync handler | +| `tests/copilot-converter.test.ts` | Converter tests | +| `tests/copilot-writer.test.ts` | Writer tests | +| `tests/sync-copilot.test.ts` | Sync tests | +| `docs/specs/copilot.md` | Format specification | + +## Files to Modify + +| File | Change | +|------|--------| +| `src/targets/index.ts` | Register copilot target | +| `src/commands/sync.ts` | Add copilot to valid targets, output root, switch case | +| `README.md` | Add 
copilot to supported targets | + +## References + +- [Custom agents configuration - GitHub Docs](https://docs.github.com/en/copilot/reference/custom-agents-configuration) +- [About Agent Skills - GitHub Docs](https://docs.github.com/en/copilot/concepts/agents/about-agent-skills) +- [MCP and coding agent - GitHub Docs](https://docs.github.com/en/copilot/concepts/agents/coding-agent/mcp-and-coding-agent) +- Existing converter: `src/converters/claude-to-cursor.ts` +- Existing writer: `src/targets/cursor.ts` +- Existing sync: `src/sync/cursor.ts` +- Existing tests: `tests/cursor-converter.test.ts`, `tests/cursor-writer.test.ts` diff --git a/docs/plans/2026-02-14-feat-auto-detect-install-and-gemini-sync-plan.md b/docs/plans/2026-02-14-feat-auto-detect-install-and-gemini-sync-plan.md new file mode 100644 index 0000000..a4867bc --- /dev/null +++ b/docs/plans/2026-02-14-feat-auto-detect-install-and-gemini-sync-plan.md @@ -0,0 +1,360 @@ +--- +title: Auto-detect install targets and add Gemini sync +type: feat +status: completed +date: 2026-02-14 +completed_date: 2026-02-14 +completed_by: "Claude Opus 4.6" +actual_effort: "Completed in one session" +--- + +# Auto-detect Install Targets and Add Gemini Sync + +## Overview + +Two related improvements to the converter CLI: + +1. **`install --to all`** — Auto-detect which AI coding tools are installed and convert to all of them in one command +2. 
**`sync --target gemini`** — Add Gemini CLI as a sync target (currently missing), then add `sync --target all` to sync personal config to every detected tool + +## Problem Statement + +Users currently must run 6 separate commands to install to all targets: + +```bash +bunx @every-env/compound-plugin install compound-engineering --to opencode +bunx @every-env/compound-plugin install compound-engineering --to codex +bunx @every-env/compound-plugin install compound-engineering --to droid +bunx @every-env/compound-plugin install compound-engineering --to cursor +bunx @every-env/compound-plugin install compound-engineering --to pi +bunx @every-env/compound-plugin install compound-engineering --to gemini +``` + +Similarly, sync requires separate commands per target. And Gemini sync doesn't exist yet. + +## Acceptance Criteria + +### Auto-detect install + +- [x] `install --to all` detects installed tools and installs to each +- [x] Detection checks config directories and/or binaries for each tool +- [x] Prints which tools were detected and which were skipped +- [x] Tools with no detection signal are skipped (not errored) +- [x] `convert --to all` also works (same detection logic) +- [x] Existing `--to <target>` behavior unchanged +- [x] Tests for detection logic and `all` target handling + +### Gemini sync + +- [x] `sync --target gemini` symlinks skills and writes MCP servers to `.gemini/settings.json` +- [x] MCP servers merged into existing `settings.json` (same pattern as writer) +- [x] `gemini` added to `validTargets` in `sync.ts` +- [x] Tests for Gemini sync + +### Sync all + +- [x] `sync --target all` syncs to all detected tools +- [x] Reuses same detection logic as install +- [x] Prints summary of what was synced where + +## Implementation + +### Phase 1: Tool Detection Utility + +**Create `src/utils/detect-tools.ts`** + +```typescript +import os from "os" +import path from "path" +import { pathExists } from "./files" + +export type DetectedTool = { + name: string + detected: boolean + 
reason: string // e.g. "found ~/.codex/" or "not found" +} + +export async function detectInstalledTools(): Promise<DetectedTool[]> { + const home = os.homedir() + const cwd = process.cwd() + + const checks: Array<{ name: string; paths: string[] }> = [ + { name: "opencode", paths: [path.join(home, ".config", "opencode"), path.join(cwd, ".opencode")] }, + { name: "codex", paths: [path.join(home, ".codex")] }, + { name: "droid", paths: [path.join(home, ".factory")] }, + { name: "cursor", paths: [path.join(cwd, ".cursor"), path.join(home, ".cursor")] }, + { name: "pi", paths: [path.join(home, ".pi")] }, + { name: "gemini", paths: [path.join(cwd, ".gemini"), path.join(home, ".gemini")] }, + ] + + const results: DetectedTool[] = [] + for (const check of checks) { + let detected = false + let reason = "not found" + for (const p of check.paths) { + if (await pathExists(p)) { + detected = true + reason = `found ${p}` + break + } + } + results.push({ name: check.name, detected, reason }) + } + return results +} + +export async function getDetectedTargetNames(): Promise<string[]> { + const tools = await detectInstalledTools() + return tools.filter((t) => t.detected).map((t) => t.name) +} +``` + +**Detection heuristics:** + +| Tool | Check paths | Notes | +|------|------------|-------| +| OpenCode | `~/.config/opencode/`, `.opencode/` | XDG config or project-local | +| Codex | `~/.codex/` | Global only | +| Droid | `~/.factory/` | Global only | +| Cursor | `.cursor/`, `~/.cursor/` | Project-local or global | +| Pi | `~/.pi/` | Global only | +| Gemini | `.gemini/`, `~/.gemini/` | Project-local or global | + +### Phase 2: Gemini Sync + +**Create `src/sync/gemini.ts`** + +Follow the Cursor sync pattern (`src/sync/cursor.ts`) since both use JSON config with `mcpServers` key: + +```typescript +import path from "path" +import { symlinkSkills } from "../utils/symlink" +import { backupFile, pathExists, readJson, writeJson } from "../utils/files" +import type { ClaudeMcpServer } from "../types/claude" + 
+export async function syncToGemini( + skills: { name: string; sourceDir: string }[], + mcpServers: Record<string, ClaudeMcpServer>, + outputRoot: string, +): Promise<void> { + const geminiDir = path.join(outputRoot, ".gemini") + + // Symlink skills + if (skills.length > 0) { + const skillsDir = path.join(geminiDir, "skills") + await symlinkSkills(skills, skillsDir) + } + + // Merge MCP servers into settings.json + if (Object.keys(mcpServers).length > 0) { + const settingsPath = path.join(geminiDir, "settings.json") + let existing: Record<string, unknown> = {} + if (await pathExists(settingsPath)) { + await backupFile(settingsPath) + try { + existing = await readJson<Record<string, unknown>>(settingsPath) + } catch { + console.warn("Warning: existing settings.json could not be parsed and will be replaced.") + } + } + + const existingMcp = (existing.mcpServers && typeof existing.mcpServers === "object") + ? existing.mcpServers as Record<string, unknown> + : {} + + const merged = { ...existing, mcpServers: { ...existingMcp, ...convertMcpServers(mcpServers) } } + await writeJson(settingsPath, merged) + } +} + +function convertMcpServers(servers: Record<string, ClaudeMcpServer>) { + const result: Record<string, Record<string, unknown>> = {} + for (const [name, server] of Object.entries(servers)) { + const entry: Record<string, unknown> = {} + if (server.command) { + entry.command = server.command + if (server.args?.length) entry.args = server.args + if (server.env && Object.keys(server.env).length > 0) entry.env = server.env + } else if (server.url) { + entry.url = server.url + if (server.headers && Object.keys(server.headers).length > 0) entry.headers = server.headers + } + result[name] = entry + } + return result +} +``` + +**Update `src/commands/sync.ts`:** + +- Add `"gemini"` to `validTargets` array +- Import `syncToGemini` from `../sync/gemini` +- Add case in switch for `"gemini"` calling `syncToGemini(skills, mcpServers, outputRoot)` + +### Phase 3: Wire `--to all` into Install and Convert + +**Modify `src/commands/install.ts`:** + +```typescript +import { detectInstalledTools } from "../utils/detect-tools" + +// In args 
definition, update --to description: +to: { + type: "string", + default: "opencode", + description: "Target format (opencode | codex | droid | cursor | pi | gemini | all)", +}, + +// In run(), before the existing target lookup: +if (targetName === "all") { + const detected = await detectInstalledTools() + const activeTargets = detected.filter((t) => t.detected) + + if (activeTargets.length === 0) { + console.log("No AI coding tools detected. Install at least one tool first.") + return + } + + console.log(`Detected ${activeTargets.length} tools:`) + for (const tool of detected) { + console.log(` ${tool.detected ? "✓" : "✗"} ${tool.name} — ${tool.reason}`) + } + + // Install to each detected target + for (const tool of activeTargets) { + const handler = targets[tool.name] + const bundle = handler.convert(plugin, options) + if (!bundle) continue + const root = resolveTargetOutputRoot(tool.name, outputRoot, codexHome, piHome, hasExplicitOutput) + await handler.write(root, bundle) + console.log(`Installed ${plugin.manifest.name} to ${tool.name} at ${root}`) + } + + // Codex post-processing + if (activeTargets.some((t) => t.name === "codex")) { + await ensureCodexAgentsFile(codexHome) + } + return +} +``` + +**Same change in `src/commands/convert.ts`** with its version of `resolveTargetOutputRoot`. 
+ +### Phase 4: Wire `--target all` into Sync + +**Modify `src/commands/sync.ts`:** + +```typescript +import { detectInstalledTools } from "../utils/detect-tools" + +// Update validTargets: +const validTargets = ["opencode", "codex", "pi", "droid", "cursor", "gemini", "all"] as const + +// In run(), handle "all": +if (targetName === "all") { + const detected = await detectInstalledTools() + const activeTargets = detected.filter((t) => t.detected).map((t) => t.name) + + if (activeTargets.length === 0) { + console.log("No AI coding tools detected.") + return + } + + console.log(`Syncing to ${activeTargets.length} detected tools...`) + for (const name of activeTargets) { + // call existing sync logic for each target + } + return +} +``` + +### Phase 5: Tests + +**Create `tests/detect-tools.test.ts`** + +- Test detection with mocked directories (create temp dirs, check detection) +- Test `getDetectedTargetNames` returns only detected tools +- Test empty detection returns empty array + +**Create `tests/gemini-sync.test.ts`** + +Follow `tests/sync-cursor.test.ts` pattern: + +- Test skills are symlinked to `.gemini/skills/` +- Test MCP servers merged into `settings.json` +- Test existing `settings.json` is backed up +- Test empty skills/servers produce no output + +**Update `tests/cli.test.ts`** + +- Test `--to all` flag is accepted +- Test `sync --target all` is accepted +- Test `sync --target gemini` is accepted + +### Phase 6: Documentation + +**Update `README.md`:** + +Add to install section: +```bash +# auto-detect installed tools and install to all +bunx @every-env/compound-plugin install compound-engineering --to all +``` + +Add to sync section: +```bash +# Sync to Gemini +bunx @every-env/compound-plugin sync --target gemini + +# Sync to all detected tools +bunx @every-env/compound-plugin sync --target all +``` + +## What We're NOT Doing + +- Not adding binary detection (`which cursor`, `which gemini`) — directory checks are sufficient and don't require shell 
execution +- Not adding interactive prompts ("Install to Cursor? y/n") — auto-detect is fire-and-forget +- Not adding `--exclude` flag for skipping specific targets — can use `--to X --also Y` for manual selection +- Not adding Gemini to the `sync` symlink watcher (no watcher exists for any target) + +## Complexity Assessment + +**Low-medium change.** All patterns are established: +- Detection utility is new but simple (pathExists checks) +- Gemini sync follows cursor sync pattern exactly +- `--to all` is plumbing — iterate detected tools through existing handlers +- No new dependencies needed + +## References + +- Cursor sync (reference pattern): `src/sync/cursor.ts` +- Gemini writer (merge pattern): `src/targets/gemini.ts` +- Install command: `src/commands/install.ts` +- Sync command: `src/commands/sync.ts` +- File utilities: `src/utils/files.ts` +- Symlink utilities: `src/utils/symlink.ts` + +## Completion Summary + +### What Was Delivered +- Tool detection utility (`src/utils/detect-tools.ts`) with `detectInstalledTools()` and `getDetectedTargetNames()` +- Gemini sync (`src/sync/gemini.ts`) following cursor sync pattern — symlinks skills, merges MCP servers into `settings.json` +- `install --to all` and `convert --to all` auto-detect and install to all detected tools +- `sync --target gemini` added to sync command +- `sync --target all` syncs to all detected tools with summary output +- 8 new tests across 2 test files (detect-tools + sync-gemini) + +### Implementation Statistics +- 4 new files, 3 modified files +- 139 tests passing (8 new + 131 existing) +- No new dependencies + +### Git Commits +- `e4d730d` feat: add detect-tools utility and Gemini sync with tests +- `bc655f7` feat: wire --to all into install/convert and --target all/gemini into sync +- `877e265` docs: add auto-detect and Gemini sync to README, bump to 0.8.0 + +### Completion Details +- **Completed By:** Claude Opus 4.6 +- **Date:** 2026-02-14 +- **Session:** Single session, TDD approach diff 
--git a/docs/plans/2026-02-25-feat-windsurf-global-scope-support-plan.md b/docs/plans/2026-02-25-feat-windsurf-global-scope-support-plan.md new file mode 100644 index 0000000..d90eb6a --- /dev/null +++ b/docs/plans/2026-02-25-feat-windsurf-global-scope-support-plan.md @@ -0,0 +1,627 @@ +--- +title: Windsurf Global Scope Support +type: feat +status: completed +date: 2026-02-25 +deepened: 2026-02-25 +prior: docs/plans/2026-02-23-feat-add-windsurf-target-provider-plan.md (removed — superseded) +--- + +# Windsurf Global Scope Support + +## Post-Implementation Revisions (2026-02-26) + +After auditing the implementation against `docs/specs/windsurf.md`, two significant changes were made: + +1. **Agents → Skills (not Workflows)**: Claude agents map to Windsurf Skills (`skills/{name}/SKILL.md`), not Workflows. Skills are "complex multi-step tasks with supporting resources" — a better conceptual match for specialized expertise/personas. Workflows are "reusable step-by-step procedures" — a better match for Claude Commands (slash commands). + +2. **Workflows are flat files**: Command workflows are written to `global_workflows/{name}.md` (global scope) or `workflows/{name}.md` (workspace scope). No subdirectories — the spec requires flat files. + +3. **Content transforms updated**: `@agent-name` references are kept as-is (Windsurf skill invocation syntax). `/command` references produce `/{name}` (not `/commands/{name}`). `Task agent(args)` produces `Use the @agent-name skill: args`. 
+ +### Final Component Mapping (per spec) + +| Claude Code | Windsurf | Output Path | Invocation | +|---|---|---|---| +| Agents (`.md`) | Skills | `skills/{name}/SKILL.md` | `@skill-name` or automatic | +| Commands (`.md`) | Workflows (flat) | `global_workflows/{name}.md` (global) / `workflows/{name}.md` (workspace) | `/{workflow-name}` | +| Skills (`SKILL.md`) | Skills (pass-through) | `skills/{name}/SKILL.md` | `@skill-name` | +| MCP servers | `mcp_config.json` | `mcp_config.json` | N/A | +| Hooks | Skipped with warning | N/A | N/A | +| CLAUDE.md | Skipped | N/A | N/A | + +### Files Changed in Revision + +- `src/types/windsurf.ts` — `agentWorkflows` → `agentSkills: WindsurfGeneratedSkill[]` +- `src/converters/claude-to-windsurf.ts` — `convertAgentToSkill()`, updated content transforms +- `src/targets/windsurf.ts` — Skills written as `skills/{name}/SKILL.md`, flat workflows +- Tests updated to match + +--- + +## Enhancement Summary + +**Deepened on:** 2026-02-25 +**Research agents used:** architecture-strategist, kieran-typescript-reviewer, security-sentinel, code-simplicity-reviewer, pattern-recognition-specialist +**External research:** Windsurf MCP docs, Windsurf tutorial docs + +### Key Improvements from Deepening +1. **HTTP/SSE servers should be INCLUDED** — Windsurf supports all 3 transport types (stdio, Streamable HTTP, SSE). Original plan incorrectly skipped them. +2. **File permissions: use `0o600`** — `mcp_config.json` contains secrets and must not be world-readable. Add secure write support. +3. **Extract `resolveTargetOutputRoot` to shared utility** — both commands duplicate this; adding scope makes it worse. Extract first. +4. **Bug fix: missing `result[name] = entry`** — all 5 review agents caught a copy-paste bug in the `buildMcpConfig` sample code. +5. **`hasPotentialSecrets` to shared utility** — currently in sync.ts, would be duplicated. Extract to `src/utils/secrets.ts`. +6. 
**Windsurf `mcp_config.json` is global-only** — per Windsurf docs, no per-project MCP config support. Workspace scope writes it for forward-compatibility but emit a warning. +7. **Windsurf supports `${env:VAR}` interpolation** — consider writing env var references instead of literal values for secrets. + +### New Considerations Discovered +- Backup files accumulate with secrets and are never cleaned up — cap at 3 backups +- Workspace `mcp_config.json` could be committed to git — warn about `.gitignore` +- `WindsurfMcpServerEntry` type needs `serverUrl` field for HTTP/SSE servers +- Simplicity reviewer recommends handling scope as windsurf-specific in CLI rather than generic `TargetHandler` fields — but brainstorm explicitly chose "generic with windsurf as first adopter". **Decision: keep generic approach** per user's brainstorm decision, with JSDoc documenting the relationship between `defaultScope` and `supportedScopes`. + +--- + +## Overview + +Add a generic `--scope global|workspace` flag to the converter CLI with Windsurf as the first adopter. Global scope writes to `~/.codeium/windsurf/`, making workflows, skills, and MCP servers available across all projects. This also upgrades MCP handling from a human-readable setup doc (`mcp-setup.md`) to a proper machine-readable config (`mcp_config.json`), and removes AGENTS.md generation (the plugin's CLAUDE.md contains development-internal instructions, not user-facing content). + +## Problem Statement / Motivation + +The current Windsurf converter (v0.10.0) writes everything to project-level `.windsurf/`, requiring re-installation per project. Windsurf supports global paths for skills (`~/.codeium/windsurf/skills/`) and MCP config (`~/.codeium/windsurf/mcp_config.json`). Users should install once and get capabilities everywhere. + +Additionally, the v0.10.0 MCP output was a markdown setup guide — not an actual integration. Windsurf reads `mcp_config.json` directly, so we should write to that file. 
+ +## Breaking Changes from v0.10.0 + +This is a **minor version bump** (v0.11.0) with intentional breaking changes to the experimental Windsurf target: + +1. **Default output location changed** — `--to windsurf` now defaults to global scope (`~/.codeium/windsurf/`). Use `--scope workspace` for the old behavior. +2. **AGENTS.md no longer generated** — old files are left in place (not deleted). +3. **`mcp-setup.md` replaced by `mcp_config.json`** — proper machine-readable integration. Old files left in place. +4. **Env var secrets included with warning** — previously redacted, now included (required for the config file to work). +5. **`--output` semantics changed** — `--output` now specifies the direct target directory (not a parent where `.windsurf/` is created). + +## Proposed Solution + +### Phase 0: Extract Shared Utilities (prerequisite) + +**Files:** `src/utils/resolve-output.ts` (new), `src/utils/secrets.ts` (new) + +#### 0a. Extract `resolveTargetOutputRoot` to shared utility + +Both `install.ts` and `convert.ts` have near-identical `resolveTargetOutputRoot` functions that are already diverging (`hasExplicitOutput` exists in install.ts but not convert.ts). Adding scope would make the duplication worse. + +- [x] Create `src/utils/resolve-output.ts` with a unified function: + +```typescript +import os from "os" +import path from "path" +import type { TargetScope } from "../targets" + +export function resolveTargetOutputRoot(options: { + targetName: string + outputRoot: string + codexHome: string + piHome: string + hasExplicitOutput: boolean + scope?: TargetScope +}): string { + const { targetName, outputRoot, codexHome, piHome, hasExplicitOutput, scope } = options + if (targetName === "codex") return codexHome + if (targetName === "pi") return piHome + if (targetName === "droid") return path.join(os.homedir(), ".factory") + if (targetName === "cursor") { + const base = hasExplicitOutput ? 
outputRoot : process.cwd() + return path.join(base, ".cursor") + } + if (targetName === "gemini") { + const base = hasExplicitOutput ? outputRoot : process.cwd() + return path.join(base, ".gemini") + } + if (targetName === "copilot") { + const base = hasExplicitOutput ? outputRoot : process.cwd() + return path.join(base, ".github") + } + if (targetName === "kiro") { + const base = hasExplicitOutput ? outputRoot : process.cwd() + return path.join(base, ".kiro") + } + if (targetName === "windsurf") { + if (hasExplicitOutput) return outputRoot + if (scope === "global") return path.join(os.homedir(), ".codeium", "windsurf") + return path.join(process.cwd(), ".windsurf") + } + return outputRoot +} +``` + +- [x] Update `install.ts` to import and call `resolveTargetOutputRoot` from shared utility +- [x] Update `convert.ts` to import and call `resolveTargetOutputRoot` from shared utility +- [x] Add `hasExplicitOutput` tracking to `convert.ts` (currently missing) + +### Research Insights (Phase 0) + +**Architecture review:** Both commands will call the same function with the same signature. This eliminates the divergence and ensures scope resolution has a single source of truth. The `--also` loop in both commands also uses this function with `handler.defaultScope`. + +**Pattern review:** This follows the same extraction pattern as `resolveTargetHome` in `src/utils/resolve-home.ts`. + +#### 0b. Extract `hasPotentialSecrets` to shared utility + +Currently in `sync.ts:20-31`. The same regex pattern also appears in `claude-to-windsurf.ts:223` as `redactEnvValue`. Extract to avoid a third copy. 
+ +- [x] Create `src/utils/secrets.ts`: + +```typescript +const SENSITIVE_PATTERN = /key|token|secret|password|credential|api_key/i + +export function hasPotentialSecrets( + servers: Record<string, { env?: Record<string, string> }>, +): boolean { + for (const server of Object.values(servers)) { + if (server.env) { + for (const key of Object.keys(server.env)) { + if (SENSITIVE_PATTERN.test(key)) return true + } + } + } + return false +} +``` + +- [x] Update `sync.ts` to import from shared utility +- [x] Use in new windsurf converter + +### Phase 1: Types and TargetHandler + +**Files:** `src/types/windsurf.ts`, `src/targets/index.ts` + +#### 1a. Update WindsurfBundle type + +```typescript +// src/types/windsurf.ts +export type WindsurfMcpServerEntry = { + command?: string + args?: string[] + env?: Record<string, string> + serverUrl?: string + headers?: Record<string, string> +} + +export type WindsurfMcpConfig = { + mcpServers: Record<string, WindsurfMcpServerEntry> +} + +export type WindsurfBundle = { + agentWorkflows: WindsurfWorkflow[] + commandWorkflows: WindsurfWorkflow[] + skillDirs: WindsurfSkillDir[] + mcpConfig: WindsurfMcpConfig | null +} +``` + +- [x] Remove `agentsMd: string | null` +- [x] Replace `mcpSetupDoc: string | null` with `mcpConfig: WindsurfMcpConfig | null` +- [x] Add `WindsurfMcpServerEntry` (supports both stdio and HTTP/SSE) and `WindsurfMcpConfig` types + +### Research Insights (Phase 1a) + +**Windsurf docs confirm** three transport types: stdio (`command` + `args`), Streamable HTTP (`serverUrl`), and SSE (`serverUrl` or `url`). The `WindsurfMcpServerEntry` type must support all three — making `command` optional and adding `serverUrl` and `headers` fields. + +**TypeScript reviewer:** Consider making `WindsurfMcpServerEntry` a discriminated union if strict typing is desired. However, since this mirrors JSON config structure, a flat type with optional fields is pragmatically simpler. + +#### 1b. 
Add TargetScope to TargetHandler + +```typescript +// src/targets/index.ts +export type TargetScope = "global" | "workspace" + +export type TargetHandler<TBundle> = { + name: string + implemented: boolean + /** + * Default scope when --scope is not provided. + * Only meaningful when supportedScopes is defined. + * Falls back to "workspace" if absent. + */ + defaultScope?: TargetScope + /** Valid scope values. If absent, the --scope flag is rejected for this target. */ + supportedScopes?: TargetScope[] + convert: (plugin: ClaudePlugin, options: ClaudeToOpenCodeOptions) => TBundle | null + write: (outputRoot: string, bundle: TBundle) => Promise<void> +} +``` + +- [x] Add `TargetScope` type export +- [x] Add `defaultScope?` and `supportedScopes?` to `TargetHandler` with JSDoc +- [x] Set windsurf target: `defaultScope: "global"`, `supportedScopes: ["global", "workspace"]` +- [x] No changes to other targets (they have no scope fields, flag is ignored) + +### Research Insights (Phase 1b) + +**Simplicity review:** Argued this is premature generalization (only 1 of 8 targets uses scopes). Recommended handling scope as windsurf-specific with `if (targetName !== "windsurf")` guard instead. **Decision: keep generic approach** per brainstorm decision "Generic with windsurf as first adopter", but add JSDoc documenting the invariant. + +**TypeScript review:** Suggested a `ScopeConfig` grouped object to prevent `defaultScope` without `supportedScopes`. The JSDoc approach is simpler and sufficient for now. + +**Architecture review:** Adding optional fields to `TargetHandler` follows Open/Closed Principle — existing targets are unaffected. Clean extension. + +### Phase 2: Converter Changes + +**Files:** `src/converters/claude-to-windsurf.ts` + +#### 2a. Remove AGENTS.md generation + +- [x] Remove `buildAgentsMd()` function +- [x] Remove `agentsMd` from return value + +#### 2b. 
Replace MCP setup doc with MCP config + +- [x] Remove `buildMcpSetupDoc()` function +- [x] Remove `redactEnvValue()` helper +- [x] Add `buildMcpConfig()` that returns `WindsurfMcpConfig | null` +- [x] Include **all** env vars (including secrets) — no redaction +- [x] Use shared `hasPotentialSecrets()` from `src/utils/secrets.ts` +- [x] Include **both** stdio and HTTP/SSE servers (Windsurf supports all transport types) + +```typescript +function buildMcpConfig( + servers?: Record<string, ClaudeMcpServer>, +): WindsurfMcpConfig | null { + if (!servers || Object.keys(servers).length === 0) return null + + const result: Record<string, WindsurfMcpServerEntry> = {} + for (const [name, server] of Object.entries(servers)) { + if (server.command) { + // stdio transport + const entry: WindsurfMcpServerEntry = { command: server.command } + if (server.args?.length) entry.args = server.args + if (server.env && Object.keys(server.env).length > 0) entry.env = server.env + result[name] = entry + } else if (server.url) { + // HTTP/SSE transport + const entry: WindsurfMcpServerEntry = { serverUrl: server.url } + if (server.headers && Object.keys(server.headers).length > 0) entry.headers = server.headers + if (server.env && Object.keys(server.env).length > 0) entry.env = server.env + result[name] = entry + } else { + console.warn(`Warning: MCP server "${name}" has no command or URL. Skipping.`) + continue + } + } + + if (Object.keys(result).length === 0) return null + + // Warn about secrets (don't redact — they're needed for the config to work) + if (hasPotentialSecrets(result)) { + console.warn( + "Warning: MCP servers contain env vars that may include secrets (API keys, tokens).\n" + + " These will be written to mcp_config.json. Review before sharing the config file.", + ) + } + + return { mcpServers: result } +} +``` + +### Research Insights (Phase 2) + +**Windsurf docs (critical correction):** Windsurf supports **stdio, Streamable HTTP, and SSE** transports in `mcp_config.json`. HTTP/SSE servers use `serverUrl` (not `url`). 
The original plan incorrectly planned to skip HTTP/SSE servers. This is now corrected — all transport types are included. + +**All 5 review agents flagged:** The original code sample was missing `result[name] = entry` — the entry was built but never stored. Fixed above. + +**Security review:** The warning message should enumerate which specific env var names triggered detection. Enhanced version: + +```typescript +if (hasPotentialSecrets(result)) { + const flagged = Object.entries(result) + .filter(([, s]) => s.env && Object.keys(s.env).some(k => SENSITIVE_PATTERN.test(k))) + .map(([name]) => name) + console.warn( + `Warning: MCP servers contain env vars that may include secrets: ${flagged.join(", ")}.\n` + + " These will be written to mcp_config.json. Review before sharing the config file.", + ) +} +``` + +**Windsurf env var interpolation:** Windsurf supports `${env:VARIABLE_NAME}` syntax in `mcp_config.json`. Future enhancement: write env var references instead of literal values for secrets. Out of scope for v0.11.0 (requires more research on which fields support interpolation). + +### Phase 3: Writer Changes + +**Files:** `src/targets/windsurf.ts`, `src/utils/files.ts` + +#### 3a. Simplify writer — remove AGENTS.md and double-nesting guard + +The writer always writes directly into `outputRoot`. The CLI resolves the correct output root based on scope. + +- [x] Remove AGENTS.md writing block (lines 10-17) +- [x] Remove `resolveWindsurfPaths()` — no longer needed +- [x] Write workflows, skills, and MCP config directly into `outputRoot` + +### Research Insights (Phase 3a) + +**Pattern review (dissent):** Every other writer (kiro, copilot, gemini, droid) has a `resolve*Paths()` function with a double-nesting guard. Removing it makes Windsurf the only target where the CLI fully owns nesting. This creates an inconsistency in the `write()` contract. + +**Resolution:** Accept the divergence — Windsurf has genuinely different semantics (global vs workspace). 
Add a JSDoc comment on `TargetHandler.write()` documenting that some writers may apply additional nesting while the Windsurf writer expects the final resolved path. Long-term, other targets could migrate to this pattern in a separate refactor. + +#### 3b. Replace MCP setup doc with JSON config merge + +Follow Kiro pattern (`src/targets/kiro.ts:68-92`) with security hardening: + +- [x] Read existing `mcp_config.json` if present +- [x] Backup before overwrite (`backupFile()`) +- [x] Parse existing JSON (warn and replace if corrupted; add `!Array.isArray()` guard) +- [x] Merge at `mcpServers` key: plugin entries overwrite same-name entries, user entries preserved +- [x] Preserve all other top-level keys in existing file +- [x] Write merged result with **restrictive permissions** (`0o600`) +- [x] Emit warning when writing to workspace scope (Windsurf `mcp_config.json` is global-only per docs) + +```typescript +// MCP config merge with security hardening +if (bundle.mcpConfig) { + const mcpPath = path.join(outputRoot, "mcp_config.json") + const backupPath = await backupFile(mcpPath) + if (backupPath) { + console.log(`Backed up existing mcp_config.json to ${backupPath}`) + } + + let existingConfig: Record = {} + if (await pathExists(mcpPath)) { + try { + const parsed = await readJson(mcpPath) + if (parsed && typeof parsed === "object" && !Array.isArray(parsed)) { + existingConfig = parsed as Record + } + } catch { + console.warn("Warning: existing mcp_config.json could not be parsed and will be replaced.") + } + } + + const existingServers = + existingConfig.mcpServers && + typeof existingConfig.mcpServers === "object" && + !Array.isArray(existingConfig.mcpServers) + ? 
(existingConfig.mcpServers as Record) + : {} + const merged = { ...existingConfig, mcpServers: { ...existingServers, ...bundle.mcpConfig.mcpServers } } + await writeJsonSecure(mcpPath, merged) // 0o600 permissions +} +``` + +### Research Insights (Phase 3b) + +**Security review (HIGH):** The current `writeJson()` in `src/utils/files.ts` uses default umask (`0o644`) — world-readable. The sync targets all use `{ mode: 0o600 }` for secret-containing files. The Windsurf writer (and Kiro writer) must do the same. + +**Implementation:** Add a `writeJsonSecure()` helper or add a `mode` parameter to `writeJson()`: + +```typescript +// src/utils/files.ts +export async function writeJsonSecure(filePath: string, data: unknown): Promise { + const content = JSON.stringify(data, null, 2) + await ensureDir(path.dirname(filePath)) + await fs.writeFile(filePath, content + "\n", { encoding: "utf8", mode: 0o600 }) +} +``` + +**Security review (MEDIUM):** Backup files inherit default permissions. Ensure `backupFile()` also sets `0o600` on the backup copy when the source may contain secrets. + +**Security review (MEDIUM):** Workspace `mcp_config.json` could be committed to git. After writing to workspace scope, emit a warning: + +``` +Warning: .windsurf/mcp_config.json may contain secrets. Ensure it is in .gitignore. +``` + +**TypeScript review:** The `readJson>` assertion is unsafe — a valid JSON array or string passes parsing but fails the type. Added `!Array.isArray()` guard. + +**TypeScript review:** The `bundle.mcpConfig` null check is sufficient — when non-null, `mcpServers` is guaranteed to have entries (the converter returns null for empty servers). Simplified from `bundle.mcpConfig && Object.keys(...)`. + +**Windsurf docs (important):** `mcp_config.json` is a **global configuration only** — Windsurf has no per-project MCP config support. Writing it to `.windsurf/` in workspace scope may not be discovered by Windsurf. 
Emit a warning for workspace scope but still write the file for forward-compatibility. + +#### 3c. Updated writer structure + +```typescript +export async function writeWindsurfBundle(outputRoot: string, bundle: WindsurfBundle): Promise { + await ensureDir(outputRoot) + + // Write agent workflows + if (bundle.agentWorkflows.length > 0) { + const agentDir = path.join(outputRoot, "workflows", "agents") + await ensureDir(agentDir) + for (const workflow of bundle.agentWorkflows) { + validatePathSafe(workflow.name, "agent workflow") + const content = formatFrontmatter({ description: workflow.description }, `# ${workflow.name}\n\n${workflow.body}`) + await writeText(path.join(agentDir, `${workflow.name}.md`), content + "\n") + } + } + + // Write command workflows + if (bundle.commandWorkflows.length > 0) { + const cmdDir = path.join(outputRoot, "workflows", "commands") + await ensureDir(cmdDir) + for (const workflow of bundle.commandWorkflows) { + validatePathSafe(workflow.name, "command workflow") + const content = formatFrontmatter({ description: workflow.description }, `# ${workflow.name}\n\n${workflow.body}`) + await writeText(path.join(cmdDir, `${workflow.name}.md`), content + "\n") + } + } + + // Copy skill directories + if (bundle.skillDirs.length > 0) { + const skillsDir = path.join(outputRoot, "skills") + await ensureDir(skillsDir) + for (const skill of bundle.skillDirs) { + validatePathSafe(skill.name, "skill directory") + const destDir = path.join(skillsDir, skill.name) + const resolvedDest = path.resolve(destDir) + if (!resolvedDest.startsWith(path.resolve(skillsDir))) { + console.warn(`Warning: Skill name "${skill.name}" escapes skills/. Skipping.`) + continue + } + await copyDir(skill.sourceDir, destDir) + } + } + + // Merge MCP config (see 3b above) + if (bundle.mcpConfig) { + // ... merge logic from 3b + } +} +``` + +### Phase 4: CLI Wiring + +**Files:** `src/commands/install.ts`, `src/commands/convert.ts` + +#### 4a. 
Add `--scope` flag to both commands + +```typescript +scope: { + type: "string", + description: "Scope level: global | workspace (default varies by target)", +}, +``` + +- [x] Add `scope` arg to `install.ts` +- [x] Add `scope` arg to `convert.ts` + +#### 4b. Validate scope with type guard + +Use a proper type guard instead of unsafe `as TargetScope` cast: + +```typescript +function isTargetScope(value: string): value is TargetScope { + return value === "global" || value === "workspace" +} + +const scopeValue = args.scope ? String(args.scope) : undefined +if (scopeValue !== undefined) { + if (!target.supportedScopes) { + throw new Error(`Target "${targetName}" does not support the --scope flag.`) + } + if (!isTargetScope(scopeValue) || !target.supportedScopes.includes(scopeValue)) { + throw new Error(`Target "${targetName}" does not support --scope ${scopeValue}. Supported: ${target.supportedScopes.join(", ")}`) + } +} +const resolvedScope = scopeValue ?? target.defaultScope ?? "workspace" +``` + +- [x] Add `isTargetScope` type guard +- [x] Add scope validation in both commands (single block, not two separate checks) + +### Research Insights (Phase 4b) + +**TypeScript review:** The original plan cast `scopeValue as TargetScope` before validation — a type lie. Use a proper type guard function to keep the type system honest. + +**Simplicity review:** The two-step validation (check supported, then check exists) can be a single block with the type guard approach above. + +#### 4c. Update output root resolution + +Both commands now use the shared `resolveTargetOutputRoot` from Phase 0a. + +- [x] Call shared function with `scope: resolvedScope` for primary target +- [x] Default scope: `target.defaultScope ?? "workspace"` (only used when target supports scopes) + +#### 4d. Handle `--also` targets + +`--scope` applies only to the primary `--to` target. Extra `--also` targets use their own `defaultScope`. 
+ +- [x] Pass `handler.defaultScope` for `--also` targets (each uses its own default) +- [x] Update the `--also` loop in both commands to use target-specific scope resolution + +### Research Insights (Phase 4d) + +**Architecture review:** There is no way for users to specify scope for an `--also` target (e.g., `--also windsurf:workspace`). Accept as a known v0.11.0 limitation. If users need workspace scope for windsurf, they can run two separate commands. Add a code comment indicating where per-target scope overrides would be added in the future. + +### Phase 5: Tests + +**Files:** `tests/windsurf-converter.test.ts`, `tests/windsurf-writer.test.ts` + +#### 5a. Update converter tests + +- [x] Remove all AGENTS.md tests (lines 275-303: empty plugin, CLAUDE.md missing) +- [x] Remove all `mcpSetupDoc` tests (lines 305-366: stdio, HTTP/SSE, redaction, null) +- [x] Update `fixturePlugin` default — remove `agentsMd` and `mcpSetupDoc` references +- [x] Add `mcpConfig` tests: + - stdio server produces correct JSON structure with `command`, `args`, `env` + - HTTP/SSE server produces correct JSON structure with `serverUrl`, `headers` + - mixed servers (stdio + HTTP) both included + - env vars included (not redacted) — verify actual values present + - `hasPotentialSecrets()` emits console.warn for sensitive keys + - `hasPotentialSecrets()` does NOT warn when no sensitive keys + - no servers produces null mcpConfig + - empty bundle has null mcpConfig + - server with no command and no URL is skipped with warning + +#### 5b. 
Update writer tests + +- [x] Remove AGENTS.md tests (backup test, creation test, double-nesting AGENTS.md parent test) +- [x] Remove double-nesting guard test (guard removed) +- [x] Remove `mcp-setup.md` write test +- [x] Update `emptyBundle` fixture — remove `agentsMd`, `mcpSetupDoc`, add `mcpConfig: null` +- [x] Add `mcp_config.json` tests: + - writes mcp_config.json to outputRoot + - merges with existing mcp_config.json (preserves user servers) + - backs up existing mcp_config.json before overwrite + - handles corrupted existing mcp_config.json (warn and replace) + - handles existing mcp_config.json with array (not object) at root + - handles existing mcp_config.json with `mcpServers: null` + - preserves non-mcpServers keys in existing file + - server name collision: plugin entry wins + - file permissions are 0o600 (not world-readable) +- [x] Update full bundle test — writer writes directly into outputRoot (no `.windsurf/` nesting) + +#### 5c. Add scope resolution tests + +Test the shared `resolveTargetOutputRoot` function: + +- [x] Default scope for windsurf is "global" → resolves to `~/.codeium/windsurf/` +- [x] Explicit `--scope workspace` → resolves to `cwd/.windsurf/` +- [x] `--output` overrides scope resolution (both global and workspace) +- [x] Invalid scope value for windsurf → error +- [x] `--scope` on non-scope target (e.g., opencode) → error +- [x] `--also windsurf` uses windsurf's default scope ("global") +- [x] `isTargetScope` type guard correctly identifies valid/invalid values + +### Phase 6: Documentation + +**Files:** `README.md`, `CHANGELOG.md` + +- [x] Update README.md Windsurf section to mention `--scope` flag and global default +- [x] Add CHANGELOG entry for v0.11.0 with breaking changes documented +- [x] Document migration path: `--scope workspace` for old behavior +- [x] Note that Windsurf `mcp_config.json` is global-only (workspace MCP config may not be discovered) + +## Acceptance Criteria + +- [x] `install compound-engineering --to 
windsurf` writes to `~/.codeium/windsurf/` by default +- [x] `install compound-engineering --to windsurf --scope workspace` writes to `cwd/.windsurf/` +- [x] `--output /custom/path` overrides scope for both commands +- [x] `--scope` on non-supporting target produces clear error +- [x] `mcp_config.json` merges with existing file (backup created, user entries preserved) +- [x] `mcp_config.json` written with `0o600` permissions (not world-readable) +- [x] No AGENTS.md generated for either scope +- [x] Env var secrets included in `mcp_config.json` with `console.warn` listing affected servers +- [x] Both stdio and HTTP/SSE MCP servers included in `mcp_config.json` +- [x] All existing tests updated, all new tests pass +- [x] No regressions in other targets +- [x] `resolveTargetOutputRoot` extracted to shared utility (no duplication) + +## Dependencies & Risks + +**Risk: Global workflow path is undocumented.** Windsurf may not discover workflows from `~/.codeium/windsurf/workflows/`. Mitigation: documented as a known assumption in the brainstorm. Users can `--scope workspace` if global workflows aren't discovered. + +**Risk: Breaking changes for existing v0.10.0 users.** Mitigation: document migration path clearly. `--scope workspace` restores previous behavior. Target is experimental with a small user base. + +**Risk: Workspace `mcp_config.json` not read by Windsurf.** Per Windsurf docs, `mcp_config.json` is global-only configuration. Workspace scope writes the file for forward-compatibility but emits a warning. The primary use case is global scope anyway. + +**Risk: Secrets in `mcp_config.json` committed to git.** Mitigation: `0o600` file permissions, console.warn about sensitive env vars, warning about `.gitignore` for workspace scope. 
+ +## References & Research + +- Spec: `docs/specs/windsurf.md` (authoritative reference for component mapping) +- Kiro MCP merge pattern: [src/targets/kiro.ts:68-92](../../src/targets/kiro.ts) +- Sync secrets warning: [src/commands/sync.ts:20-28](../../src/commands/sync.ts) +- Windsurf MCP docs: https://docs.windsurf.com/windsurf/cascade/mcp +- Windsurf Skills global path: https://docs.windsurf.com/windsurf/cascade/skills +- Windsurf MCP tutorial: https://windsurf.com/university/tutorials/configuring-first-mcp-server +- Adding converter targets (learning): [docs/solutions/adding-converter-target-providers.md](../solutions/adding-converter-target-providers.md) +- Plugin versioning (learning): [docs/solutions/plugin-versioning-requirements.md](../solutions/plugin-versioning-requirements.md) diff --git a/docs/plans/2026-03-01-feat-ce-command-aliases-backwards-compatible-deprecation-plan.md b/docs/plans/2026-03-01-feat-ce-command-aliases-backwards-compatible-deprecation-plan.md new file mode 100644 index 0000000..844cfb9 --- /dev/null +++ b/docs/plans/2026-03-01-feat-ce-command-aliases-backwards-compatible-deprecation-plan.md @@ -0,0 +1,261 @@ +--- +title: "feat: Add ce:* command aliases with backwards-compatible deprecation of workflows:*" +type: feat +status: active +date: 2026-03-01 +--- + +# feat: Add `ce:*` Command Aliases with Backwards-Compatible Deprecation of `workflows:*` + +## Overview + +Rename the five `workflows:*` commands to `ce:*` to make it clearer they belong to compound-engineering. Keep `workflows:*` working as thin deprecation wrappers that warn users and forward to the new commands. + +## Problem Statement / Motivation + +The current `workflows:plan`, `workflows:work`, `workflows:review`, `workflows:brainstorm`, and `workflows:compound` commands are prefixed with `workflows:` — a generic namespace that doesn't signal their origin. Users don't immediately associate them with the compound-engineering plugin. 
+ +The `ce:` prefix is shorter, more memorable, and unambiguously identifies these as compound-engineering commands — consistent with how other plugin commands already use `compound-engineering:` as a namespace. + +## Proposed Solution + +### 1. Create New `ce:*` Commands (Primary) + +Create a `commands/ce/` directory with five new command files. Each file gets the full implementation content from the current `workflows:*` counterpart, with the `name:` frontmatter updated to the new name. + +| New Command | Source Content | +|-------------|---------------| +| `ce:plan` | `commands/workflows/plan.md` | +| `ce:work` | `commands/workflows/work.md` | +| `ce:review` | `commands/workflows/review.md` | +| `ce:brainstorm` | `commands/workflows/brainstorm.md` | +| `ce:compound` | `commands/workflows/compound.md` | + +### 2. Convert `workflows:*` to Deprecation Wrappers (Backwards Compatibility) + +Replace the full content of each `workflows:*` command with a thin wrapper that: +1. Displays a visible deprecation warning to the user +2. Invokes the new `ce:*` command with the same `$ARGUMENTS` + +Example wrapper body: + +```markdown +--- +name: workflows:plan +description: "[DEPRECATED] Use /ce:plan instead. Renamed for clarity." +argument-hint: "[feature description]" +--- + +> ⚠️ **Deprecated:** `/workflows:plan` has been renamed to `/ce:plan`. +> Please update your workflow to use `/ce:plan` instead. +> This alias will be removed in a future version. + +/ce:plan $ARGUMENTS +``` + +### 3. Update All Internal References + +The grep reveals `workflows:*` is referenced in **many more places** than just `lfg`/`slfg`. 
All of these must be updated to point to the new `ce:*` names: + +**Orchestration commands (update to new names):** +- `commands/lfg.md` — `/workflows:plan`, `/workflows:work`, `/workflows:review` +- `commands/slfg.md` — `/workflows:plan`, `/workflows:work`, `/workflows:review` + +**Command bodies that cross-reference (update to new names):** +- `commands/workflows/brainstorm.md` — references `/workflows:plan` multiple times (will be in the deprecated wrapper, so should forward to `/ce:plan`) +- `commands/workflows/compound.md` — self-references and references `/workflows:plan` +- `commands/workflows/plan.md` — references `/workflows:work` multiple times +- `commands/deepen-plan.md` — references `/workflows:work`, `/workflows:compound` + +**Agents (update to new names):** +- `agents/review/code-simplicity-reviewer.md` — references `/workflows:plan` and `/workflows:work` +- `agents/research/git-history-analyzer.md` — references `/workflows:plan` +- `agents/research/learnings-researcher.md` — references `/workflows:plan` + +**Skills (update to new names):** +- `skills/document-review/SKILL.md` — references `/workflows:brainstorm`, `/workflows:plan` +- `skills/git-worktree/SKILL.md` — references `/workflows:review`, `/workflows:work` extensively +- `skills/setup/SKILL.md` — references `/workflows:review`, `/workflows:work` +- `skills/brainstorming/SKILL.md` — references `/workflows:plan` multiple times +- `skills/file-todos/SKILL.md` — references `/workflows:review` + +**Other commands (update to new names):** +- `commands/test-xcode.md` — references `/workflows:review` + +**Historical docs (leave as-is — they document the old names intentionally):** +- `docs/plans/*.md` — old plan files, historical record +- `docs/brainstorms/*.md` — historical +- `docs/solutions/*.md` — historical +- `tests/fixtures/` — test fixtures for the converter (intentionally use `workflows:*` to test namespace handling) +- `CHANGELOG.md` historical entries — don't rewrite history + +### 4. 
Update Documentation
+
+- `CHANGELOG.md` — add new entry documenting the rename and deprecation
+- `plugins/compound-engineering/README.md` — update command table to list `ce:*` as primary, note `workflows:*` as deprecated aliases
+- `plugins/compound-engineering/CLAUDE.md` — update command listing and the "Why `workflows:`?" section
+- Root `README.md` — update the command table (lines 133–136)
+
+### 5. Converter / bunx Install Script Considerations
+
+The `bunx` install script (`src/commands/install.ts`) **only writes files, never deletes them**. This has two implications:
+
+**Now (while deprecated wrappers exist):** No stale file problem. Running `bunx install compound-engineering --to gemini` after this change will:
+- Write `commands/ce/plan.toml` (new primary)
+- Write `commands/workflows/plan.toml` (deprecated wrapper, with deprecation content)
+
+Both coexist correctly. Users who re-run install get both.
+
+**Future (when deprecated wrappers are eventually removed):** The old `commands/workflows/` files will remain stale in users' converted targets. At that point, a cleanup step will be needed — either:
+- Manual instructions: "Delete `.gemini/commands/workflows/` after upgrading"
+- OR add a cleanup pass to the install script that removes known-renamed command directories
+
+For now, document in the plan that stale cleanup is a known future concern when `workflows:*` wrappers are eventually dropped.
+
+## Technical Considerations
+
+### Command Naming
+
+The `ce:` prefix maps to a `commands/ce/` directory. This follows the existing convention where `workflows:plan` maps to `commands/workflows/plan.md`.
+
+### Deprecation Warning Display
+
+Because the wrappers use `disable-model-invocation: true` (see "Deprecation Wrapper Mechanism" below), the deprecation note in the wrapper body is printed to the user by the CLI runtime as a step description before the new command runs — it is not rendered as a styled Claude response.
The wrapper body cannot rely on Claude processing it as free text — the slash command invocation must be executed directly by the CLI runtime, which is why the wrappers require `disable-model-invocation: true` (see below).
+
+### Deprecation Wrapper Mechanism
+
+The deprecated wrappers **must** use `disable-model-invocation: true`. This is the same mechanism `lfg.md` uses — the CLI runtime parses the body and executes slash command invocations directly. Without it, Claude reads the body as text and cannot actually invoke `/ce:plan`.
+
+The deprecation notice in the wrapper body becomes a printed note (same as `lfg` step descriptions), not a styled Claude response. That's acceptable — it still communicates the message.
+
+### Context Token Budget
+
+The 5 new `ce:*` commands add descriptions to the context budget. Keep descriptions short (under 120 chars). The 5 deprecated `workflows:*` wrappers have minimal descriptions (tagged as deprecated) to minimize budget impact.
+
+### Count Impact
+
+The count of active commands remains 22 — the 5 `ce:*` commands replace the 5 `workflows:*` commands as primary, and the deprecated wrappers are not counted as new commands (note that the total number of command *files* increases by 5 while the wrappers exist). No version bump required for counts.
+
+## Acceptance Criteria
+
+- [ ] `commands/ce/` directory created with 5 new command files
+- [ ] Each `ce:*` command has the full implementation from its `workflows:*` counterpart
+- [ ] Each `ce:*` command frontmatter `name:` field set to `ce:plan`, `ce:work`, etc. 
+- [ ] Each `workflows:*` command replaced with a thin deprecation wrapper +- [ ] Deprecation wrapper shows a clear ⚠️ warning with the new command name +- [ ] Deprecation wrapper invokes the new `ce:*` command with `$ARGUMENTS` +- [ ] `lfg.md` updated to use `ce:plan`, `ce:work`, `ce:review` +- [ ] `slfg.md` updated to use `ce:plan`, `ce:work`, `ce:review` +- [ ] All agent `.md` files updated (code-simplicity-reviewer, git-history-analyzer, learnings-researcher) +- [ ] All skill `SKILL.md` files updated (document-review, git-worktree, setup, brainstorming, file-todos) +- [ ] `commands/deepen-plan.md` and `commands/test-xcode.md` updated +- [ ] `CHANGELOG.md` updated with deprecation notice +- [ ] `plugins/compound-engineering/README.md` command table updated +- [ ] `plugins/compound-engineering/CLAUDE.md` command listing updated +- [ ] Root `README.md` command table updated +- [ ] Validate: `/ce:plan "test feature"` works end-to-end +- [ ] Validate: `/workflows:plan "test feature"` shows deprecation warning and continues +- [ ] Re-run `bunx install compound-engineering --to [target]` and confirm both `ce/` and `workflows/` output dirs are written correctly + +## Implementation Steps + +### Step 1: Create `commands/ce/` directory with 5 new files + +For each command, copy the source file and update only the `name:` frontmatter field: + +- `commands/ce/plan.md` — copy `commands/workflows/plan.md`, set `name: ce:plan` +- `commands/ce/work.md` — copy `commands/workflows/work.md`, set `name: ce:work` +- `commands/ce/review.md` — copy `commands/workflows/review.md`, set `name: ce:review` +- `commands/ce/brainstorm.md` — copy `commands/workflows/brainstorm.md`, set `name: ce:brainstorm` +- `commands/ce/compound.md` — copy `commands/workflows/compound.md`, set `name: ce:compound` + +### Step 2: Replace `commands/workflows/*.md` with deprecation wrappers + +Use `disable-model-invocation: true` so the CLI runtime directly invokes `/ce:`. 
The deprecation note is printed as a step description. + +Template for each wrapper: + +```markdown +--- +name: workflows: +description: "[DEPRECATED] Use /ce: instead — renamed for clarity." +argument-hint: "[...]" +disable-model-invocation: true +--- + +NOTE: /workflows: is deprecated. Please use /ce: instead. This alias will be removed in a future version. + +/ce: $ARGUMENTS +``` + +### Step 3: Update all internal references + +**Orchestration commands:** +- `commands/lfg.md` — replace `/workflows:plan`, `/workflows:work`, `/workflows:review` +- `commands/slfg.md` — same + +**Command bodies:** +- `commands/deepen-plan.md` — replace `/workflows:work`, `/workflows:compound` +- `commands/test-xcode.md` — replace `/workflows:review` +- The deprecated `workflows/brainstorm.md`, `workflows/compound.md`, `workflows/plan.md` wrappers — references in their body text pointing to other `workflows:*` commands should also be updated to `ce:*` (since users reading them should see the new names) + +**Agents:** +- `agents/review/code-simplicity-reviewer.md` +- `agents/research/git-history-analyzer.md` +- `agents/research/learnings-researcher.md` + +**Skills:** +- `skills/document-review/SKILL.md` +- `skills/git-worktree/SKILL.md` +- `skills/setup/SKILL.md` +- `skills/brainstorming/SKILL.md` +- `skills/file-todos/SKILL.md` + +### Step 4: Update documentation + +**`plugins/compound-engineering/CHANGELOG.md`** — Add under new version section: +``` +### Changed +- `workflows:plan`, `workflows:work`, `workflows:review`, `workflows:brainstorm`, `workflows:compound` renamed to `ce:plan`, `ce:work`, `ce:review`, `ce:brainstorm`, `ce:compound` for clarity + +### Deprecated +- `workflows:*` commands — use `ce:*` equivalents instead. Aliases remain functional and will be removed in a future version. +``` + +**`plugins/compound-engineering/README.md`** — Update the commands table to list `ce:*` as primary, show `workflows:*` as deprecated aliases. 
+ +**`plugins/compound-engineering/CLAUDE.md`** — Update command listing and the "Why `workflows:`?" section to reflect new `ce:` namespace. + +**Root `README.md`** — Update the commands table (lines 133–136). + +### Step 5: Verify converter output + +After updating, re-run the bunx install script to confirm both targets are written: + +```bash +bunx @every-env/compound-plugin install compound-engineering --to gemini --output /tmp/test-output +ls /tmp/test-output/.gemini/commands/ +# Should show both: ce/ and workflows/ +``` + +The `workflows/` output will contain the deprecation wrapper content. The `ce/` output will have the full implementation. + +**Future cleanup note:** When `workflows:*` wrappers are eventually removed, users must manually delete the stale `workflows/` directories from their converted targets (`.gemini/commands/workflows/`, `.codex/commands/workflows/`, etc.). Consider adding a migration note to the CHANGELOG at that time. + +### Step 6: Run `/release-docs` to update the docs site + +## Dependencies & Risks + +- **Risk:** Users with saved references to `workflows:*` commands in their CLAUDE.md files or scripts. **Mitigation:** The deprecation wrappers remain functional indefinitely. +- **Risk:** Context token budget slightly increases (5 new command descriptions). **Mitigation:** Keep all descriptions short. Deprecated wrappers get minimal descriptions. +- **Risk:** `lfg`/`slfg` orchestration breaks if update is partial. **Mitigation:** Update both in the same commit. 
+ +## Sources & References + +- Existing commands: `plugins/compound-engineering/commands/workflows/*.md` +- Orchestration commands: `plugins/compound-engineering/commands/lfg.md`, `plugins/compound-engineering/commands/slfg.md` +- Plugin metadata: `plugins/compound-engineering/.claude-plugin/plugin.json` +- Changelog: `plugins/compound-engineering/CHANGELOG.md` +- README: `plugins/compound-engineering/README.md` diff --git a/docs/plans/2026-03-01-fix-setup-skill-non-claude-llm-fallback-plan.md b/docs/plans/2026-03-01-fix-setup-skill-non-claude-llm-fallback-plan.md new file mode 100644 index 0000000..fd5cdf7 --- /dev/null +++ b/docs/plans/2026-03-01-fix-setup-skill-non-claude-llm-fallback-plan.md @@ -0,0 +1,140 @@ +--- +title: "fix: Setup skill fails silently on non-Claude LLMs due to AskUserQuestion dependency" +type: fix +status: active +date: 2026-03-01 +--- + +## Enhancement Summary + +**Deepened on:** 2026-03-01 +**Research agents used:** best-practices-researcher, architecture-strategist, code-simplicity-reviewer, scope-explorer + +### Key Improvements +1. Simplified preamble from 16 lines to 4 lines — drop platform name list and example blockquote (YAGNI) +2. Expanded scope: `create-new-skill.md` also has `AskUserQuestion` and needs the same fix +3. Clarified that `codex-agents.ts` change helps command/agent contexts only — does NOT reach skill execution (skills aren't converter-transformed) +4. Added CLAUDE.md skill compliance policy as a third deliverable to prevent recurrence +5. 
Separated two distinct failure modes: tool-not-found error vs silent auto-configuration + +### New Considerations Discovered +- Only Pi converter transforms `AskUserQuestion` (incompletely); all others pass skill content through verbatim — the codex-agents.ts fix is independent of skill execution +- `add-workflow.md` and `audit-skill.md` already explicitly prohibit `AskUserQuestion` — this undocumented policy should be formalized +- Prose fallback is probabilistic (LLM compliance); converter-level transformation is the correct long-term architectural fix +- The brainstorming skill avoids `AskUserQuestion` entirely and works cross-platform — that's the gold standard pattern + +--- + +# fix: Setup Skill Cross-Platform Fallback for AskUserQuestion + +## Overview + +The `setup` skill uses `AskUserQuestion` at 5 decision points. On non-Claude platforms (Codex, Gemini, OpenCode, Copilot, Kiro, etc.), this tool doesn't exist — the LLM reads the skill body but cannot call the tool, causing silent failure or unconsented auto-configuration. Fix by adding a minimal fallback instruction to the skill body, applying the same to `create-new-skill.md`, and adding a policy to the CLAUDE.md skill checklist to prevent recurrence. + +## Problem Statement + +**Two distinct failure modes:** + +1. **Tool-not-found error** — LLM tries to call `AskUserQuestion` as a function; platform returns an error. Setup halts. +2. **Silent skip** — LLM reads `AskUserQuestion` as prose, ignores the decision gate, auto-configures. User never consulted. This is worse — produces a `compound-engineering.local.md` the user never approved. 
`plugins/compound-engineering/skills/setup/SKILL.md` has 5 `AskUserQuestion` blocks:
+
+| Line | Decision Point |
+|------|----------------|
+| 13 | Check existing config: Reconfigure / View / Cancel |
+| 44 | Stack detection: Auto-configure / Customize |
+| 67 | Stack override (multi-option) |
+| 85 | Focus areas (multiSelect) |
+| 104 | Review depth: Thorough / Fast / Comprehensive |
+
+`plugins/compound-engineering/skills/create-agent-skills/workflows/create-new-skill.md` lines 22 and 45 also use `AskUserQuestion`.
+
+Only the Pi converter transforms the reference (incompletely). All other converters (Codex, Gemini, Copilot, Kiro, Droid, Windsurf) pass skill content through verbatim — **skills are not converter-transformed**.
+
+## Proposed Solution
+
+Four deliverables, each addressing a different layer:
+
+### 1. Add 4-line "Interaction Method" preamble to `setup/SKILL.md`
+
+Immediately after the `# Compound Engineering Setup` heading, insert:
+
+```markdown
+## Interaction Method
+
+If `AskUserQuestion` is available, use it for all prompts below.
+
+If not, present each question as a numbered list and wait for a reply before proceeding to the next step. For multiSelect questions, accept comma-separated numbers (e.g. `1, 3`). Never skip or auto-configure.
+```
+
+**Why 4 lines, not 16:** LLMs know what a numbered list is — no example blockquote needed. The branching condition is tool availability, not platform identity — no platform name list needed (YAGNI: new platforms will be added and lists go stale). State the "never skip" rule once here; don't repeat it in `codex-agents.ts`.
+
+**Why this works:** The skill body IS read by the LLM on all platforms when `/setup` is invoked. The agent follows prose instructions regardless of tool availability. This is the same pattern `brainstorming/SKILL.md` uses — it avoids `AskUserQuestion` entirely and uses inline numbered lists — the gold standard cross-platform approach.
+
+### 2. 
Apply the same preamble to `create-new-skill.md` + +`plugins/compound-engineering/skills/create-agent-skills/workflows/create-new-skill.md` uses `AskUserQuestion` at lines 22 and 45. Apply an identical preamble at the top of that file. + +### 3. Strengthen `codex-agents.ts` AskUserQuestion mapping + +This change does NOT fix skill execution (skills bypass the converter pipeline). It improves the AGENTS.md guidance for Codex command/agent contexts. + +Replace (`src/utils/codex-agents.ts` line 21): +``` +- AskUserQuestion/Question: ask the user in chat +``` + +With: +``` +- AskUserQuestion/Question: present choices as a numbered list in chat and wait for a reply number. For multi-select (multiSelect: true), accept comma-separated numbers. Never skip or auto-configure — always wait for the user's response before proceeding. +``` + +### 4. Add lint rule to CLAUDE.md skill compliance checklist + +Add to the "Skill Compliance Checklist" in `plugins/compound-engineering/CLAUDE.md`: + +``` +### AskUserQuestion Usage + +- [ ] If the skill uses `AskUserQuestion`, it must include an "Interaction Method" preamble explaining the numbered-list fallback for non-Claude environments +- [ ] Prefer avoiding `AskUserQuestion` entirely (see brainstorming/SKILL.md pattern) for skills intended to run cross-platform +``` + +## Technical Considerations + +- `setup/SKILL.md` has `disable-model-invocation: true` — this controls session-startup context loading only, not skill-body execution at invocation time +- The prose fallback is probabilistic (LLM compliance), not a build-time guarantee. The correct long-term architectural fix is converter-level transformation of skill content (a `transformSkillContent()` pass in each converter), but that is out of scope here +- Commands with `AskUserQuestion` (`ce/brainstorm.md`, `ce/plan.md`, `test-browser.md`, etc.) 
have the same gap but are out of scope — explicitly noted as a future task + +## Acceptance Criteria + +- [ ] `setup/SKILL.md` has a 4-line "Interaction Method" preamble after the opening heading +- [ ] `create-new-skill.md` has the same preamble +- [ ] The skills still use `AskUserQuestion` as primary — no change to Claude Code behavior +- [ ] `codex-agents.ts` AskUserQuestion line updated with structured guidance +- [ ] `plugins/compound-engineering/CLAUDE.md` skill checklist includes AskUserQuestion policy +- [ ] No regression: on Claude Code, setup works exactly as before + +## Files + +- `plugins/compound-engineering/skills/setup/SKILL.md` — Add 4-line preamble after line 8 +- `plugins/compound-engineering/skills/create-agent-skills/workflows/create-new-skill.md` — Add same preamble at top +- `src/utils/codex-agents.ts` — Strengthen AskUserQuestion mapping (line 21) +- `plugins/compound-engineering/CLAUDE.md` — Add AskUserQuestion policy to skill compliance checklist + +## Future Work (Out of Scope) + +- Converter-level `transformSkillContent()` for all targets — build-time guarantee instead of prose fallback +- Commands with `AskUserQuestion` (`ce/brainstorm.md`, `ce/plan.md`, `test-browser.md`) — same failure mode, separate fix + +## Sources & References + +- Issue: [#204](https://github.com/EveryInc/compound-engineering-plugin/issues/204) +- `plugins/compound-engineering/skills/setup/SKILL.md:13,44,67,85,104` +- `plugins/compound-engineering/skills/create-agent-skills/workflows/create-new-skill.md:22,45` +- `src/utils/codex-agents.ts:21` +- `src/converters/claude-to-pi.ts:106` — Pi converter (reference pattern) +- `plugins/compound-engineering/skills/brainstorming/SKILL.md` — gold standard cross-platform skill (no AskUserQuestion) +- `plugins/compound-engineering/skills/create-agent-skills/workflows/add-workflow.md:12,37` — existing "DO NOT use AskUserQuestion" policy +- `docs/solutions/adding-converter-target-providers.md` diff --git 
a/docs/plans/2026-03-03-feat-sync-claude-mcp-all-supported-providers-plan.md b/docs/plans/2026-03-03-feat-sync-claude-mcp-all-supported-providers-plan.md new file mode 100644 index 0000000..63fab2e --- /dev/null +++ b/docs/plans/2026-03-03-feat-sync-claude-mcp-all-supported-providers-plan.md @@ -0,0 +1,639 @@ +--- +title: "feat: Sync Claude MCP servers to all supported providers" +type: feat +date: 2026-03-03 +status: completed +deepened: 2026-03-03 +--- + +# feat: Sync Claude MCP servers to all supported providers + +## Overview + +Expand the `sync` command so a user's local Claude Code MCP configuration can be propagated to every provider this CLI can reasonably support, instead of only the current partial set. + +Today, `sync` already symlinks Claude skills and syncs MCP servers for a subset of targets. The gap is that install/convert support has grown much faster than sync support, so the product promise in `README.md` has drifted away from what `src/commands/sync.ts` can actually do. + +This feature should close that parity gap without changing the core sync contract: + +- Claude remains the source of truth for personal skills and MCP servers. +- Skills stay symlinked, not copied. +- Existing user config in the destination tool is preserved where possible. +- Target-specific MCP formats stay target-specific. + +## Problem Statement + +The current implementation has three concrete problems: + +1. `sync` only knows about `opencode`, `codex`, `pi`, `droid`, `copilot`, and `gemini`, while install/convert now supports `kiro`, `windsurf`, `openclaw`, and `qwen` too. +2. `sync --target all` relies on stale detection metadata that still includes `cursor`, but misses newer supported tools. +3. Existing MCP sync support is incomplete even for some already-supported targets: + - `codex` only emits stdio servers and silently drops remote MCP servers. + - `droid` is still skills-only even though Factory now documents `mcp.json`. 
+ +User impact: + +- A user can install the plugin to more providers than they can sync their personal Claude setup to. +- `sync --target all` does not mean "all supported tools" anymore. +- Users with remote MCP servers in Claude get partial results depending on target. + +## Research Summary + +### No Relevant Brainstorm + +I checked recent brainstorms in `docs/brainstorms/` and found no relevant document for this feature within the last 14 days. + +### Internal Findings + +- `src/commands/sync.ts:15-125` hardcodes the sync target list, output roots, and per-target dispatch. It omits `windsurf`, `kiro`, `openclaw`, and `qwen`. +- `src/utils/detect-tools.ts:15-22` still detects `cursor`, but not `windsurf`, `kiro`, `openclaw`, or `qwen`. +- `src/parsers/claude-home.ts:11-19` already gives sync exactly the right inputs: personal skills plus `settings.json` `mcpServers`. +- `src/sync/codex.ts:25-91` only serializes stdio MCP servers, even though Codex supports remote MCP config. +- `src/sync/droid.ts:6-21` symlinks skills but ignores MCP entirely. +- Target writers already encode several missing MCP formats and merge behaviors: + - `src/targets/windsurf.ts:65-92` + - `src/targets/kiro.ts:68-91` + - `src/targets/openclaw.ts:34-42` + - `src/targets/qwen.ts:9-15` +- `README.md:89-123` promises "Sync Personal Config" but only documents the old subset of targets. + +### Institutional Learnings + +`docs/solutions/adding-converter-target-providers.md:20-32` and `docs/solutions/adding-converter-target-providers.md:208-214` reinforce the right pattern for this feature: + +- keep target mappings explicit, +- treat MCP conversion as target-specific, +- warn on unsupported features instead of forcing fake parity, +- and add tests for each mapping. + +Note: `docs/solutions/patterns/critical-patterns.md` does not exist in this repository, so there was no critical-patterns file to apply. 
+ +### External Findings + +Official docs confirm that the missing targets are not all equivalent, so this cannot be solved with a generic JSON pass-through. + +| Target | Official MCP / skills location | Key notes | +| --- | --- | --- | +| Factory Droid | `~/.factory/mcp.json`, `.factory/mcp.json`, `~/.factory/skills/` | Supports `stdio` and `http`; user config overrides project config. | +| Windsurf | `~/.codeium/windsurf/mcp_config.json`, `~/.codeium/windsurf/skills/` | Supports `stdio`, Streamable HTTP, and SSE; remote config uses `serverUrl` or `url`. | +| Kiro | `~/.kiro/settings/mcp.json`, `.kiro/settings/mcp.json`, `~/.kiro/skills/` | Supports user and workspace config; remote MCP support was added after this repo's local Kiro spec was written. | +| Qwen Code | `~/.qwen/settings.json`, `.qwen/settings.json`, `~/.qwen/skills/`, `.qwen/skills/` | Supports `stdio`, `http`, and `sse`; official docs say prefer `http`, with `sse` treated as legacy/deprecated. | +| OpenClaw | `~/.openclaw/skills`, `/skills`, `~/.openclaw/openclaw.json` | Skills are well-documented; a generic MCP server config surface is not clearly documented in official docs, so MCP sync needs validation before implementation is promised. | + +Additional important findings: + +- Kiro's current official behavior supersedes the local repo spec that says "workspace only" and "stdio only". +- Qwen's current docs explicitly distinguish `httpUrl` from legacy SSE `url`; blindly copying Claude's `url` is too lossy. +- Factory and Windsurf both support remote MCP, so `droid` should no longer be treated as skills-only. + +## Proposed Solution + +### Product Decision + +Treat this as **sync parity for MCP-capable providers**, not as a one-off patch. 
+ +That means this feature should: + +- add missing sync targets where the provider has a documented skills/MCP surface, +- upgrade partial implementations where existing sync support drops valid Claude MCP data, +- and replace stale detection metadata so `sync --target all` is truthful again. + +### Scope + +#### In Scope + +- Add MCP sync coverage for: + - `droid` + - `windsurf` + - `kiro` + - `qwen` +- Expand `codex` sync to support remote MCP servers. +- Add provider detection for newly supported sync targets. +- Keep skills syncing for all synced targets. +- Update CLI help text, README sync docs, and tests. + +#### Conditional / Validation Gate + +- `openclaw` skills sync is straightforward and should be included if the target is added to `sync`. +- `openclaw` MCP sync should only be implemented if its config surface is validated against current upstream docs or current upstream source. If that validation fails, the feature should explicitly skip OpenClaw MCP sync with a warning rather than inventing a format. + +#### Out of Scope + +- Standardizing all existing sync targets onto user-level paths only. +- Reworking install/convert output roots. +- Hook sync. +- A full rewrite of target writers. + +### Design Decisions + +#### 0. Keep existing sync roots stable unless this feature is explicitly adding a new target + +Do not use this feature to migrate existing `copilot` and `gemini` sync behavior. + +Backward-compatibility rule: + +- existing targets keep their current sync roots unless a correctness bug forces a change, +- newly added sync targets use the provider's documented personal/global config surface, +- and any future root migration belongs in a separate plan. 
+ +Planned sync roots after this feature: + +| Target | Sync root | Notes | +| --- | --- | --- | +| `opencode` | `~/.config/opencode` | unchanged | +| `codex` | `~/.codex` | unchanged | +| `pi` | `~/.pi/agent` | unchanged | +| `droid` | `~/.factory` | unchanged root, new MCP file | +| `copilot` | `.github` | unchanged for backwards compatibility | +| `gemini` | `.gemini` | unchanged for backwards compatibility | +| `windsurf` | `~/.codeium/windsurf` | new | +| `kiro` | `~/.kiro` | new | +| `qwen` | `~/.qwen` | new | +| `openclaw` | `~/.openclaw` | new, MCP still validation-gated | + +#### 1. Add a dedicated sync target registry + +Do not keep growing `sync.ts` as a hand-maintained switch statement. + +Create a dedicated sync registry, for example: + +### `src/sync/registry.ts` + +```ts +import os from "os" +import path from "path" +import type { ClaudeHomeConfig } from "../parsers/claude-home" + +export type SyncTargetDefinition = { + name: string + detectPaths: (home: string, cwd: string) => string[] + resolveOutputRoot: (home: string, cwd: string) => string + sync: (config: ClaudeHomeConfig, outputRoot: string) => Promise<void> +} +``` + +This registry becomes the single source of truth for: + +- valid `sync` targets, +- `sync --target all` detection, +- output root resolution, +- and dispatch. + +This avoids the current drift between: + +- `src/commands/sync.ts` +- `src/utils/detect-tools.ts` +- `README.md` + +#### 2. Preserve sync semantics, not writer semantics + +Do not directly reuse install target writers for sync. + +Reason: + +- writers mostly copy skill directories, +- sync intentionally symlinks skills, +- writers often emit full plugin/install bundles, +- sync only needs personal skills plus MCP config. + +However, provider-specific MCP conversion helpers should be extracted or reused where practical so sync and writer logic do not diverge again. + +#### 3.
Keep merge behavior additive, with Claude winning on same-name collisions + +For JSON-based targets: + +- preserve unrelated user keys, +- preserve unrelated user MCP servers, +- but if the same server name exists in Claude and the target config, Claude's value should overwrite that server entry during sync. + +Codex remains the special case: + +- continue using the managed marker block, +- remove the previous managed block, +- rewrite the managed block from Claude, +- leave the rest of `config.toml` untouched. + +#### 4. Secure config writes where secrets may exist + +Any config file that may contain MCP headers or env vars should be written with restrictive permissions where the platform already supports that pattern. + +At minimum: + +- `config.toml` +- `mcp.json` +- `mcp_config.json` +- `settings.json` + +should follow the repo's existing "secure write" conventions where possible. + +#### 5. Do not silently coerce ambiguous remote transports + +Qwen and possibly future targets distinguish Streamable HTTP from legacy SSE. + +Use this mapping rule: + +- if Claude explicitly provides `type: "sse"` or an equivalent known signal, map to the target's SSE field, +- otherwise prefer the target's HTTP form for remote URLs, +- and log a warning when a target requires more specificity than Claude provides. + +## Provider Mapping Plan + +### Existing Targets to Upgrade + +#### Codex + +Current issue: + +- only stdio servers are synced. + +Implementation: + +- extend `syncToCodex()` so remote MCP servers are serialized into the Codex TOML format, not dropped. +- keep the existing marker-based idempotent section handling. + +Notes: + +- This is a correctness fix, not a new target. + +#### Droid / Factory + +Current issue: + +- skills-only sync despite current official MCP support. + +Implementation: + +- add `src/sync/droid.ts` MCP config writing to `~/.factory/mcp.json`. +- merge with existing `mcpServers`. +- support both `stdio` and `http`. 
+ +### New Sync Targets + +#### Windsurf + +Add `src/sync/windsurf.ts`: + +- symlink Claude skills into `~/.codeium/windsurf/skills/` +- merge MCP servers into `~/.codeium/windsurf/mcp_config.json` +- support `stdio`, Streamable HTTP, and SSE +- prefer `serverUrl` for remote HTTP config +- preserve unrelated existing servers +- write with secure permissions + +Reference implementation: + +- `src/targets/windsurf.ts:65-92` + +#### Kiro + +Add `src/sync/kiro.ts`: + +- symlink Claude skills into `~/.kiro/skills/` +- merge MCP servers into `~/.kiro/settings/mcp.json` +- support both local and remote MCP servers +- preserve user config already present in `mcp.json` + +Important: + +- This feature must treat the repository's local Kiro spec as stale where it conflicts with official 2025-2026 Kiro docs/blog posts. + +Reference implementation: + +- `src/targets/kiro.ts:68-91` + +#### Qwen + +Add `src/sync/qwen.ts`: + +- symlink Claude skills into `~/.qwen/skills/` +- merge MCP servers into `~/.qwen/settings.json` +- map stdio directly +- map remote URLs to `httpUrl` by default +- only emit legacy SSE `url` when Claude transport clearly indicates SSE + +Important: + +- capture the deprecation note in docs/comments: SSE is legacy, so HTTP is the default remote mapping. + +#### OpenClaw + +Add `src/sync/openclaw.ts` only if validated during implementation: + +- symlink skills into `~/.openclaw/skills` +- optionally merge MCP config into `~/.openclaw/openclaw.json` if the official/current upstream contract is confirmed + +Fallback behavior if MCP config cannot be validated: + +- sync skills only, +- emit a warning that OpenClaw MCP sync is skipped because the official config surface is not documented clearly enough. 
+ +## Implementation Phases + +### Phase 1: Registry and shared helpers + +Files: + +- `src/commands/sync.ts` +- `src/utils/detect-tools.ts` +- `src/sync/registry.ts` (new) +- `src/sync/skills.ts` or `src/utils/symlink.ts` extension +- optional `src/sync/mcp-merge.ts` + +Tasks: + +- move sync target metadata into a single registry +- make `validTargets` derive from the registry +- make `sync --target all` use the registry +- update detection to include supported sync targets instead of stale `cursor` +- extract a shared helper for validated skill symlinking + +### Phase 2: Upgrade existing partial targets + +Files: + +- `src/sync/codex.ts` +- `src/sync/droid.ts` +- `tests/sync-droid.test.ts` +- new or expanded `tests/sync-codex.test.ts` + +Tasks: + +- add remote MCP support to Codex sync +- add MCP config writing to Droid sync +- preserve current skill symlink behavior + +### Phase 3: Add missing sync targets + +Files: + +- `src/sync/windsurf.ts` +- `src/sync/kiro.ts` +- `src/sync/qwen.ts` +- optionally `src/sync/openclaw.ts` +- `tests/sync-windsurf.test.ts` +- `tests/sync-kiro.test.ts` +- `tests/sync-qwen.test.ts` +- optionally `tests/sync-openclaw.test.ts` + +Tasks: + +- implement skill symlink + MCP merge for each target +- align output paths with the target's documented personal config surface +- secure writes and corrupted-config fallbacks + +### Phase 4: CLI, docs, and detection parity + +Files: + +- `src/commands/sync.ts` +- `src/utils/detect-tools.ts` +- `tests/detect-tools.test.ts` +- `tests/cli.test.ts` +- `README.md` +- optionally `docs/specs/kiro.md` + +Tasks: + +- update `sync` help text and summary output +- ensure `sync --target all` only reports real sync-capable tools +- document newly supported sync targets +- fix stale Kiro assumptions if repository docs are updated in the same change + +## SpecFlow Analysis + +### Primary user flows + +#### Flow 1: Explicit sync to one target + +1. User runs `bunx @every-env/compound-plugin sync --target ` +2. 
CLI loads `~/.claude/skills` and `~/.claude/settings.json` +3. CLI resolves that provider's sync root +4. Skills are symlinked +5. MCP config is merged +6. CLI prints the destination path and completion summary + +#### Flow 2: Sync to all detected tools + +1. User runs `bunx @every-env/compound-plugin sync` +2. CLI detects installed/supported tools +3. CLI prints which tools were found and which were skipped +4. CLI syncs each detected target in sequence +5. CLI prints per-target success lines + +#### Flow 3: Existing config already present + +1. User already has destination config file(s) +2. Sync reads and parses the existing file +3. Existing unrelated keys are preserved +4. Claude MCP entries are merged in +5. Corrupt config produces a warning and replacement behavior + +### Edge cases to account for + +- Claude has zero MCP servers: skills still sync, no config file is written. +- Claude has remote MCP servers: targets that support remote config receive them; unsupported transports warn, not crash. +- Existing target config is invalid JSON/TOML: warn and replace the managed portion. +- Skill name contains path traversal characters: skip with warning, same as current behavior. +- Real directory already exists where a symlink would go: skip safely, do not delete user data. +- `sync --target all` detects a tool with skills support but unclear MCP support: sync only the documented subset and warn explicitly. + +### Critical product decisions already assumed + +- `sync` remains additive and non-destructive. +- Sync roots may differ from install roots when the provider has a documented personal config location. +- OpenClaw MCP support is validation-gated rather than assumed. + +## Acceptance Criteria + +### Functional Requirements + +- [x] `sync --target` accepts `windsurf`, `kiro`, and `qwen`, in addition to the existing targets. +- [x] `sync --target droid` writes MCP servers to Factory's documented `mcp.json` format instead of remaining skills-only. 
+- [x] `sync --target codex` syncs both stdio and remote MCP servers. +- [x] `sync --target all` detects only sync-capable supported tools and includes the new targets. +- [x] Claude personal skills continue to be symlinked, not copied. +- [x] Existing destination config keys unrelated to MCP are preserved during merge. +- [x] Existing same-named MCP entries are refreshed from Claude for sync-managed targets. +- [x] Unsafe skill names are skipped without deleting user content. +- [x] If OpenClaw MCP sync is not validated, the CLI warns and skips MCP sync for OpenClaw instead of writing an invented format. + +### Non-Functional Requirements + +- [x] MCP config files that may contain secrets are written with restrictive permissions where supported. +- [x] Corrupt destination config files warn and recover cleanly. +- [x] New sync code does not duplicate target detection metadata in multiple places. +- [x] Remote transport mapping is explicit and tested, especially for Qwen and Codex. + +### Quality Gates + +- [x] Add target-level sync tests for every new or upgraded provider. +- [x] Update `tests/detect-tools.test.ts` for new detection rules and remove stale cursor expectations. +- [x] Add or expand CLI coverage for `sync --target all`. +- [x] `bun test` passes. 
+ +## Testing Plan + +### Unit / integration tests + +Add or expand: + +- `tests/sync-codex.test.ts` + - remote URL server is emitted + - existing non-managed TOML content is preserved +- `tests/sync-droid.test.ts` + - writes `mcp.json` + - merges with existing file +- `tests/sync-windsurf.test.ts` + - writes `mcp_config.json` + - merges existing servers + - preserves HTTP/SSE fields +- `tests/sync-kiro.test.ts` + - writes `settings/mcp.json` + - supports user-scope root + - preserves remote servers +- `tests/sync-qwen.test.ts` + - writes `settings.json` + - maps remote servers to `httpUrl` + - emits legacy SSE only when explicitly indicated +- `tests/sync-openclaw.test.ts` if implemented + - skills path + - MCP behavior or explicit skip warning + +### CLI tests + +Expand `tests/cli.test.ts` or add focused sync CLI coverage for: + +- `sync --target windsurf` +- `sync --target kiro` +- `sync --target qwen` +- `sync --target all` with detected new tool homes +- `sync --target all` no longer surfacing unsupported `cursor` + +## Risks and Mitigations + +### Risk: local specs are stale relative to current provider docs + +Impact: + +- implementing from local docs alone would produce incorrect paths and transport support. + +Mitigation: + +- treat official 2025-2026 docs/blog posts as source of truth where they supersede local specs +- update any obviously stale repo docs touched by this feature + +### Risk: transport ambiguity for remote MCP servers + +Impact: + +- a Claude `url` may map incorrectly for targets that distinguish HTTP vs SSE. + +Mitigation: + +- prefer HTTP where the target recommends it +- only emit legacy SSE when Claude transport is explicit +- warn when mapping is lossy + +### Risk: OpenClaw MCP surface is not sufficiently documented + +Impact: + +- writing a guessed MCP config could create a broken or misleading feature. 
+ +Mitigation: + +- validation gate during implementation +- if validation fails, ship OpenClaw skills sync only and document MCP as a follow-up + +### Risk: `sync --target all` remains easy to drift out of sync again + +Impact: + +- future providers get added to install/convert but missed by sync. + +Mitigation: + +- derive sync valid targets and detection from a shared registry +- add tests that assert detection and sync target lists match expected supported names + +## Alternative Approaches Considered + +### 1. Just add more cases to `sync.ts` + +Rejected: + +- this is exactly how the current drift happened. + +### 2. Reuse target writers directly + +Rejected: + +- writers copy directories and emit install bundles; +- sync must symlink skills and only manage personal config subsets. + +### 3. Standardize every sync target on user-level output now + +Rejected for this feature: + +- it would change existing `gemini` and `copilot` behavior and broaden scope into a migration project. + +## Documentation Plan + +- Update `README.md` sync section to list all supported sync targets and call out any exceptions. +- Update sync examples for `windsurf`, `kiro`, and `qwen`. +- If OpenClaw MCP is skipped, document that explicitly. +- If repository specs are corrected during implementation, update `docs/specs/kiro.md` to match official current behavior. + +## Success Metrics + +- `sync --target all` covers the same provider surface users reasonably expect from the current CLI, excluding only targets that lack a validated MCP config contract. +- A Claude config with one stdio server and one remote server syncs correctly to every documented MCP-capable provider. +- No user data is deleted during sync. +- Documentation and CLI help no longer over-promise relative to actual behavior. + +## AI Pairing Notes + +- Treat official provider docs as authoritative over older local notes, especially for Kiro and Qwen transport handling. 
+- Have a human review any AI-generated MCP mapping code before merge because these config files may contain secrets and lossy transport assumptions are easy to miss. +- When using an implementation agent, keep the work split by target so each provider's config contract can be tested independently. + +## References & Research + +### Internal References + +- `src/commands/sync.ts:15-125` +- `src/utils/detect-tools.ts:11-46` +- `src/parsers/claude-home.ts:11-64` +- `src/sync/codex.ts:7-92` +- `src/sync/droid.ts:6-21` +- `src/targets/windsurf.ts:13-93` +- `src/targets/kiro.ts:5-93` +- `src/targets/openclaw.ts:6-95` +- `src/targets/qwen.ts:5-64` +- `docs/solutions/adding-converter-target-providers.md:20-32` +- `docs/solutions/adding-converter-target-providers.md:208-214` +- `README.md:89-123` + +### External References + +- Factory MCP docs: https://docs.factory.ai/factory-cli/configuration/mcp +- Factory skills docs: https://docs.factory.ai/cli/configuration/skills +- Windsurf MCP docs: https://docs.windsurf.com/windsurf/cascade/mcp +- Kiro MCP overview: https://kiro.dev/blog/unlock-your-development-productivity-with-kiro-and-mcp/ +- Kiro remote MCP support: https://kiro.dev/blog/introducing-remote-mcp/ +- Kiro skills announcement: https://kiro.dev/blog/custom-subagents-skills-and-enterprise-controls/ +- Qwen settings docs: https://qwenlm.github.io/qwen-code-docs/en/users/configuration/settings/ +- Qwen MCP docs: https://qwenlm.github.io/qwen-code-docs/en/users/features/mcp/ +- Qwen skills docs: https://qwenlm.github.io/qwen-code-docs/zh/users/features/skills/ +- OpenClaw setup/config docs: https://docs.openclaw.ai/start/setup +- OpenClaw skills docs: https://docs.openclaw.ai/skills + +## Implementation Notes for the Follow-Up `/workflows-work` Step + +Suggested implementation order: + +1. registry + detection cleanup +2. codex remote MCP + droid MCP +3. windsurf + kiro + qwen sync modules +4. openclaw validation and implementation or explicit warning path +5. 
docs + tests diff --git a/docs/plans/feature_opencode-commands-as-md-and-config-merge.md b/docs/plans/feature_opencode-commands-as-md-and-config-merge.md new file mode 100644 index 0000000..f5e4a67 --- /dev/null +++ b/docs/plans/feature_opencode-commands-as-md-and-config-merge.md @@ -0,0 +1,574 @@ +# Feature: OpenCode Commands as .md Files, Config Merge, and Permissions Default Fix + +**Type:** feature + bug fix (consolidated) +**Date:** 2026-02-20 +**Starting point:** Branch `main` at commit `174cd4c` +**Create feature branch:** `feature/opencode-commands-md-merge-permissions` +**Baseline tests:** 180 pass, 0 fail (run `bun test` to confirm before starting) + +--- + +## Context + +### User-Facing Goal + +When running `bunx @every-env/compound-plugin install compound-engineering --to opencode`, three problems exist: + +1. **Commands overwrite `opencode.json`**: Plugin commands are written into the `command` key of `opencode.json`, which replaces the user's existing configuration file (the writer does `writeJson(configPath, bundle.config)` — a full overwrite). The user loses their personal settings (model, theme, provider keys, MCP servers they previously configured). + +2. **Commands should be `.md` files, not JSON**: OpenCode supports defining commands as individual `.md` files in `~/.config/opencode/commands/`. This is additive and non-destructive — one file per command, never touches `opencode.json`. + +3. **`--permissions broad` is the default and pollutes global config**: The `--permissions` flag defaults to `"broad"`, which writes 14 `permission: allow` entries and 14 `tools: true` entries into `opencode.json` on every install. These are global settings that affect ALL OpenCode sessions, not just plugin commands. 
Even `--permissions from-commands` is semantically wrong — it unions per-command `allowedTools` restrictions into a single global block, which inverts restriction semantics (a command allowing only `Read` gets merged with one allowing `Bash`, producing global `bash: allow`). + +### Expected Behavior After This Plan + +- Commands are written as `~/.config/opencode/commands/<name>.md` with YAML frontmatter (`description`, `model`). The `command` key is never written to `opencode.json`. +- `opencode.json` is deep-merged (not overwritten): existing user keys survive, plugin's MCP servers are added. User values win on conflict. +- `--permissions` defaults to `"none"` — no `permission` or `tools` entries are written to `opencode.json` unless the user explicitly passes `--permissions broad` or `--permissions from-commands`. + +### Relevant File Paths + +| File | Current State on `main` | What Changes | +|---|---|---| +| `src/types/opencode.ts` | `OpenCodeBundle` has no `commandFiles` field. Has `OpenCodeCommandConfig` type and `command` field on `OpenCodeConfig`. | Add `OpenCodeCommandFile` type. Add `commandFiles` to `OpenCodeBundle`. Remove `OpenCodeCommandConfig` type and `command` field from `OpenCodeConfig`. | +| `src/converters/claude-to-opencode.ts` | `convertCommands()` returns `Record<string, OpenCodeCommandConfig>`. Result set on `config.command`. `applyPermissions()` writes `config.permission` and `config.tools`. | `convertCommands()` returns `OpenCodeCommandFile[]`. `config.command` is never set. No changes to `applyPermissions()` itself. | +| `src/targets/opencode.ts` | `writeOpenCodeBundle()` does `writeJson(configPath, bundle.config)` — full overwrite. No `commandsDir`. No merge logic. | Add `commandsDir` to path resolver. Write command `.md` files with backup. Replace overwrite with `mergeOpenCodeConfig()` — read existing, deep-merge, write back. | +| `src/commands/install.ts` | `--permissions` default is `"broad"` (line 51). | Change default to `"none"`. Update description string.
| +`src/utils/files.ts` | Has `readJson()`, `pathExists()`, `backupFile()` already. | No changes needed — utilities already exist. | +| `tests/converter.test.ts` | Tests reference `bundle.config.command` (lines 19, 74, 202-214, 243). Test `"maps commands, permissions, and agents"` tests `from-commands` mode. | Update all to use `bundle.commandFiles`. Rename permission-related test to clarify opt-in nature. | +| `tests/opencode-writer.test.ts` | 4 tests, none have `commandFiles` in bundles. `"backs up existing opencode.json before overwriting"` test expects full overwrite. | Add `commandFiles: []` to all existing bundles. Rewrite backup test to test merge behavior. Add new tests for command file writing and merge. | +| `tests/cli.test.ts` | 10 tests. None check for commands directory. | Add test for `--permissions none` default. Add test for command `.md` file existence. | +| `AGENTS.md` | Line 10: "Keep OpenCode output at `opencode.json` and `.opencode/{agents,skills,plugins}`." | Update to document commands go to `commands/<name>.md`, `opencode.json` is deep-merged. | +| `README.md` | Line 54: "OpenCode output is written to `~/.config/opencode` by default, with `opencode.json` at the root..." | Update to document `.md` command files, merge behavior, `--permissions` default. | + +### Prior Context (Pre-Investigation) + +- **No `docs/decisions/` directory on `main`**: ADRs will be created fresh during this plan. +- **No prior plans touch the same area**: The `2026-02-08-feat-convert-local-md-settings-for-opencode-codex-plan.md` discusses path rewriting in command bodies but does not touch command output format or permissions. +- **OpenCode docs (confirmed via context7 MCP, library `/sst/opencode`):** + - Command `.md` frontmatter supports: `description`, `agent`, `model`. Does NOT support `permission` or `tools`. Placed in `~/.config/opencode/commands/` (global) or `.opencode/commands/` (project).
+ - Agent `.md` frontmatter supports: `description`, `mode`, `model`, `temperature`, `tools`, `permission`. Placed in `~/.config/opencode/agents/` or `.opencode/agents/`. + - `opencode.json` is the only place for: `mcp`, global `permission`, global `tools`, `model`, `provider`, `theme`, `server`, `compaction`, `watcher`, `share`. + +### Rejected Approaches + +**1. Map `allowedTools` to per-agent `.md` frontmatter permissions.** +Rejected: Claude commands are not agents. There is no per-command-to-per-agent mapping. Commands don't specify which agent to run with. Even if they did, the union of multiple commands' restrictions onto a single agent's permissions loses the per-command scoping. Agent `.md` files DO support `permission` in frontmatter, but this would require creating synthetic agents just to hold permissions — misleading and fragile. + +**2. Write permissions into command `.md` file frontmatter.** +Rejected: OpenCode command `.md` files only support `description`, `agent`, `model` in frontmatter. There is no `permission` or `tools` key. Confirmed via context7 docs. Anything else is silently ignored. + +**3. Keep `from-commands` as the default but fix the flattening logic.** +Rejected: There is no correct way to flatten per-command tool restrictions into a single global permission block. Any flattening loses information and inverts semantics. + +**4. Remove the `--permissions` flag entirely.** +Rejected: Some users may want to write permissions to `opencode.json` as a convenience. Keeping the flag with a changed default preserves optionality. + +**5. Write commands as both `.md` files AND in `opencode.json` `command` block.** +Rejected: Redundant and defeats the purpose of avoiding `opencode.json` pollution. `.md` files are the sole output format. 
+ +--- + +## Decision Record + +### Decision 1: Commands emitted as individual `.md` files, never in `opencode.json` + +- **Decision:** `convertCommands()` returns `OpenCodeCommandFile[]` (one `.md` file per command with YAML frontmatter). The `command` key is never set on `OpenCodeConfig`. The writer creates `<commandsDir>/<name>.md` for each file. +- **Context:** OpenCode supports two equivalent formats for commands — JSON in config and `.md` files. The `.md` format is additive (new files) rather than destructive (rewriting JSON). This is consistent with how agents and skills are already handled as `.md` files. +- **Alternatives rejected:** JSON-only (destructive), both formats (redundant). See Rejected Approaches above. +- **Assumptions:** OpenCode resolves commands from the `commands/` directory at runtime. Confirmed via docs. +- **Reversal trigger:** If OpenCode deprecates `.md` command files or the format changes incompatibly. + +### Decision 2: `opencode.json` deep-merged, not overwritten + +- **Decision:** `writeOpenCodeBundle()` reads the existing `opencode.json` (if present), deep-merges plugin-provided keys (MCP servers, and optionally permission/tools if `--permissions` is not `none`) without overwriting user-set values, and writes the merged result. User keys always win on conflict. +- **Context:** Users have personal configuration in `opencode.json` (API keys, model preferences, themes, existing MCP servers). The current full-overwrite destroys all of this. +- **Alternatives rejected:** Skip writing `opencode.json` entirely — rejected because MCP servers must be written there (no `.md` alternative exists for MCP). +- **Assumptions:** `readJson()` and `pathExists()` already exist in `src/utils/files.ts`. Malformed JSON in existing file should warn and fall back to plugin-only config (do not crash, do not destroy). +- **Reversal trigger:** If OpenCode adds a separate mechanism for plugin MCP server registration that doesn't involve `opencode.json`.
+ +### Decision 3: `--permissions` default changed from `"broad"` to `"none"` + +- **Decision:** The `--permissions` CLI flag default changes from `"broad"` to `"none"`. No `permission` or `tools` keys are written to `opencode.json` unless the user explicitly opts in. +- **Context:** `"broad"` silently writes 14 global tool permissions. `"from-commands"` has a semantic inversion bug (unions per-command restrictions into global allows). Both are destructive to user config. `applyPermissions()` already short-circuits on `"none"` (line 299: `if (mode === "none") return`), so no changes to that function are needed. +- **Alternatives rejected:** Fix `from-commands` flattening — impossible to do correctly with global-only target. Remove flag entirely — too restrictive for power users. +- **Assumptions:** The `applyPermissions()` function with mode `"none"` leaves `config.permission` and `config.tools` as `undefined`. +- **Reversal trigger:** If OpenCode adds per-command permission scoping, `from-commands` could become meaningful again. + +--- + +## ADRs To Create + +Create `docs/decisions/` directory (does not exist on `main`). ADRs follow `AGENTS.md` numbering convention: `0001-short-title.md`. + +### ADR 0001: OpenCode commands written as `.md` files, not in `opencode.json` + +- **Context:** OpenCode supports two equivalent formats for custom commands. Writing to `opencode.json` requires overwriting or merging the user's config file. Writing `.md` files is additive and non-destructive. +- **Decision:** The OpenCode target always emits commands as individual `.md` files in the `commands/` subdirectory. The `command` key is never written to `opencode.json` by this tool. +- **Consequences:** + - Positive: Installs are non-destructive. Commands are visible as individual files, easy to inspect. Consistent with agents/skills handling. + - Negative: Users inspecting `opencode.json` won't see plugin commands; they must look in `commands/`. 
+ - Neutral: Requires OpenCode >= the version with command file support (confirmed stable). + +### ADR 0002: Plugin merges into existing `opencode.json` rather than replacing it + +- **Context:** Users have existing `opencode.json` files with personal configuration. The install command previously backed up and replaced this file entirely, destroying user settings. +- **Decision:** `writeOpenCodeBundle` reads existing `opencode.json` (if present), deep-merges plugin-provided keys without overwriting user-set values, and writes the merged result. User keys always win on conflict. +- **Consequences:** + - Positive: User config preserved across installs. Re-installs are idempotent for user-set values. + - Negative: Plugin cannot remove or update an MCP server entry if the user already has one with the same name. + - Neutral: Backup of pre-merge file is still created for safety. + +### ADR 0003: Global permissions not written to `opencode.json` by default + +- **Context:** Claude commands carry `allowedTools` as per-command restrictions. OpenCode has no per-command permission mechanism. Writing per-command restrictions as global permissions is semantically incorrect and pollutes the user's global config. +- **Decision:** `--permissions` defaults to `"none"`. The plugin never writes `permission` or `tools` to `opencode.json` unless the user explicitly passes `--permissions broad` or `--permissions from-commands`. +- **Consequences:** + - Positive: User's global OpenCode permissions are never silently modified. + - Negative: Users who relied on auto-set permissions must now pass the flag explicitly. + - Neutral: The `"broad"` and `"from-commands"` modes still work as documented for opt-in use. + +--- + +## Assumptions & Invalidation Triggers + +- **Assumption:** OpenCode command `.md` frontmatter supports `description`, `agent`, `model` and does NOT support `permission` or `tools`. 
+ - **If this changes:** The converter could emit per-command permissions in command frontmatter, making `from-commands` mode semantically correct. Phase 2 would need a new code path. + +- **Assumption:** `readJson()` and `pathExists()` exist in `src/utils/files.ts` and work as expected. + - **If this changes:** Phase 4's merge logic needs alternative I/O utilities. + +- **Assumption:** `applyPermissions()` with mode `"none"` returns early at line 299 and does not set `config.permission` or `config.tools`. + - **If this changes:** The merge logic in Phase 4 might still merge stale data. Verify before implementing. + +- **Assumption:** 180 tests pass on `main` at commit `174cd4c` with `bun test`. + - **If this changes:** Do not proceed until the discrepancy is understood. + +- **Assumption:** `formatFrontmatter()` in `src/utils/frontmatter.ts` handles `Record` data and string body, producing valid YAML frontmatter. It filters out `undefined` values (line 35). It already supports nested objects/arrays via `formatYamlLine()`. + - **If this changes:** Phase 2's command file content generation would produce malformed output. + +- **Assumption:** The `backupFile()` function in `src/utils/files.ts` returns `null` if the file does not exist, and returns the backup path if it does. It does NOT throw on missing files. + - **If this changes:** Phase 4's backup-before-write for command files would need error handling. + +--- + +## Phases + +### Phase 1: Add `OpenCodeCommandFile` type and update `OpenCodeBundle` + +**What:** In `src/types/opencode.ts`: +- Add a new type `OpenCodeCommandFile` with `name: string` (command name, used as filename stem) and `content: string` (full file content: YAML frontmatter + body). +- Add `commandFiles: OpenCodeCommandFile[]` field to `OpenCodeBundle`. +- Remove `command?: Record` from `OpenCodeConfig`. +- Remove the `OpenCodeCommandConfig` type entirely (lines 23-28). 
+ +**Why:** This is the foundational type change that all subsequent phases depend on. Commands move from the config object to individual file entries in the bundle. + +**Test first:** + +File: `tests/converter.test.ts` + +Before making any type changes, update the test file to reflect the new shape. The existing tests will fail because they reference `bundle.config.command` and `OpenCodeBundle` doesn't have `commandFiles` yet. + +Tests to modify (they will fail after type changes, then pass after Phase 2): +- `"maps commands, permissions, and agents"` (line 11): Change `bundle.config.command?.["workflows:review"]` to `bundle.commandFiles.find(f => f.name === "workflows:review")`. Change `bundle.config.command?.["plan_review"]` to `bundle.commandFiles.find(f => f.name === "plan_review")`. +- `"normalizes models and infers temperature"` (line 60): Change `bundle.config.command?.["workflows:work"]` to check `bundle.commandFiles.find(f => f.name === "workflows:work")` and parse its frontmatter for model. +- `"excludes commands with disable-model-invocation from command map"` (line 202): Change `bundle.config.command?.["deploy-docs"]` to `bundle.commandFiles.find(f => f.name === "deploy-docs")`. +- `"rewrites .claude/ paths to .opencode/ in command bodies"` (line 217): Change `bundle.config.command?.["review"]?.template` to access `bundle.commandFiles.find(f => f.name === "review")?.content`. + +Also update `tests/opencode-writer.test.ts`: +- Add `commandFiles: []` to every `OpenCodeBundle` literal in all 4 existing tests (lines 20, 43, 67, 98). These bundles currently only have `config`, `agents`, `plugins`, `skillDirs`. + +**Implementation:** + +In `src/types/opencode.ts`: +1. Remove lines 23-28 (`OpenCodeCommandConfig` type). +2. Remove line 10 (`command?: Record<string, OpenCodeCommandConfig>`) from `OpenCodeConfig`. +3.
Add after line 47: +```typescript +export type OpenCodeCommandFile = { + name: string // command name, used as the filename stem: <name>.md + content: string // full file content: YAML frontmatter + body +} +``` +4. Add `commandFiles: OpenCodeCommandFile[]` to `OpenCodeBundle` (between `agents` and `plugins`). + +In `src/converters/claude-to-opencode.ts`: +- Update the import on line 11: Remove `OpenCodeCommandConfig` from the import. Add `OpenCodeCommandFile`. + +**Code comments required:** +- Above the `commandFiles` field in `OpenCodeBundle`: `// Commands are written as individual .md files, not in opencode.json. See ADR-001.` + +**Verification:** `bun test` will show failures in converter tests (they reference the old command format). This is expected — Phase 2 fixes them. + +--- + +### Phase 2: Convert `convertCommands()` to emit `.md` command files + +**What:** In `src/converters/claude-to-opencode.ts`: +- Rewrite `convertCommands()` (line 114) to return `OpenCodeCommandFile[]` instead of `Record<string, OpenCodeCommandConfig>`. +- Each command becomes a `.md` file with YAML frontmatter (`description`, optionally `model`) and body (the template text with Claude path rewriting applied). +- In `convertClaudeToOpenCode()` (line 64): replace `commandMap` with `commandFiles`. Remove `config.command` assignment. Add `commandFiles` to returned bundle. + +**Why:** This is the core conversion logic change that implements ADR-001. + +**Test first:** + +File: `tests/converter.test.ts` + +The tests were already updated in Phase 1 to reference `bundle.commandFiles`. Now they need to pass. Specific assertions: + +1. Rename `"maps commands, permissions, and agents"` to `"from-commands mode: maps allowedTools to global permission block"` — to clarify this tests an opt-in mode, not the default. + - Assert `bundle.config.command` is `undefined` (it no longer exists on the type, but accessing it returns `undefined`). + - Assert `bundle.commandFiles.find(f => f.name === "workflows:review")` is defined.
+ - Assert `bundle.commandFiles.find(f => f.name === "plan_review")` is defined. + - Permission assertions remain unchanged (they test `from-commands` mode explicitly). + +2. `"normalizes models and infers temperature"`: + - Find `workflows:work` in `bundle.commandFiles`, parse its frontmatter with `parseFrontmatter()`, assert `data.model === "openai/gpt-4o"`. + +3. `"excludes commands with disable-model-invocation from command map"` — rename to `"excludes commands with disable-model-invocation from commandFiles"`: + - Assert `bundle.commandFiles.find(f => f.name === "deploy-docs")` is `undefined`. + - Assert `bundle.commandFiles.find(f => f.name === "workflows:review")` is defined. + +4. `"rewrites .claude/ paths to .opencode/ in command bodies"`: + - Find `review` in `bundle.commandFiles`, assert `content` contains `"compound-engineering.local.md"`. + +5. Add NEW test: `"command .md files include description in frontmatter"`: + - Create a minimal `ClaudePlugin` with one command (`name: "test-cmd"`, `description: "Test description"`, `body: "Do the thing"`). + - Convert with `permissions: "none"`. + - Find the command file, parse frontmatter, assert `data.description === "Test description"`. + - Assert the body (after frontmatter) contains `"Do the thing"`. + +**Implementation:** + +In `src/converters/claude-to-opencode.ts`: + +Replace lines 114-128 (`convertCommands` function): +```typescript +// Commands are written as individual .md files rather than entries in opencode.json. +// Chosen over JSON map because opencode resolves commands by filename at runtime (ADR-001). 
+function convertCommands(commands: ClaudeCommand[]): OpenCodeCommandFile[] { + const files: OpenCodeCommandFile[] = [] + for (const command of commands) { + if (command.disableModelInvocation) continue + const frontmatter: Record<string, unknown> = { + description: command.description, + } + if (command.model && command.model !== "inherit") { + frontmatter.model = normalizeModel(command.model) + } + const content = formatFrontmatter(frontmatter, rewriteClaudePaths(command.body)) + files.push({ name: command.name, content }) + } + return files +} +``` + +Replace lines 64-87 (`convertClaudeToOpenCode` function body): +- Change line 69: `const commandFiles = convertCommands(plugin.commands)` +- Change lines 73-77 (config construction): Remove the `command: ...` line. Config should only have `$schema` and `mcp`. +- Change line 81-86 (return): Replace `plugins` in the return with `commandFiles, plugins` (add `commandFiles` field to returned bundle). + +**Code comments required:** +- Above `convertCommands()`: `// Commands are written as individual .md files rather than entries in opencode.json.` and `// Chosen over JSON map because opencode resolves commands by filename at runtime (ADR-001).` + +**Verification:** Run `bun test tests/converter.test.ts`. All converter tests must pass. Then run `bun test` — writer tests should still fail (they expect the old bundle shape; fixed in Phase 1's test updates) but converter tests pass. + +--- + +### Phase 3: Add `commandsDir` to path resolver and write command files + +**What:** In `src/targets/opencode.ts`: +- Add `commandsDir` to the return value of `resolveOpenCodePaths()` for both branches (global and custom output dir). +- In `writeOpenCodeBundle()`, iterate `bundle.commandFiles` and write each as `<commandsDir>/<name>.md` with backup-before-overwrite. + +**Why:** This creates the file output mechanism for command `.md` files. Separated from Phase 4 (merge logic) for testability.
+ +**Test first:** + +File: `tests/opencode-writer.test.ts` + +Add these new tests: + +1. `"writes command files as .md in commands/ directory"`: + - Create a bundle with one `commandFiles` entry: `{ name: "my-cmd", content: "---\ndescription: Test\n---\n\nDo something." }`. + - Use an output root of `path.join(tempRoot, ".config", "opencode")` (global-style). + - Assert `exists(path.join(outputRoot, "commands", "my-cmd.md"))` is true. + - Read the file, assert content matches (with trailing newline: `content + "\n"`). + +2. `"backs up existing command .md file before overwriting"`: + - Pre-create `commands/my-cmd.md` with old content. + - Write a bundle with a `commandFiles` entry for `my-cmd`. + - Assert a `.bak.` file exists in `commands/` directory. + - Assert new content is written. + +**Implementation:** + +In `resolveOpenCodePaths()`: +- In the global branch (line 39-46): Add `commandsDir: path.join(outputRoot, "commands")` with comment: `// .md command files; alternative to the command key in opencode.json` +- In the custom branch (line 49-56): Add `commandsDir: path.join(outputRoot, ".opencode", "commands")` with same comment. + +In `writeOpenCodeBundle()`: +- After the agents loop (line 18), add: +```typescript +const commandsDir = paths.commandsDir +for (const commandFile of bundle.commandFiles) { + const dest = path.join(commandsDir, `${commandFile.name}.md`) + const cmdBackupPath = await backupFile(dest) + if (cmdBackupPath) { + console.log(`Backed up existing command file to ${cmdBackupPath}`) + } + await writeText(dest, commandFile.content + "\n") +} +``` + +**Code comments required:** +- Inline comment on `commandsDir` in both `resolveOpenCodePaths` branches: `// .md command files; alternative to the command key in opencode.json` + +**Verification:** Run `bun test tests/opencode-writer.test.ts`. The two new command file tests must pass. Existing tests must still pass (they have `commandFiles: []` from Phase 1 updates). 
+ +--- + +### Phase 4: Replace config overwrite with deep-merge + +**What:** In `src/targets/opencode.ts`: +- Replace `writeJson(paths.configPath, bundle.config)` (line 13) with a call to a new `mergeOpenCodeConfig()` function. +- `mergeOpenCodeConfig()` reads the existing `opencode.json` (if present), merges plugin-provided keys using user-wins-on-conflict strategy, and returns the merged config. +- Import `pathExists` and `readJson` from `../utils/files` (add to existing import on line 2). + +**Why:** This implements ADR-002 — the user's existing config is preserved across installs. + +**Test first:** + +File: `tests/opencode-writer.test.ts` + +Modify existing test and add new tests: + +1. Rename `"backs up existing opencode.json before overwriting"` (line 88) to `"merges plugin config into existing opencode.json without destroying user keys"`: + - Pre-create `opencode.json` with `{ $schema: "https://opencode.ai/config.json", custom: "value" }`. + - Write a bundle with `config: { $schema: "...", mcp: { "plugin-server": { type: "local", command: "uvx", args: ["plugin-srv"] } } }`. + - Assert merged config has BOTH `custom: "value"` (user key) AND `mcp["plugin-server"]` (plugin key). + - Assert backup file exists with original content. + +2. NEW: `"merges mcp servers without overwriting user entries"`: + - Pre-create `opencode.json` with `{ mcp: { "user-server": { type: "local", command: "uvx", args: ["user-srv"] } } }`. + - Write a bundle with `config.mcp` containing both `"plugin-server"` (new) and `"user-server"` (conflict — different args). + - Assert both servers exist in merged output. + - Assert `user-server` keeps user's original args (user wins on conflict). + - Assert `plugin-server` is present with plugin's args. + +3. NEW: `"preserves unrelated user keys when merging opencode.json"`: + - Pre-create `opencode.json` with `{ model: "my-model", theme: "dark", mcp: {} }`. + - Write a bundle with `config: { $schema: "...", mcp: { "plugin-server": ... 
}, permission: { "bash": "allow" } }`. + - Assert `model` and `theme` are preserved. + - Assert plugin additions are present. + +**Implementation:** + +Add to imports in `src/targets/opencode.ts` line 2: +```typescript +import { backupFile, copyDir, ensureDir, pathExists, readJson, writeJson, writeText } from "../utils/files" +import type { OpenCodeBundle, OpenCodeConfig } from "../types/opencode" +``` + +Add `mergeOpenCodeConfig()` function: +```typescript +async function mergeOpenCodeConfig( + configPath: string, + incoming: OpenCodeConfig, +): Promise { + // If no existing config, write plugin config as-is + if (!(await pathExists(configPath))) return incoming + + let existing: OpenCodeConfig + try { + existing = await readJson(configPath) + } catch { + // Safety first per AGENTS.md -- do not destroy user data even if their config is malformed. + // Warn and fall back to plugin-only config rather than crashing. + console.warn( + `Warning: existing ${configPath} is not valid JSON. Writing plugin config without merging.` + ) + return incoming + } + + // User config wins on conflict -- see ADR-002 + // MCP servers: add plugin entries, skip keys already in user config. + const mergedMcp = { + ...(incoming.mcp ?? {}), + ...(existing.mcp ?? {}), // existing takes precedence (overwrites same-named plugin entries) + } + + // Permission: add plugin entries, skip keys already in user config. + const mergedPermission = incoming.permission + ? { + ...(incoming.permission), + ...(existing.permission ?? {}), // existing takes precedence + } + : existing.permission + + // Tools: same pattern + const mergedTools = incoming.tools + ? { + ...(incoming.tools), + ...(existing.tools ?? {}), + } + : existing.tools + + return { + ...existing, // all user keys preserved + $schema: incoming.$schema ?? existing.$schema, + mcp: Object.keys(mergedMcp).length > 0 ? 
mergedMcp : undefined, + permission: mergedPermission, + tools: mergedTools, + } +} +``` + +In `writeOpenCodeBundle()`, replace line 13 (`await writeJson(paths.configPath, bundle.config)`) with: +```typescript +const merged = await mergeOpenCodeConfig(paths.configPath, bundle.config) +await writeJson(paths.configPath, merged) +``` + +**Code comments required:** +- Above `mergeOpenCodeConfig()`: `// Merges plugin config into existing opencode.json. User keys win on conflict. See ADR-002.` +- On the `...(existing.mcp ?? {})` line: `// existing takes precedence (overwrites same-named plugin entries)` +- On malformed JSON catch: `// Safety first per AGENTS.md -- do not destroy user data even if their config is malformed.` + +**Verification:** Run `bun test tests/opencode-writer.test.ts`. All tests must pass including the renamed test and the 2 new merge tests. + +--- + +### Phase 5: Change `--permissions` default to `"none"` + +**What:** In `src/commands/install.ts`, change line 51 `default: "broad"` to `default: "none"`. Update the description string. + +**Why:** This implements ADR-003 — stops polluting user's global config with permissions by default. + +**Test first:** + +File: `tests/cli.test.ts` + +Add these tests: + +1. `"install --to opencode uses permissions:none by default"`: + - Run install with no `--permissions` flag against the fixture plugin. + - Read the written `opencode.json`. + - Assert it does NOT contain a `permission` key. + - Assert it does NOT contain a `tools` key. + +2. `"install --to opencode --permissions broad writes permission block"`: + - Run install with `--permissions broad` against the fixture plugin. + - Read the written `opencode.json`. + - Assert it DOES contain a `permission` key with values. + +**Implementation:** + +In `src/commands/install.ts`: +- Line 51: Change `default: "broad"` to `default: "none"`. +- Line 52: Change description to `"Permission mapping written to opencode.json: none (default) | broad | from-commands"`. 
+ +**Code comments required:** +- On the `default: "none"` line: `// Default is "none" -- writing global permissions to opencode.json pollutes user config. See ADR-003.` + +**Verification:** Run `bun test tests/cli.test.ts`. All CLI tests must pass including the 2 new permission tests. Then run `bun test` — all tests (180 original + new ones) must pass. + +--- + +### Phase 6: Update `AGENTS.md` and `README.md` + +**What:** Update documentation to reflect all three changes. + +**Why:** Keeps docs accurate for future contributors and users. + +**Test first:** No tests required for documentation changes. + +**Implementation:** + +In `AGENTS.md` line 10, replace: +``` +- **Output Paths:** Keep OpenCode output at `opencode.json` and `.opencode/{agents,skills,plugins}`. +``` +with: +``` +- **Output Paths:** Keep OpenCode output at `opencode.json` and `.opencode/{agents,skills,plugins}`. For OpenCode, commands go to `~/.config/opencode/commands/<name>.md`; `opencode.json` is deep-merged (never overwritten wholesale). +``` + +In `README.md` line 54, replace: +``` +OpenCode output is written to `~/.config/opencode` by default, with `opencode.json` at the root and `agents/`, `skills/`, and `plugins/` alongside it. +``` +with: +``` +OpenCode output is written to `~/.config/opencode` by default. Commands are written as individual `.md` files to `~/.config/opencode/commands/<name>.md`. Agents, skills, and plugins are written to the corresponding subdirectories alongside. `opencode.json` (MCP servers) is deep-merged into any existing file -- user keys such as `model`, `theme`, and `provider` are preserved, and user values win on conflicts. Command files are backed up before being overwritten. +``` + +Also update `AGENTS.md` to add a Repository Docs Conventions section if not present: +``` +## Repository Docs Conventions + +- **ADRs** live in `docs/decisions/` and are numbered with 4-digit zero-padding: `0001-short-title.md`, `0002-short-title.md`, etc.
+- **Orchestrator run reports** live in `docs/reports/`. + +When recording a significant decision (new provider, output format change, merge strategy), create an ADR in `docs/decisions/` following the numbering sequence. +``` + +**Code comments required:** None. + +**Verification:** Read the updated files and confirm accuracy. Run `bun test` to confirm no regressions. + +--- + +## TDD Enforcement + +The executing agent MUST follow this sequence for every phase that touches source code: + +1. Write the test(s) first in the test file. +2. Run `bun test ` and confirm the new/modified tests FAIL (red). +3. Implement the code change. +4. Run `bun test ` and confirm the new/modified tests PASS (green). +5. Run `bun test` (all tests) and confirm no regressions. + +**Exception:** Phase 6 is documentation only. Run `bun test` after to confirm no regressions but no red/green cycle needed. + +**Note on Phase 1:** Type changes alone will cause test failures. Phase 1 and Phase 2 are tightly coupled — the tests updated in Phase 1 will not pass until Phase 2's implementation is complete. The executing agent should: +1. Update tests in Phase 1 (expect them to fail — both due to type errors and logic changes). +2. Implement type changes in Phase 1. +3. Implement converter changes in Phase 2. +4. Confirm all converter tests pass after Phase 2. + +--- + +## Constraints + +**Do not modify:** +- `src/converters/claude-to-opencode.ts` lines 294-417 (`applyPermissions()`, `normalizeTool()`, `parseToolSpec()`, `normalizePattern()`) — these functions are correct for `"broad"` and `"from-commands"` modes. Only the default that triggers them is changing. +- Any files under `tests/fixtures/` — these are data files, not test logic. +- `src/types/claude.ts` — no changes to source types. +- `src/parsers/claude.ts` — no changes to parser logic. +- `src/utils/files.ts` — all needed utilities already exist. Do not add new utility functions. 
+- `src/utils/frontmatter.ts` — already handles the needed formatting. + +**Dependencies not to add:** None. No new npm/bun packages. + +**Patterns to follow:** +- Existing writer tests in `tests/opencode-writer.test.ts` use `fs.mkdtemp()` for temp directories and the local `exists()` helper function. +- Existing CLI tests in `tests/cli.test.ts` use `Bun.spawn()` to invoke the CLI. +- Existing converter tests in `tests/converter.test.ts` use `loadClaudePlugin(fixtureRoot)` for real fixtures and inline `ClaudePlugin` objects for isolated tests. +- ADR format: Follow `AGENTS.md` numbering convention `0001-short-title.md` with sections: Status, Date, Context, Decision, Consequences, Plan Reference. +- Commits: Use conventional commit format. Reference ADRs in commit bodies. +- Branch: Create `feature/opencode-commands-md-merge-permissions` from `main`. + +## Final Checklist + +After all phases complete: +- [ ] `bun test` passes all tests (180 original + new ones, 0 fail) +- [ ] `docs/decisions/0001-opencode-command-output-format.md` exists +- [ ] `docs/decisions/0002-opencode-json-merge-strategy.md` exists +- [ ] `docs/decisions/0003-opencode-permissions-default-none.md` exists +- [ ] `opencode.json` is never fully overwritten — merge logic confirmed by test +- [ ] Commands are written as `.md` files — confirmed by test +- [ ] `--permissions` defaults to `"none"` — confirmed by CLI test +- [ ] `AGENTS.md` and `README.md` updated to reflect new behavior diff --git a/docs/solutions/adding-converter-target-providers.md b/docs/solutions/adding-converter-target-providers.md new file mode 100644 index 0000000..cccda03 --- /dev/null +++ b/docs/solutions/adding-converter-target-providers.md @@ -0,0 +1,693 @@ +--- +title: Adding New Converter Target Providers +category: architecture +tags: [converter, target-provider, plugin-conversion, multi-platform, pattern] +created: 2026-02-23 +severity: medium +component: converter-cli +problem_type: best_practice +root_cause: 
architectural_pattern +--- + +# Adding New Converter Target Providers + +## Problem + +When adding support for a new AI platform (e.g., Devin, Cursor, Copilot), the converter CLI architecture requires consistent implementation across types, converters, writers, CLI integration, and tests. Without documented patterns and learnings, new targets take longer to implement and risk architectural inconsistency. + +## Solution + +The compound-engineering-plugin uses a proven **6-phase target provider pattern** that has been successfully applied to 8 targets: + +1. **OpenCode** (primary target, reference implementation) +2. **Codex** (second target, established pattern) +3. **Droid/Factory** (workflow/agent conversion) +4. **Pi** (MCPorter ecosystem) +5. **Gemini CLI** (content transformation patterns) +6. **Cursor** (command flattening, rule formats) +7. **Copilot** (GitHub native, MCP prefixing) +8. **Kiro** (limited MCP support) +9. **Devin** (playbook conversion, knowledge entries) + +Each implementation follows this architecture precisely, ensuring consistency and maintainability. 
+ +## Architecture: The 6-Phase Pattern + +### Phase 1: Type Definitions (`src/types/{target}.ts`) + +**Purpose:** Define TypeScript types for the intermediate bundle format + +**Key Pattern:** + +```typescript +// Exported bundle type used by converter and writer +export type {TargetName}Bundle = { + // Component arrays matching the target format + agents?: {TargetName}Agent[] + commands?: {TargetName}Command[] + skillDirs?: {TargetName}SkillDir[] + mcpServers?: Record<string, unknown> + // Target-specific fields + setup?: string // Instructions file content +} + +// Individual component types +export type {TargetName}Agent = { + name: string + content: string // Full file content (with frontmatter if applicable) + category?: string // e.g., "agent", "rule", "playbook" + meta?: Record<string, unknown> // Target-specific metadata +} +``` + +**Key Learnings:** + +- Always include a `content` field (full file text) rather than decomposed fields — it's simpler and matches how files are written +- Use intermediate types for complex sections (e.g., `DevinPlaybookSections` in Devin converter) to make section building independently testable +- Avoid target-specific fields in the base bundle unless essential — aim for shared structure across targets +- Include a `category` field if the target has file-type variants (agents vs. commands vs.
rules) + +**Reference Implementations:** +- OpenCode: `src/types/opencode.ts` (command + agent split) +- Devin: `src/types/devin.ts` (playbooks + knowledge entries) +- Copilot: `src/types/copilot.ts` (agents + skills + MCP) + +--- + +### Phase 2: Converter (`src/converters/claude-to-{target}.ts`) + +**Purpose:** Transform Claude Code plugin format → target-specific bundle format + +**Key Pattern:** + +```typescript +export type ClaudeTo{Target}Options = ClaudeToOpenCodeOptions // Reuse common options + +export function convertClaudeTo{Target}( + plugin: ClaudePlugin, + _options: ClaudeTo{Target}Options, +): {Target}Bundle { + // Pre-scan: build maps for cross-reference resolution (agents, commands) + // Needed if target requires deduplication or reference tracking + const refMap: Record = {} + for (const agent of plugin.agents) { + refMap[normalize(agent.name)] = macroName(agent.name) + } + + // Phase 1: Convert agents + const agents = plugin.agents.map(a => convert{Target}Agent(a, usedNames, refMap)) + + // Phase 2: Convert commands (may depend on agent names for dedup) + const commands = plugin.commands.map(c => convert{Target}Command(c, usedNames, refMap)) + + // Phase 3: Handle skills (usually pass-through, sometimes conversion) + const skillDirs = plugin.skills.map(s => ({ name: s.name, sourceDir: s.sourceDir })) + + // Phase 4: Convert MCP servers (target-specific prefixing/type mapping) + const mcpConfig = convertMcpServers(plugin.mcpServers) + + // Phase 5: Warn on unsupported features + if (plugin.hooks && Object.keys(plugin.hooks.hooks).length > 0) { + console.warn("Warning: {Target} does not support hooks. 
Hooks were skipped.") + } + + return { agents, commands, skillDirs, mcpConfig } +} +``` + +**Content Transformation (`transformContentFor{Target}`):** + +Applied to both agent bodies and command bodies to rewrite paths, command references, and agent mentions: + +```typescript +export function transformContentFor{Target}(body: string): string { + let result = body + + // 1. Rewrite paths (.claude/ → .github/, ~/.claude/ → ~/.{target}/) + result = result + .replace(/~\/\.claude\//g, `~/.${targetDir}/`) + .replace(/\.claude\//g, `.${targetDir}/`) + + // 2. Transform Task agent calls (to natural language) + const taskPattern = /Task\s+([a-z][a-z0-9-]*)\(([^)]+)\)/gm + result = result.replace(taskPattern, (_match, agentName: string, args: string) => { + const skillName = normalize(agentName) + return `Use the ${skillName} skill to: ${args.trim()}` + }) + + // 3. Flatten slash commands (/workflows:plan → /plan) + const slashPattern = /(? { + if (commandName.includes("/")) return match // Skip file paths + const normalized = normalize(commandName) + return `/${normalized}` + }) + + // 4. Transform @agent-name references + const agentPattern = /@([a-z][a-z0-9-]*-(?:agent|reviewer|analyst|...))/gi + result = result.replace(agentPattern, (_match, agentName: string) => { + return `the ${normalize(agentName)} agent` // or "rule", "playbook", etc. + }) + + // 5. 
Remove examples (if target doesn't support them) + result = result.replace(/<examples>[\s\S]*?<\/examples>/g, "") + + return result +} +``` + +**Deduplication Pattern (`uniqueName`):** + +Used when target has flat namespaces (Cursor, Copilot, Devin) or when name collisions occur: + +```typescript +function uniqueName(base: string, used: Set<string>): string { + if (!used.has(base)) { + used.add(base) + return base + } + let index = 2 + while (used.has(`${base}-${index}`)) { + index += 1 + } + const name = `${base}-${index}` + used.add(name) + return name +} + +function normalizeName(value: string): string { + const trimmed = value.trim() + if (!trimmed) return "item" + const normalized = trimmed + .toLowerCase() + .replace(/[\\/]+/g, "-") + .replace(/[:\s]+/g, "-") + .replace(/[^a-z0-9_-]+/g, "-") + .replace(/-+/g, "-") + .replace(/^-+|-+$/g, "") + return normalized || "item" +} + +// Flatten: drops namespace prefix (workflows:plan → plan) +function flattenCommandName(name: string): string { + const normalized = normalizeName(name) + return normalized.replace(/^[a-z]+-/, "") // Drop prefix before first dash +} +``` + +**Key Learnings:** + +1. **Pre-scan for cross-references** — If target requires reference names (macros, URIs, IDs), build a map before conversion. Example: Devin needs macro names like `agent_kieran_rails_reviewer`, so pre-scan builds the map. + +2. **Content transformation is fragile** — Test extensively. Patterns that work for slash commands might false-match on file paths. Use negative lookahead to skip `/etc`, `/usr`, `/var`, etc. + +3. **Simplify heuristics, trust structural mapping** — Don't try to parse agent body for "You are..." or "NEVER do..." patterns. Instead, map agent.description → Overview, agent.body → Procedure, agent.capabilities → Specifications. Heuristics fail on edge cases and are hard to test. + +4. **Normalize early and consistently** — Use the same `normalizeName()` function throughout. Inconsistent normalization causes deduplication bugs. 
+ +5. **MCP servers need target-specific handling:** + - **OpenCode:** Merge into `opencode.json` (preserve user keys) + - **Copilot:** Prefix env vars with `COPILOT_MCP_`, emit JSON + - **Devin:** Write setup instructions file (config is via web UI) + - **Cursor:** Pass through as-is + +6. **Warn on unsupported features** — Hooks, Gemini extensions, Kiro-incompatible MCP types. Emit to stderr and continue conversion. + +**Reference Implementations:** +- OpenCode: `src/converters/claude-to-opencode.ts` (most comprehensive) +- Devin: `src/converters/claude-to-devin.ts` (content transformation + cross-references) +- Copilot: `src/converters/claude-to-copilot.ts` (MCP prefixing pattern) + +--- + +### Phase 3: Writer (`src/targets/{target}.ts`) + +**Purpose:** Write converted bundle to disk in target-specific directory structure + +**Key Pattern:** + +```typescript +export async function write{Target}Bundle(outputRoot: string, bundle: {Target}Bundle): Promise { + const paths = resolve{Target}Paths(outputRoot) + await ensureDir(paths.root) + + // Write each component type + if (bundle.agents?.length > 0) { + const agentsDir = path.join(paths.root, "agents") + for (const agent of bundle.agents) { + await writeText(path.join(agentsDir, `${agent.name}.ext`), agent.content + "\n") + } + } + + if (bundle.commands?.length > 0) { + const commandsDir = path.join(paths.root, "commands") + for (const command of bundle.commands) { + await writeText(path.join(commandsDir, `${command.name}.ext`), command.content + "\n") + } + } + + // Copy skills (pass-through case) + if (bundle.skillDirs?.length > 0) { + const skillsDir = path.join(paths.root, "skills") + for (const skill of bundle.skillDirs) { + await copyDir(skill.sourceDir, path.join(skillsDir, skill.name)) + } + } + + // Write generated skills (converted from commands) + if (bundle.generatedSkills?.length > 0) { + const skillsDir = path.join(paths.root, "skills") + for (const skill of bundle.generatedSkills) { + await 
writeText(path.join(skillsDir, skill.name, "SKILL.md"), skill.content + "\n") + } + } + + // Write MCP config (target-specific location and format) + if (bundle.mcpServers && Object.keys(bundle.mcpServers).length > 0) { + const mcpPath = path.join(paths.root, "mcp.json") // or copilot-mcp-config.json, etc. + const backupPath = await backupFile(mcpPath) + if (backupPath) { + console.log(`Backed up existing MCP config to ${backupPath}`) + } + await writeJson(mcpPath, { mcpServers: bundle.mcpServers }) + } + + // Write instructions or setup guides + if (bundle.setupInstructions) { + const setupPath = path.join(paths.root, "setup-instructions.md") + await writeText(setupPath, bundle.setupInstructions + "\n") + } +} + +// Avoid double-nesting (.target/.target/) +function resolve{Target}Paths(outputRoot: string) { + const base = path.basename(outputRoot) + // If already pointing at .target, write directly into it + if (base === ".target") { + return { root: outputRoot } + } + // Otherwise nest under .target + return { root: path.join(outputRoot, ".target") } +} +``` + +**Backup Pattern (MCP configs only):** + +MCP configs are often pre-existing and user-edited. Backup before overwrite: + +```typescript +// From src/utils/files.ts +export async function backupFile(filePath: string): Promise<string | null> { + if (!existsSync(filePath)) return null + const timestamp = new Date().toISOString().replace(/[:.]/g, "-") + const dirname = path.dirname(filePath) + const basename = path.basename(filePath) + const ext = path.extname(basename) + const name = basename.slice(0, -ext.length) + const backupPath = path.join(dirname, `${name}.${timestamp}${ext}`) + await copyFile(filePath, backupPath) + return backupPath +} +``` + +**Key Learnings:** + +1. **Always check for double-nesting** — If output root is already `.target`, don't nest again. 
Pattern: + ```typescript + if (path.basename(outputRoot) === ".target") { + return { root: outputRoot } // Write directly + } + return { root: path.join(outputRoot, ".target") } // Nest + ``` + +2. **Use `writeText` and `writeJson` helpers** — These handle directory creation and line endings consistently + +3. **Backup MCP configs before overwriting** — MCP JSON files are often hand-edited. Always backup with timestamp. + +4. **Empty bundles should succeed gracefully** — Don't fail if a component array is empty. Many plugins may have no commands or no skills. + +5. **File extensions matter** — Match target conventions exactly: + - Copilot: `.agent.md` (note the dot) + - Cursor: `.mdc` for rules + - Devin: `.devin.md` for playbooks + - OpenCode: `.md` for commands + +6. **Permissions for sensitive files** — MCP config with API keys should use `0o600`: + ```typescript + await writeJson(mcpPath, config, { mode: 0o600 }) + ``` + +**Reference Implementations:** +- Droid: `src/targets/droid.ts` (simpler pattern, good for learning) +- Copilot: `src/targets/copilot.ts` (double-nesting pattern) +- Devin: `src/targets/devin.ts` (setup instructions file) + +--- + +### Phase 4: CLI Wiring + +**File: `src/targets/index.ts`** + +Register the new target in the global target registry: + +```typescript +import { convertClaudeTo{Target} } from "../converters/claude-to-{target}" +import { write{Target}Bundle } from "./{target}" +import type { {Target}Bundle } from "../types/{target}" + +export const targets: Record> = { + // ... existing targets ... 
+ {target}: { + name: "{target}", + implemented: true, + convert: convertClaudeTo{Target} as TargetHandler<{Target}Bundle>["convert"], + write: write{Target}Bundle as TargetHandler<{Target}Bundle>["write"], + }, +} +``` + +**File: `src/commands/convert.ts` and `src/commands/install.ts`** + +Add output root resolution: + +```typescript +// In resolveTargetOutputRoot() +if (targetName === "{target}") { + return path.join(outputRoot, ".{target}") +} + +// Update --to flag description +const toDescription = "Target format (opencode | codex | droid | cursor | copilot | kiro | {target})" +``` + +--- + +### Phase 5: Sync Support (Optional) + +**File: `src/sync/{target}.ts`** + +If the target supports syncing personal skills and MCP servers: + +```typescript +export async function syncTo{Target}(outputRoot: string): Promise { + const personalSkillsDir = path.join(expandHome("~/.claude/skills")) + const personalSettings = loadSettings(expandHome("~/.claude/settings.json")) + + const skillsDest = path.join(outputRoot, ".{target}", "skills") + await ensureDir(skillsDest) + + // Symlink personal skills + if (existsSync(personalSkillsDir)) { + const skills = readdirSync(personalSkillsDir) + for (const skill of skills) { + if (!isValidSkillName(skill)) continue + const source = path.join(personalSkillsDir, skill) + const dest = path.join(skillsDest, skill) + await forceSymlink(source, dest) + } + } + + // Merge MCP servers if applicable + if (personalSettings.mcpServers) { + const mcpPath = path.join(outputRoot, ".{target}", "mcp.json") + const existing = readJson(mcpPath) || {} + const merged = { + ...existing, + mcpServers: { + ...existing.mcpServers, + ...personalSettings.mcpServers, + }, + } + await writeJson(mcpPath, merged, { mode: 0o600 }) + } +} +``` + +**File: `src/commands/sync.ts`** + +```typescript +// Add to validTargets array +const validTargets = ["opencode", "codex", "droid", "cursor", "pi", "{target}"] as const + +// In resolveOutputRoot() +case "{target}": + 
return path.join(process.cwd(), ".{target}") + +// In main switch +case "{target}": + await syncTo{Target}(outputRoot) + break +``` + +--- + +### Phase 6: Tests + +**File: `tests/{target}-converter.test.ts`** + +Test converter using inline `ClaudePlugin` fixtures: + +```typescript +describe("convertClaudeTo{Target}", () => { + it("converts agents to {target} format", () => { + const plugin: ClaudePlugin = { + name: "test", + agents: [ + { + name: "test-agent", + description: "Test description", + body: "Test body", + capabilities: ["Cap 1", "Cap 2"], + }, + ], + commands: [], + skills: [], + } + + const bundle = convertClaudeTo{Target}(plugin, {}) + + expect(bundle.agents).toHaveLength(1) + expect(bundle.agents[0].name).toBe("test-agent") + expect(bundle.agents[0].content).toContain("Test description") + }) + + it("normalizes agent names", () => { + const plugin: ClaudePlugin = { + name: "test", + agents: [ + { name: "Test Agent", description: "", body: "", capabilities: [] }, + ], + commands: [], + skills: [], + } + + const bundle = convertClaudeTo{Target}(plugin, {}) + expect(bundle.agents[0].name).toBe("test-agent") + }) + + it("deduplicates colliding names", () => { + const plugin: ClaudePlugin = { + name: "test", + agents: [ + { name: "Agent Name", description: "", body: "", capabilities: [] }, + { name: "Agent Name", description: "", body: "", capabilities: [] }, + ], + commands: [], + skills: [], + } + + const bundle = convertClaudeTo{Target}(plugin, {}) + expect(bundle.agents.map(a => a.name)).toEqual(["agent-name", "agent-name-2"]) + }) + + it("transforms content paths (.claude → .{target})", () => { + const result = transformContentFor{Target}("See ~/.claude/config") + expect(result).toContain("~/.{target}/config") + }) + + it("warns when hooks are present", () => { + const spy = jest.spyOn(console, "warn") + const plugin: ClaudePlugin = { + name: "test", + agents: [], + commands: [], + skills: [], + hooks: { hooks: { "file:save": "test" } }, + } + + 
convertClaudeTo{Target}(plugin, {}) + expect(spy).toHaveBeenCalledWith(expect.stringContaining("hooks")) + }) +}) +``` + +**File: `tests/{target}-writer.test.ts`** + +Test writer using temp directories (from `tmp` package): + +```typescript +describe("write{Target}Bundle", () => { + it("writes agents to {target} format", async () => { + const tmpDir = await tmp.dir() + const bundle: {Target}Bundle = { + agents: [{ name: "test", content: "# Test\nBody" }], + commands: [], + skillDirs: [], + } + + await write{Target}Bundle(tmpDir.path, bundle) + + const written = readFileSync(path.join(tmpDir.path, ".{target}", "agents", "test.ext"), "utf-8") + expect(written).toContain("# Test") + }) + + it("does not double-nest when output root is .{target}", async () => { + const tmpDir = await tmp.dir() + const targetDir = path.join(tmpDir.path, ".{target}") + await ensureDir(targetDir) + + const bundle: {Target}Bundle = { + agents: [{ name: "test", content: "# Test" }], + commands: [], + skillDirs: [], + } + + await write{Target}Bundle(targetDir, bundle) + + // Should write to targetDir directly, not targetDir/.{target} + const written = path.join(targetDir, "agents", "test.ext") + expect(existsSync(written)).toBe(true) + }) + + it("backs up existing MCP config", async () => { + const tmpDir = await tmp.dir() + const mcpPath = path.join(tmpDir.path, ".{target}", "mcp.json") + await ensureDir(path.dirname(mcpPath)) + await writeJson(mcpPath, { existing: true }) + + const bundle: {Target}Bundle = { + agents: [], + commands: [], + skillDirs: [], + mcpServers: { "test": { command: "test" } }, + } + + await write{Target}Bundle(tmpDir.path, bundle) + + // Backup should exist + const backups = readdirSync(path.dirname(mcpPath)).filter(f => f.includes("mcp") && f.includes("-")) + expect(backups.length).toBeGreaterThan(0) + }) +}) +``` + +**Key Testing Patterns:** + +- Test normalization, deduplication, content transformation separately +- Use inline plugin fixtures (not file-based) +- 
For writer tests, use temp directories and verify file existence +- Test edge cases: empty names, empty bodies, special characters +- Test error handling: missing files, permission issues + +--- + +## Documentation Requirements + +**File: `docs/specs/{target}.md`** + +Document the target format specification: + +- Last verified date (link to official docs) +- Config file locations (project-level vs. user-level) +- Agent/command/skill format with field descriptions +- MCP configuration structure +- Character limits (if any) +- Example file + +**File: `README.md`** + +Add to supported targets list and include usage examples. + +--- + +## Common Pitfalls and Solutions + +| Pitfall | Solution | +|---------|----------| +| **Double-nesting** (`.cursor/.cursor/`) | Check `path.basename(outputRoot)` before nesting | +| **Inconsistent name normalization** | Use single `normalizeName()` function everywhere | +| **Fragile content transformation** | Test regex patterns against edge cases (file paths, URLs) | +| **Heuristic section extraction fails** | Use structural mapping (description → Overview, body → Procedure) instead | +| **MCP config overwrites user edits** | Always backup with timestamp before overwriting | +| **Skill body not loaded** | Verify `ClaudeSkill` has `skillPath` field for file reading | +| **Missing deduplication** | Build `usedNames` set before conversion, pass to each converter | +| **Unsupported features cause silent loss** | Always warn to stderr (hooks, incompatible MCP types, etc.) 
| +| **Test isolation failures** | Use unique temp directories per test, clean up afterward | +| **Command namespace collisions after flattening** | Use `uniqueName()` with deduplication, test multiple collisions | + +--- + +## Checklist for Adding a New Target + +Use this checklist when adding a new target provider: + +### Implementation +- [ ] Create `src/types/{target}.ts` with bundle and component types +- [ ] Implement `src/converters/claude-to-{target}.ts` with converter and content transformer +- [ ] Implement `src/targets/{target}.ts` with writer +- [ ] Register target in `src/targets/index.ts` +- [ ] Update `src/commands/convert.ts` (add output root resolution, update help text) +- [ ] Update `src/commands/install.ts` (same as convert.ts) +- [ ] (Optional) Implement `src/sync/{target}.ts` and update `src/commands/sync.ts` + +### Testing +- [ ] Create `tests/{target}-converter.test.ts` with converter tests +- [ ] Create `tests/{target}-writer.test.ts` with writer tests +- [ ] (Optional) Create `tests/sync-{target}.test.ts` with sync tests +- [ ] Run full test suite: `bun test` +- [ ] Manual test: `bun run src/index.ts convert --to {target} ./plugins/compound-engineering` + +### Documentation +- [ ] Create `docs/specs/{target}.md` with format specification +- [ ] Update `README.md` with target in list and usage examples +- [ ] Update `CHANGELOG.md` with new target + +### Version Bumping +- [ ] Use a `feat(...)` conventional commit so semantic-release cuts the next minor root CLI release on `main` +- [ ] Do not hand-start a separate root CLI version line in `package.json`; the root package follows the repo `v*` tags and semantic-release writes that version back after release +- [ ] Update plugin.json description if component counts changed +- [ ] Verify CHANGELOG entry is clear + +--- + +## References + +### Implementation Examples + +**Reference implementations by priority (easiest to hardest):** + +1. 
**Droid** (`src/targets/droid.ts`, `src/converters/claude-to-droid.ts`) — Simplest pattern, good learning baseline +2. **Copilot** (`src/targets/copilot.ts`, `src/converters/claude-to-copilot.ts`) — MCP prefixing, double-nesting guard +3. **Devin** (`src/converters/claude-to-devin.ts`) — Content transformation, cross-references, intermediate types +4. **OpenCode** (`src/converters/claude-to-opencode.ts`) — Most comprehensive, handles command structure and config merging + +### Key Utilities + +- `src/utils/frontmatter.ts` — `formatFrontmatter()` and `parseFrontmatter()` +- `src/utils/files.ts` — `writeText()`, `writeJson()`, `copyDir()`, `backupFile()`, `ensureDir()` +- `src/utils/resolve-home.ts` — `expandHome()` for `~/.{target}` path resolution + +### Existing Tests + +- `tests/cursor-converter.test.ts` — Comprehensive converter tests +- `tests/copilot-writer.test.ts` — Writer tests with temp directories +- `tests/sync-copilot.test.ts` — Sync pattern with symlinks and config merge + +--- + +## Related Files + +- `/C:/Source/compound-engineering-plugin/.claude-plugin/plugin.json` — Version and component counts +- `/C:/Source/compound-engineering-plugin/CHANGELOG.md` — Recent additions and patterns +- `/C:/Source/compound-engineering-plugin/README.md` — Usage examples for all targets +- `/C:/Source/compound-engineering-plugin/docs/solutions/plugin-versioning-requirements.md` — Checklist for releases diff --git a/docs/solutions/plugin-versioning-requirements.md b/docs/solutions/plugin-versioning-requirements.md index 5122780..aa53984 100644 --- a/docs/solutions/plugin-versioning-requirements.md +++ b/docs/solutions/plugin-versioning-requirements.md @@ -13,22 +13,22 @@ component: plugin-development When making changes to the compound-engineering plugin, documentation can get out of sync with the actual components (agents, commands, skills). This leads to confusion about what's included in each version and makes it difficult to track changes over time. 
+This document applies to the embedded marketplace plugin metadata, not the root CLI package release version. The root CLI package (`package.json`, root `CHANGELOG.md`, repo `v*` tags) is managed by semantic-release and follows the repository tag line. + ## Solution -**Every change to the plugin MUST include:** +**Routine PRs should not cut plugin releases.** -1. **Version bump in `plugin.json`** - - Follow semantic versioning (semver) - - MAJOR: Breaking changes or major reorganization - - MINOR: New agents, commands, or skills added - - PATCH: Bug fixes, documentation updates, minor improvements +The embedded plugin version is release-owned metadata. The maintainer uses a local slash command to choose the next version and generate release changelog entries after deciding which merged changes ship together. Because multiple PRs may merge before release, contributors should not guess release versions inside individual PRs. -2. **CHANGELOG.md update** - - Add entry under `## [Unreleased]` or new version section - - Use Keep a Changelog format - - Categories: Added, Changed, Deprecated, Removed, Fixed, Security +Contributors should: -3. **README.md verification** +1. **Avoid release bookkeeping in normal PRs** + - Do not manually bump `.claude-plugin/plugin.json` + - Do not manually bump `.claude-plugin/marketplace.json` + - Do not cut release sections in `CHANGELOG.md` + +2. 
**Keep substantive docs accurate** - Verify component counts match actual files - Verify agent/command/skill tables are accurate - Update descriptions if functionality changed @@ -38,8 +38,9 @@ When making changes to the compound-engineering plugin, documentation can get ou ```markdown Before committing changes to compound-engineering plugin: -- [ ] Version bumped in `.claude-plugin/plugin.json` -- [ ] CHANGELOG.md updated with changes +- [ ] No manual version bump in `.claude-plugin/plugin.json` +- [ ] No manual version bump in `.claude-plugin/marketplace.json` +- [ ] No manual release section added to `CHANGELOG.md` - [ ] README.md component counts verified - [ ] README.md tables updated (if adding/removing/renaming) - [ ] plugin.json description updated (if component counts changed) @@ -47,8 +48,8 @@ Before committing changes to compound-engineering plugin: ## File Locations -- Version: `.claude-plugin/plugin.json` → `"version": "X.Y.Z"` -- Changelog: `CHANGELOG.md` +- Version is release-owned: `.claude-plugin/plugin.json` and `.claude-plugin/marketplace.json` +- Changelog release sections are release-owned: `CHANGELOG.md` - Readme: `README.md` ## Example Workflow @@ -56,11 +57,10 @@ Before committing changes to compound-engineering plugin: When adding a new agent: 1. Create the agent file in `agents/[category]/` -2. Bump version in `plugin.json` (minor version for new agent) -3. Add to CHANGELOG under `### Added` -4. Add row to README agent table -5. Update README component count -6. Update plugin.json description with new counts +2. Update README agent table +3. Update README component count +4. Update plugin metadata description with new counts if needed +5. Leave version selection and release changelog generation to the maintainer's release command ## Prevention @@ -68,10 +68,12 @@ This documentation serves as a reminder. When Claude Code works on this plugin, 1. Check this doc before committing changes 2. Follow the checklist above -3. 
Never commit partial updates (all three files must be updated together) +3. Do not guess release versions in feature PRs ## Related Files -- `/Users/kieranklaassen/every-marketplace/plugins/compound-engineering/.claude-plugin/plugin.json` -- `/Users/kieranklaassen/every-marketplace/plugins/compound-engineering/CHANGELOG.md` -- `/Users/kieranklaassen/every-marketplace/plugins/compound-engineering/README.md` +- `/Users/kieranklaassen/compound-engineering-plugin/plugins/compound-engineering/.claude-plugin/plugin.json` +- `/Users/kieranklaassen/compound-engineering-plugin/plugins/compound-engineering/CHANGELOG.md` +- `/Users/kieranklaassen/compound-engineering-plugin/plugins/compound-engineering/README.md` +- `/Users/kieranklaassen/compound-engineering-plugin/package.json` +- `/Users/kieranklaassen/compound-engineering-plugin/CHANGELOG.md` diff --git a/docs/specs/copilot.md b/docs/specs/copilot.md new file mode 100644 index 0000000..bee2990 --- /dev/null +++ b/docs/specs/copilot.md @@ -0,0 +1,122 @@ +# GitHub Copilot Spec (Agents, Skills, MCP) + +Last verified: 2026-02-14 + +## Primary sources + +``` +https://docs.github.com/en/copilot/reference/custom-agents-configuration +https://docs.github.com/en/copilot/concepts/agents/about-agent-skills +https://docs.github.com/en/copilot/concepts/agents/coding-agent/mcp-and-coding-agent +``` + +## Config locations + +| Scope | Path | +|-------|------| +| Project agents | `.github/agents/*.agent.md` | +| Project skills | `.github/skills/*/SKILL.md` | +| Project instructions | `.github/copilot-instructions.md` | +| Path-specific instructions | `.github/instructions/*.instructions.md` | +| Project prompts | `.github/prompts/*.prompt.md` | +| Org/enterprise agents | `.github-private/agents/*.agent.md` | +| Personal skills | `~/.copilot/skills/*/SKILL.md` | +| Directory instructions | `AGENTS.md` (nearest ancestor wins) | + +## Agents (.agent.md files) + +- Custom agents are Markdown files with YAML frontmatter stored in 
`.github/agents/`. +- File extension is `.agent.md` (or `.md`). Filenames may only contain: `.`, `-`, `_`, `a-z`, `A-Z`, `0-9`. +- `description` is the only required frontmatter field. + +### Frontmatter fields + +| Field | Required | Default | Description | +|-------|----------|---------|-------------| +| `name` | No | Derived from filename | Display name | +| `description` | **Yes** | — | What the agent does | +| `tools` | No | `["*"]` | Tool access list. `[]` disables all tools. | +| `target` | No | both | `vscode`, `github-copilot`, or omit for both | +| `infer` | No | `true` | Auto-select based on task context | +| `model` | No | Platform default | AI model (works in IDE, may be ignored on github.com) | +| `mcp-servers` | No | — | MCP config (org/enterprise agents only) | +| `metadata` | No | — | Arbitrary key-value annotations | + +### Character limit + +Agent body content is limited to **30,000 characters**. + +### Tool names + +| Name | Aliases | Purpose | +|------|---------|---------| +| `execute` | `shell`, `Bash` | Run shell commands | +| `read` | `Read` | Read files | +| `edit` | `Edit`, `Write` | Modify files | +| `search` | `Grep`, `Glob` | Search files | +| `agent` | `Task` | Invoke other agents | +| `web` | `WebSearch`, `WebFetch` | Web access | + +## Skills (SKILL.md) + +- Skills follow the open SKILL.md standard (same format as Claude Code and Cursor). +- A skill is a directory containing `SKILL.md` plus optional `scripts/`, `references/`, and `assets/`. +- YAML frontmatter requires `name` and `description` fields. +- Skills are loaded on-demand when Copilot determines relevance. 
+ +### Discovery locations + +| Scope | Path | +|-------|------| +| Project | `.github/skills/*/SKILL.md` | +| Project (Claude-compatible) | `.claude/skills/*/SKILL.md` | +| Project (auto-discovery) | `.agents/skills/*/SKILL.md` | +| Personal | `~/.copilot/skills/*/SKILL.md` | + +## MCP (Model Context Protocol) + +- MCP configuration is set via **Repository Settings > Copilot > Coding agent > MCP configuration** on GitHub. +- Repository-level agents **cannot** define MCP servers inline; use repository settings instead. +- Org/enterprise agents can embed MCP server definitions in frontmatter. +- All env var names must use the `COPILOT_MCP_` prefix. +- Only MCP tools are supported (not resources or prompts). + +### Config structure + +```json +{ + "mcpServers": { + "server-name": { + "type": "local", + "command": "npx", + "args": ["package"], + "tools": ["*"], + "env": { + "API_KEY": "COPILOT_MCP_API_KEY" + } + } + } +} +``` + +### Server types + +| Type | Fields | +|------|--------| +| Local/stdio | `type: "local"`, `command`, `args`, `tools`, `env` | +| Remote/SSE | `type: "sse"`, `url`, `tools`, `headers` | + +## Prompts (.prompt.md) + +- Reusable prompt files stored in `.github/prompts/`. +- Available in VS Code, Visual Studio, and JetBrains IDEs only (not on github.com). +- Invoked via `/promptname` in chat. +- Support variable syntax: `${input:name}`, `${file}`, `${selection}`. + +## Precedence + +1. Repository-level agents +2. Organization-level agents (`.github-private`) +3. Enterprise-level agents (`.github-private`) + +Within a repo, `AGENTS.md` files in directories provide nearest-ancestor-wins instructions. 
diff --git a/docs/specs/kiro.md b/docs/specs/kiro.md new file mode 100644 index 0000000..056be0d --- /dev/null +++ b/docs/specs/kiro.md @@ -0,0 +1,171 @@ +# Kiro CLI Spec (Custom Agents, Skills, Steering, MCP, Settings) + +Last verified: 2026-02-17 + +## Primary sources + +``` +https://kiro.dev/docs/cli/ +https://kiro.dev/docs/cli/custom-agents/configuration-reference/ +https://kiro.dev/docs/cli/skills/ +https://kiro.dev/docs/cli/steering/ +https://kiro.dev/docs/cli/mcp/ +https://kiro.dev/docs/cli/hooks/ +https://agentskills.io +``` + +## Config locations + +- Project-level config: `.kiro/` directory at project root. +- No global/user-level config directory — all config is project-scoped. + +## Directory structure + +``` +.kiro/ +├── agents/ +│ ├── .json # Agent configuration +│ └── prompts/ +│ └── .md # Agent prompt files +├── skills/ +│ └── / +│ └── SKILL.md # Skill definition +├── steering/ +│ └── .md # Always-on context files +└── settings/ + └── mcp.json # MCP server configuration +``` + +## Custom agents (JSON config + prompt files) + +- Custom agents are JSON files in `.kiro/agents/`. +- Each agent has a corresponding prompt `.md` file, referenced via `file://` URI. +- Agent config has 14 possible fields (see below). +- Agents are activated by user selection (no auto-activation). +- The converter outputs a subset of fields relevant to converted plugins. 
+ +### Agent config fields + +| Field | Type | Used in conversion | Notes | +|---|---|---|---| +| `name` | string | Yes | Agent display name | +| `description` | string | Yes | Human-readable description | +| `prompt` | string or `file://` URI | Yes | System prompt or file reference | +| `tools` | string[] | Yes (`["*"]`) | Available tools | +| `resources` | string[] | Yes | `file://`, `skill://`, `knowledgeBase` URIs | +| `includeMcpJson` | boolean | Yes (`true`) | Inherit project MCP servers | +| `welcomeMessage` | string | Yes | Agent switch greeting | +| `mcpServers` | object | No | Per-agent MCP config (use includeMcpJson instead) | +| `toolAliases` | Record | No | Tool name remapping | +| `allowedTools` | string[] | No | Auto-approve patterns | +| `toolsSettings` | object | No | Per-tool configuration | +| `hooks` | object | No (future work) | 5 trigger types | +| `model` | string | No | Model selection | +| `keyboardShortcut` | string | No | Quick-switch shortcut | + +### Example agent config + +```json +{ + "name": "security-reviewer", + "description": "Reviews code for security vulnerabilities", + "prompt": "file://./prompts/security-reviewer.md", + "tools": ["*"], + "resources": [ + "file://.kiro/steering/**/*.md", + "skill://.kiro/skills/**/SKILL.md" + ], + "includeMcpJson": true, + "welcomeMessage": "Switching to security-reviewer. Reviews code for security vulnerabilities" +} +``` + +## Skills (SKILL.md standard) + +- Skills follow the open [Agent Skills](https://agentskills.io) standard. +- A skill is a folder containing `SKILL.md` plus optional supporting files. +- Skills live in `.kiro/skills/`. +- `SKILL.md` uses YAML frontmatter with `name` and `description` fields. +- Kiro activates skills on demand based on description matching. +- The `description` field is critical — Kiro uses it to decide when to activate the skill. + +### Constraints + +- Skill name: max 64 characters, pattern `^[a-z][a-z0-9-]*$`, no consecutive hyphens (`--`). 
+- Skill description: max 1024 characters. +- Skill name must match parent directory name. + +### Example + +```yaml +--- +name: workflows-plan +description: Plan work by analyzing requirements and creating actionable steps +--- + +# Planning Workflow + +Detailed instructions... +``` + +## Steering files + +- Markdown files in `.kiro/steering/`. +- Always loaded into every agent session's context. +- Equivalent to Claude Code's CLAUDE.md. +- Used for project-wide instructions, coding standards, and conventions. + +## MCP server configuration + +- MCP servers are configured in `.kiro/settings/mcp.json`. +- **Only stdio transport is supported** — `command` + `args` + `env`. +- HTTP/SSE transport (`url`, `headers`) is NOT supported by Kiro CLI. +- The converter skips HTTP-only MCP servers with a warning. + +### Example + +```json +{ + "mcpServers": { + "playwright": { + "command": "npx", + "args": ["-y", "@anthropic/mcp-playwright"] + }, + "context7": { + "command": "npx", + "args": ["-y", "@context7/mcp-server"] + } + } +} +``` + +## Hooks + +- Kiro supports 5 hook trigger types: `agentSpawn`, `userPromptSubmit`, `preToolUse`, `postToolUse`, `stop`. +- Hooks are configured inside agent JSON configs (not separate files). +- 3 of 5 triggers map to Claude Code hooks (`preToolUse`, `postToolUse`, `stop`). +- Not converted by the plugin converter for MVP — a warning is emitted. 
+
+## Conversion lossy mappings
+
+| Claude Code Feature | Kiro Status | Notes |
+|---|---|---|
+| `Edit` tool (surgical replacement) | Degraded -> `write` (full-file) | Kiro write overwrites entire files |
+| `context: fork` | Lost | No execution isolation control |
+| ``!`command` `` dynamic injection | Lost | No pre-processing of markdown |
+| `disable-model-invocation` | Lost | No invocation control |
+| `allowed-tools` per skill | Lost | No tool permission scoping per skill |
+| `$ARGUMENTS` interpolation | Lost | No structured argument passing |
+| Claude hooks | Skipped | Future follow-up (near-1:1 for 3/5 triggers) |
+| HTTP MCP servers | Skipped | Kiro only supports stdio transport |
+
+## Overwrite behavior during conversion
+
+| Content Type | Strategy | Rationale |
+|---|---|---|
+| Generated agents (JSON + prompt) | Overwrite | Generated, not user-authored |
+| Generated skills (from commands) | Overwrite | Generated, not user-authored |
+| Copied skills (pass-through) | Overwrite | Plugin is source of truth |
+| Steering files | Overwrite | Generated from CLAUDE.md |
+| `mcp.json` | Merge with backup | User may have added their own servers |
+| User-created agents/skills | Preserved | Don't delete orphans |
diff --git a/docs/specs/windsurf.md b/docs/specs/windsurf.md
new file mode 100644
index 0000000..a895b52
--- /dev/null
+++ b/docs/specs/windsurf.md
@@ -0,0 +1,477 @@
+# Windsurf Editor Global Configuration Guide
+
+> **Purpose**: Technical reference for programmatically creating and managing Windsurf's global Skills, Workflows, and Rules.
+>
+> **Source**: Official Windsurf documentation at [docs.windsurf.com](https://docs.windsurf.com) + local file analysis.
+>
+> **Last Updated**: February 2026
+
+---
+
+## Table of Contents
+
+1. [Overview](#overview)
+2. [Base Directory Structure](#base-directory-structure)
+3. [Skills](#skills)
+4. [Workflows](#workflows)
+5. [Rules](#rules)
+6. [Memories](#memories)
+7.
[System-Level Configuration (Enterprise)](#system-level-configuration-enterprise) +8. [Programmatic Creation Reference](#programmatic-creation-reference) +9. [Best Practices](#best-practices) + +--- + +## Overview + +Windsurf provides three main customization mechanisms: + +| Feature | Purpose | Invocation | +|---------|---------|------------| +| **Skills** | Complex multi-step tasks with supporting resources | Automatic (progressive disclosure) or `@skill-name` | +| **Workflows** | Reusable step-by-step procedures | Slash command `/workflow-name` | +| **Rules** | Behavioral guidelines and preferences | Trigger-based (always-on, glob, manual, or model decision) | + +All three support both **workspace-level** (project-specific) and **global** (user-wide) scopes. + +--- + +## Base Directory Structure + +### Global Configuration Root + +| OS | Path | +|----|------| +| **Windows** | `C:\Users\{USERNAME}\.codeium\windsurf\` | +| **macOS** | `~/.codeium/windsurf/` | +| **Linux** | `~/.codeium/windsurf/` | + +### Directory Layout + +``` +~/.codeium/windsurf/ +├── skills/ # Global skills (directories) +│ └── {skill-name}/ +│ └── SKILL.md +├── global_workflows/ # Global workflows (flat .md files) +│ └── {workflow-name}.md +├── rules/ # Global rules (flat .md files) +│ └── {rule-name}.md +├── memories/ +│ ├── global_rules.md # Always-on global rules (plain text) +│ └── *.pb # Auto-generated memories (protobuf) +├── mcp_config.json # MCP server configuration +└── user_settings.pb # User settings (protobuf) +``` + +--- + +## Skills + +Skills bundle instructions with supporting resources for complex, multi-step tasks. Cascade uses **progressive disclosure** to automatically invoke skills when relevant. 
+ +### Storage Locations + +| Scope | Location | +|-------|----------| +| **Global** | `~/.codeium/windsurf/skills/{skill-name}/SKILL.md` | +| **Workspace** | `.windsurf/skills/{skill-name}/SKILL.md` | + +### Directory Structure + +Each skill is a **directory** (not a single file) containing: + +``` +{skill-name}/ +├── SKILL.md # Required: Main skill definition +├── references/ # Optional: Reference documentation +├── assets/ # Optional: Images, diagrams, etc. +├── scripts/ # Optional: Helper scripts +└── {any-other-files} # Optional: Templates, configs, etc. +``` + +### SKILL.md Format + +```markdown +--- +name: skill-name +description: Brief description shown to model to help it decide when to invoke the skill +--- + +# Skill Title + +Instructions for the skill go here in markdown format. + +## Section 1 +Step-by-step guidance... + +## Section 2 +Reference supporting files using relative paths: +- See [deployment-checklist.md](./deployment-checklist.md) +- Run script: [deploy.sh](./scripts/deploy.sh) +``` + +### Required YAML Frontmatter Fields + +| Field | Required | Description | +|-------|----------|-------------| +| `name` | **Yes** | Unique identifier (lowercase letters, numbers, hyphens only). Must match directory name. | +| `description` | **Yes** | Explains what the skill does and when to use it. Critical for automatic invocation. | + +### Naming Convention + +- Use **lowercase-kebab-case**: `deploy-to-staging`, `code-review`, `setup-dev-environment` +- Name must match the directory name exactly + +### Invocation Methods + +1. **Automatic**: Cascade automatically invokes when request matches skill description +2. 
**Manual**: Type `@skill-name` in Cascade input + +### Example: Complete Skill + +``` +~/.codeium/windsurf/skills/deploy-to-production/ +├── SKILL.md +├── deployment-checklist.md +├── rollback-procedure.md +└── config-template.yaml +``` + +**SKILL.md:** +```markdown +--- +name: deploy-to-production +description: Guides the deployment process to production with safety checks. Use when deploying to prod, releasing, or pushing to production environment. +--- + +## Pre-deployment Checklist +1. Run all tests +2. Check for uncommitted changes +3. Verify environment variables + +## Deployment Steps +Follow these steps to deploy safely... + +See [deployment-checklist.md](./deployment-checklist.md) for full checklist. +See [rollback-procedure.md](./rollback-procedure.md) if issues occur. +``` + +--- + +## Workflows + +Workflows define step-by-step procedures invoked via slash commands. They guide Cascade through repetitive tasks. + +### Storage Locations + +| Scope | Location | +|-------|----------| +| **Global** | `~/.codeium/windsurf/global_workflows/{workflow-name}.md` | +| **Workspace** | `.windsurf/workflows/{workflow-name}.md` | + +### File Format + +Workflows are **single markdown files** (not directories): + +```markdown +--- +description: Short description of what the workflow does +--- + +# Workflow Title + +> Arguments: [optional arguments description] + +Step-by-step instructions in markdown. + +1. First step +2. Second step +3. 
Third step +``` + +### Required YAML Frontmatter Fields + +| Field | Required | Description | +|-------|----------|-------------| +| `description` | **Yes** | Short title/description shown in UI | + +### Invocation + +- Slash command: `/workflow-name` +- Filename becomes the command (e.g., `deploy.md` → `/deploy`) + +### Constraints + +- **Character limit**: 12,000 characters per workflow file +- Workflows can call other workflows: Include instructions like "Call `/other-workflow`" + +### Example: Complete Workflow + +**File**: `~/.codeium/windsurf/global_workflows/address-pr-comments.md` + +```markdown +--- +description: Address all PR review comments systematically +--- + +# Address PR Comments + +> Arguments: [PR number] + +1. Check out the PR branch: `gh pr checkout [id]` + +2. Get comments on PR: + ```bash + gh api --paginate repos/[owner]/[repo]/pulls/[id]/comments | jq '.[] | {user: .user.login, body, path, line}' + ``` + +3. For EACH comment: + a. Print: "(index). From [user] on [file]:[lines] — [body]" + b. Analyze the file and line range + c. If unclear, ask for clarification + d. Make the change before moving to next comment + +4. Summarize what was done and which comments need attention +``` + +--- + +## Rules + +Rules provide persistent behavioral guidelines that influence how Cascade responds. + +### Storage Locations + +| Scope | Location | +|-------|----------| +| **Global** | `~/.codeium/windsurf/rules/{rule-name}.md` | +| **Workspace** | `.windsurf/rules/{rule-name}.md` | + +### File Format + +Rules are **single markdown files**: + +```markdown +--- +description: When to use this rule +trigger: activation_mode +globs: ["*.py", "src/**/*.ts"] +--- + +Rule instructions in markdown format. 
+ +- Guideline 1 +- Guideline 2 +- Guideline 3 +``` + +### YAML Frontmatter Fields + +| Field | Required | Description | +|-------|----------|-------------| +| `description` | **Yes** | Describes when to use the rule | +| `trigger` | Optional | Activation mode (see below) | +| `globs` | Optional | File patterns for glob trigger | + +### Activation Modes (trigger field) + +| Mode | Value | Description | +|------|-------|-------------| +| **Manual** | `manual` | Activated via `@mention` in Cascade input | +| **Always On** | `always` | Always applied to every conversation | +| **Model Decision** | `model_decision` | Model decides based on description | +| **Glob** | `glob` | Applied when working with files matching pattern | + +### Constraints + +- **Character limit**: 12,000 characters per rule file + +### Example: Complete Rule + +**File**: `~/.codeium/windsurf/rules/python-style.md` + +```markdown +--- +description: Python coding standards and style guidelines. Use when writing or reviewing Python code. +trigger: glob +globs: ["*.py", "**/*.py"] +--- + +# Python Coding Guidelines + +- Use type hints for all function parameters and return values +- Follow PEP 8 style guide +- Use early returns when possible +- Always add docstrings to public functions and classes +- Prefer f-strings over .format() or % formatting +- Use pathlib instead of os.path for file operations +``` + +--- + +## Memories + +### Global Rules (Always-On) + +**Location**: `~/.codeium/windsurf/memories/global_rules.md` + +This is a special file for rules that **always apply** to all conversations. Unlike rules in the `rules/` directory, this file: + +- Does **not** require YAML frontmatter +- Is plain text/markdown +- Is always active (no trigger configuration) + +**Format:** +```markdown +Plain text rules that always apply to all conversations. 
+ +- Rule 1 +- Rule 2 +- Rule 3 +``` + +### Auto-Generated Memories + +Cascade automatically creates memories during conversations, stored as `.pb` (protobuf) files in `~/.codeium/windsurf/memories/`. These are managed by Windsurf and should not be manually edited. + +--- + +## System-Level Configuration (Enterprise) + +Enterprise organizations can deploy system-level configurations that apply globally and cannot be modified by end users. + +### System-Level Paths + +| Type | Windows | macOS | Linux/WSL | +|------|---------|-------|-----------| +| **Rules** | `C:\ProgramData\Windsurf\rules\*.md` | `/Library/Application Support/Windsurf/rules/*.md` | `/etc/windsurf/rules/*.md` | +| **Workflows** | `C:\ProgramData\Windsurf\workflows\*.md` | `/Library/Application Support/Windsurf/workflows/*.md` | `/etc/windsurf/workflows/*.md` | + +### Precedence Order + +When items with the same name exist at multiple levels: + +1. **System** (highest priority) - Organization-wide, deployed by IT +2. **Workspace** - Project-specific in `.windsurf/` +3. **Global** - User-defined in `~/.codeium/windsurf/` +4. **Built-in** - Default items provided by Windsurf + +--- + +## Programmatic Creation Reference + +### Quick Reference Table + +| Type | Path Pattern | Format | Key Fields | +|------|--------------|--------|------------| +| **Skill** | `skills/{name}/SKILL.md` | YAML frontmatter + markdown | `name`, `description` | +| **Workflow** | `global_workflows/{name}.md` (global) or `workflows/{name}.md` (workspace) | YAML frontmatter + markdown | `description` | +| **Rule** | `rules/{name}.md` | YAML frontmatter + markdown | `description`, `trigger`, `globs` | +| **Global Rules** | `memories/global_rules.md` | Plain text/markdown | None | + +### Minimal Templates + +#### Skill (SKILL.md) +```markdown +--- +name: my-skill +description: What this skill does and when to use it +--- + +Instructions here. +``` + +#### Workflow +```markdown +--- +description: What this workflow does +--- + +1. 
Step one
+2. Step two
+```
+
+#### Rule
+```markdown
+---
+description: When this rule applies
+trigger: model_decision
+---
+
+- Guideline one
+- Guideline two
+```
+
+### Validation Checklist
+
+When programmatically creating items:
+
+- [ ] **Skills**: Directory exists with `SKILL.md` inside
+- [ ] **Skills**: `name` field matches directory name exactly
+- [ ] **Skills**: Name uses only lowercase letters, numbers, hyphens
+- [ ] **Workflows/Rules**: File is `.md` extension
+- [ ] **All**: YAML frontmatter uses `---` delimiters
+- [ ] **All**: `description` field is present and meaningful
+- [ ] **All**: File size under 12,000 characters (workflows/rules)
+
+---
+
+## Best Practices
+
+### Writing Effective Descriptions
+
+The `description` field is critical for automatic invocation. Be specific:
+
+**Good:**
+```yaml
+description: Guides deployment to staging environment with pre-flight checks. Use when deploying to staging, testing releases, or preparing for production.
+```
+
+**Bad:**
+```yaml
+description: Deployment stuff
+```
+
+### Formatting Guidelines
+
+- Use bullet points and numbered lists (easier for Cascade to follow)
+- Use markdown headers to organize sections
+- Keep rules concise and specific
+- Avoid generic rules like "write good code" (already built-in)
+
+### XML Tags for Grouping
+
+XML tags can effectively group related rules:
+
+```markdown
+<coding_guidelines>
+- Use early returns when possible
+- Always add documentation for new functions
+- Prefer composition over inheritance
+</coding_guidelines>
+
+<testing_guidelines>
+- Write unit tests for all public methods
+- Maintain 80% code coverage
+</testing_guidelines>
+```
+
+### Skills vs Rules vs Workflows
+
+| Use Case | Recommended |
+|----------|-------------|
+| Multi-step procedure with supporting files | **Skill** |
+| Repeatable CLI/automation sequence | **Workflow** |
+| Coding style preferences | **Rule** |
+| Project conventions | **Rule** |
+| Deployment procedure | **Skill** or **Workflow** |
+| Code review checklist | **Skill** |
+
+---
+
+##
Additional Resources + +- **Official Documentation**: [docs.windsurf.com](https://docs.windsurf.com) +- **Skills Specification**: [agentskills.io](https://agentskills.io/home) +- **Rule Templates**: [windsurf.com/editor/directory](https://windsurf.com/editor/directory) diff --git a/package.json b/package.json index 832a5b2..f6be12a 100644 --- a/package.json +++ b/package.json @@ -1,11 +1,13 @@ { "name": "@every-env/compound-plugin", - "version": "0.7.0", + "version": "2.37.1", "type": "module", "private": false, "bin": { - "compound-plugin": "./src/index.ts" + "compound-plugin": "src/index.ts" }, + "homepage": "https://github.com/EveryInc/compound-engineering-plugin", + "repository": "https://github.com/EveryInc/compound-engineering-plugin", "publishConfig": { "access": "public" }, @@ -14,13 +16,17 @@ "convert": "bun run src/index.ts convert", "list": "bun run src/index.ts list", "cli:install": "bun run src/index.ts install", - "test": "bun test" + "test": "bun test", + "release:dry-run": "semantic-release --dry-run" }, "dependencies": { "citty": "^0.1.6", "js-yaml": "^4.1.0" }, "devDependencies": { - "bun-types": "^1.0.0" + "@semantic-release/changelog": "^6.0.3", + "@semantic-release/git": "^10.0.1", + "bun-types": "^1.0.0", + "semantic-release": "^25.0.3" } } diff --git a/plans/grow-your-own-garden-plugin-architecture.md b/plans/grow-your-own-garden-plugin-architecture.md deleted file mode 100644 index b64c054..0000000 --- a/plans/grow-your-own-garden-plugin-architecture.md +++ /dev/null @@ -1,102 +0,0 @@ -# Grow Your Own Garden: Adaptive Agent Ecosystem - -> **Issue:** https://github.com/EveryInc/compound-engineering-plugin/issues/20 - -## The Idea - -Everyone grows their own garden, but we're all using the same process. - -Start from a **seed** (minimal core: `/plan`, `/work`, `/review`, `/compound`). 
Each `/compound` loop can suggest adding agents based on what you're working on—like building up a test suite to prevent regressions, but for code review expertise. - -## Current Problem - -- Monolithic plugin: 24 agents, users use ~30% -- No personalization (same agents for Rails dev and Python dev) -- Static collection that doesn't adapt - -## Proposed Solution - -### The Seed (Core Plugin) - -4 commands + minimal agents: - -| Component | What's Included | -|-----------|-----------------| -| Commands | `/plan`, `/work`, `/review`, `/compound` | -| Review Agents | security, performance, simplicity, architecture, patterns | -| Research Agents | best-practices, framework-docs, git-history, repo-analyst | -| Skills | compound-docs, file-todos, git-worktree | -| MCP Servers | playwright, context7 | - -### The Growth Loop - -After each `/compound`: - -``` -✅ Learning documented - -💡 It looks like you're using Rails. - Would you like to add the "DHH Rails Reviewer"? - - [y] Yes [n] No [x] Never ask -``` - -Three sources of new agents: -1. **Predefined** - "You're using Rails, add DHH reviewer?" -2. **Dynamic** - "You're using actor model, create an expert?" -3. **Custom** - "Want to create an agent for this pattern?" - -### Agent Storage - -``` -.claude/agents/ → Project-specific (highest priority) -~/.claude/agents/ → User's garden -plugin/agents/ → From installed plugins -``` - -## Implementation Phases - -### Phase 1: Split the Plugin -- Create `agent-library/` with framework-specific agents (Rails, Python, TypeScript, Frontend) -- Keep `compound-engineering` as core with universal agents -- No breaking changes—existing users unaffected - -### Phase 2: Agent Discovery -- `/review` discovers agents from all three locations -- Project agents override user agents override plugin agents - -### Phase 3: Growth via /compound -- Detect tech stack (Gemfile, package.json, etc.) 
-- Suggest relevant agents after documenting learnings -- Install accepted agents to `~/.claude/agents/` - -### Phase 4: Management -- `/agents list` - See your garden -- `/agents add ` - Add from library -- `/agents disable ` - Temporarily disable - -## What Goes Where - -**Core (seed):** 11 framework-agnostic agents -- security-sentinel, performance-oracle, code-simplicity-reviewer -- architecture-strategist, pattern-recognition-specialist -- 4 research agents, 2 workflow agents - -**Agent Library:** 10 specialized agents -- Rails: kieran-rails, dhh-rails, data-integrity (3) -- Python: kieran-python (1) -- TypeScript: kieran-typescript (1) -- Frontend: julik-races, design-iterator, design-reviewer, figma-sync (4) -- Editorial: every-style-editor (1) - -## Key Constraint - -Claude Code doesn't support plugin dependencies. Each plugin must be independent. Users manually install what they need, or we suggest additions via `/compound`. - -## Acceptance Criteria - -- [ ] Core plugin works standalone with universal agents -- [ ] `/compound` suggests agents based on detected tech stack -- [ ] Users can accept/decline suggestions -- [ ] `/agents` command for garden management -- [ ] No breaking changes for existing users diff --git a/plans/landing-page-launchkit-refresh.md b/plans/landing-page-launchkit-refresh.md deleted file mode 100644 index 8384ee2..0000000 --- a/plans/landing-page-launchkit-refresh.md +++ /dev/null @@ -1,279 +0,0 @@ -# Landing Page LaunchKit Refresh - -## Overview - -Review and enhance the `/docs/index.html` landing page using LaunchKit elements and Pragmatic Technical Writing style (Hunt/Thomas, Joel Spolsky). The current implementation is strong but can be refined section-by-section. 
- -## Current State Assessment - -### What's Working Well -- Specific, outcome-focused hero headline ("12 expert opinions in 30 seconds") -- Developer-authentic copywriting (N+1 queries, CORS, SQL injection) -- Stats section with clear metrics (23 agents, 16 commands, 11 skills, 2 MCP servers) -- Philosophy section with concrete story (N+1 query bug) -- Three-step installation with actual commands -- FAQ accordion following LaunchKit patterns -- Categorized feature sections with code examples - -### Missing Elements (From Best Practices Research) -1. **Social Proof Section** - No testimonials, GitHub stars, or user metrics -2. **Visual Demo** - No GIF/animation showing the tool in action -3. **Arrow icons on CTAs** - 26% conversion boost from studies -4. **Trust indicators** - Open source badge, license info - ---- - -## Section-by-Section Review Plan - -### 1. Hero Section (lines 56-78) - -**Current:** -```html -

Your Code Reviews Just Got 12 Expert Opinions. In 30 Seconds.

-``` - -**Review Checklist:** -- [ ] Headline follows Pragmatic Writing (concrete before abstract) ✅ -- [ ] Eyebrow badge is current (Version 2.6.0) - verify -- [ ] Description paragraph under 3 sentences ✅ -- [ ] Button group has arrow icon on primary CTA -- [ ] "Read the Docs" secondary CTA present ✅ - -**Potential Improvements:** -- Add `→` arrow to "Install Plugin" button -- Consider adding animated terminal GIF below buttons showing `/review` in action - -### 2. Stats Section (lines 81-104) - -**Current:** 4 stat cards (23 agents, 16 commands, 11 skills, 2 MCP servers) - -**Review Checklist:** -- [ ] Numbers are accurate (verify against actual file counts) -- [ ] Icons are appropriate for each stat -- [ ] Hover effects working properly -- [ ] Mobile layout (2x2 grid) is readable - -**Potential Improvements:** -- Add "developers using" or "reviews run" metric if available -- Consider adding subtle animation on scroll - -### 3. Philosophy Section (lines 107-192) - -**Current:** "Why Your Third Code Review Should Be Easier Than Your First" with N+1 query story - -**Review Checklist:** -- [ ] Opens with concrete story (N+1 query) ✅ -- [ ] Quote block is memorable and quotable -- [ ] Four pillars (Plan, Delegate, Assess, Codify) are clear -- [ ] Each pillar has: tagline, description, tool tags -- [ ] Descriptions use "you" voice ✅ - -**Potential Improvements:** -- Review pillar descriptions for passive voice -- Ensure each pillar description follows PAS (Problem, Agitate, Solve) pattern -- Check tool tags are accurate and current - -### 4. 
Agents Section (lines 195-423) - -**Current:** 23 agents in 5 categories (Review, Research, Design, Workflow, Docs) - -**Review Checklist:** -- [ ] All 23 agents are listed (count actual files) -- [ ] Categories are logical and scannable -- [ ] Each card has: name, badge, description, usage code -- [ ] Descriptions are conversational (not passive) -- [ ] Critical badges (Security, Data) stand out - -**Potential Improvements:** -- Review agent descriptions against pragmatic writing checklist -- Ensure descriptions answer "when would I use this?" -- Add concrete scenarios to generic descriptions - -### 5. Commands Section (lines 426-561) - -**Current:** 16 commands in 2 categories (Workflow, Utility) - -**Review Checklist:** -- [ ] All 16 commands are listed (count actual files) -- [ ] Core workflow commands are highlighted -- [ ] Descriptions are action-oriented -- [ ] Command names match actual implementation - -**Potential Improvements:** -- Review command descriptions for passive voice -- Lead with outcomes, not features -- Add "saves you X minutes" framing where appropriate - -### 6. Skills Section (lines 564-703) - -**Current:** 11 skills in 3 categories (Development, Content/Workflow, Image Generation) - -**Review Checklist:** -- [ ] All 11 skills are listed (count actual directories) -- [ ] Featured skill (gemini-imagegen) is properly highlighted -- [ ] API key requirement is clear -- [ ] Skill invocation syntax is correct - -**Potential Improvements:** -- Review skill descriptions against pragmatic writing -- Ensure each skill answers "what problem does this solve?" - -### 7. MCP Servers Section (lines 706-751) - -**Current:** 2 MCP servers (Playwright, Context7) - -**Review Checklist:** -- [ ] Tool lists are accurate -- [ ] Descriptions explain WHY not just WHAT -- [ ] Framework support list is current (100+) - -**Potential Improvements:** -- Add concrete example of each server in action -- Consider before/after comparison - -### 8. 
Installation Section (lines 754-798) - -**Current:** "Three Commands. Zero Configuration." with 3 steps - -**Review Checklist:** -- [ ] Commands are accurate and work -- [ ] Step 3 shows actual usage examples -- [ ] Timeline visual (vertical line) renders correctly -- [ ] Copy buttons work on code blocks - -**Potential Improvements:** -- Add copy-to-clipboard functionality if missing -- Consider adding "What you'll see" output example - -### 9. FAQ Section (lines 801-864) - -**Current:** 5 questions in accordion format - -**Review Checklist:** -- [ ] Questions address real objections -- [ ] Answers are conversational (use "you") -- [ ] Accordion expand/collapse works -- [ ] No passive voice in answers - -**Potential Improvements:** -- Review for weasel words ("best practices suggest") -- Ensure answers are direct and actionable - -### 10. CTA Section (lines 868-886) - -**Current:** "Install Once. Compound Forever." with Install + GitHub buttons - -**Review Checklist:** -- [ ] Badge is eye-catching ("Free & Open Source") -- [ ] Headline restates core value proposition -- [ ] Primary CTA has arrow icon ✅ -- [ ] Trust line at bottom - -**Potential Improvements:** -- Review trust line copy -- Consider adding social proof element - ---- - -## NEW: Social Proof Section (To Add) - -**Position:** After Stats section, before Philosophy section - -**Components:** -- GitHub stars counter (dynamic or static) -- "Trusted by X developers" metric -- 2-3 testimonial quotes (if available) -- Company logos (if applicable) - -**LaunchKit Pattern:** -```html - -``` - ---- - -## Pragmatic Writing Style Checklist (Apply to ALL Copy) - -### The Five Laws -1. **Concrete Before Abstract** - Story/example first, then principle -2. **Physical Analogies** - Import metaphors readers understand -3. **Conversational Register** - Use "you", contractions, asides -4. **Numbered Frameworks** - Create referenceable structures -5. 
**Humor as Architecture** - Mental anchors for dense content - -### Anti-Patterns to Find and Fix -- [ ] "It is recommended that..." → "Do this:" -- [ ] "Best practices suggest..." → "Here's what works:" -- [ ] Passive voice → Active voice -- [ ] Abstract claims → Specific examples -- [ ] Walls of text → Scannable lists - -### Quality Checklist (Per Section) -- [ ] Opens with concrete story or example? -- [ ] Can reader skim headers and get the arc? -- [ ] Uses "you" at least once? -- [ ] Clear action reader can take? -- [ ] Reads aloud like speech? - ---- - -## Implementation Phases - -### Phase 1: Copy Audit (No HTML Changes) -1. Read through entire page -2. Flag passive voice instances -3. Flag abstract claims without examples -4. Flag missing "you" voice -5. Document improvements needed - -### Phase 2: Copy Rewrites -1. Rewrite flagged sections following pragmatic style -2. Ensure each section passes quality checklist -3. Maintain existing HTML structure - -### Phase 3: Component Additions -1. Add arrow icons to primary CTAs -2. Add social proof section (if data available) -3. Consider visual demo element - -### Phase 4: Verification -1. Validate all counts (agents, commands, skills) -2. Test all links and buttons -3. Verify mobile responsiveness -4. Check accessibility - ---- - -## Files to Modify - -| File | Changes | -|------|---------| -| `docs/index.html` | Copy rewrites, potential new section | -| `docs/css/style.css` | Social proof styles (if adding) | - ---- - -## Success Criteria - -1. All copy passes Pragmatic Writing quality checklist -2. No passive voice in any description -3. Every feature section answers "why should I care?" -4. Stats are accurate against actual file counts -5. Page loads in <3 seconds -6. 
Mobile layout is fully functional - ---- - -## References - -- LaunchKit Template: https://launchkit.evilmartians.io/ -- Pragmatic Writing Skill: `~/.claude/skills/pragmatic-writing-skill/SKILL.md` -- Current Landing Page: `/Users/kieranklaassen/every-marketplace/docs/index.html` -- Style CSS: `/Users/kieranklaassen/every-marketplace/docs/css/style.css` diff --git a/plugins/coding-tutor/.cursor-plugin/plugin.json b/plugins/coding-tutor/.cursor-plugin/plugin.json new file mode 100644 index 0000000..dc5e6c0 --- /dev/null +++ b/plugins/coding-tutor/.cursor-plugin/plugin.json @@ -0,0 +1,21 @@ +{ + "name": "coding-tutor", + "displayName": "Coding Tutor", + "version": "1.2.1", + "description": "Personalized coding tutorials that use your actual codebase for examples with spaced repetition quizzes", + "author": { + "name": "Nityesh Agarwal" + }, + "homepage": "https://github.com/EveryInc/compound-engineering-plugin", + "repository": "https://github.com/EveryInc/compound-engineering-plugin", + "license": "MIT", + "keywords": [ + "cursor", + "plugin", + "coding", + "programming", + "tutorial", + "learning", + "spaced-repetition" + ] +} diff --git a/plugins/compound-engineering/.claude-plugin/plugin.json b/plugins/compound-engineering/.claude-plugin/plugin.json index bbcbdfc..82eab81 100644 --- a/plugins/compound-engineering/.claude-plugin/plugin.json +++ b/plugins/compound-engineering/.claude-plugin/plugin.json @@ -1,7 +1,7 @@ { "name": "compound-engineering", - "version": "2.39.0", - "description": "AI-powered development tools. 25 agents, 25 commands, 24 skills, 1 MCP server for code review, research, design, and workflow automation.", + "version": "2.40.0", + "description": "AI-powered development tools. 
30 agents, 56 skills, 7 commands, 1 MCP server for code review, research, design, and workflow automation.", "author": { "name": "Kieran Klaassen", "email": "kieran@every.to", @@ -15,7 +15,8 @@ "compound-engineering", "workflow-automation", "code-review", - "fastapi", + "rails", + "ruby", "python", "typescript", "knowledge-management", diff --git a/plugins/compound-engineering/.cursor-plugin/plugin.json b/plugins/compound-engineering/.cursor-plugin/plugin.json new file mode 100644 index 0000000..1fe85ac --- /dev/null +++ b/plugins/compound-engineering/.cursor-plugin/plugin.json @@ -0,0 +1,31 @@ +{ + "name": "compound-engineering", + "displayName": "Compound Engineering", + "version": "2.33.0", + "description": "AI-powered development tools. 28 agents, 22 commands, 19 skills, 1 MCP server for code review, research, design, and workflow automation.", + "author": { + "name": "Kieran Klaassen", + "email": "kieran@every.to", + "url": "https://github.com/kieranklaassen" + }, + "homepage": "https://every.to/source-code/my-ai-had-already-fixed-the-code-before-i-saw-it", + "repository": "https://github.com/EveryInc/compound-engineering-plugin", + "license": "MIT", + "keywords": [ + "cursor", + "plugin", + "ai-powered", + "compound-engineering", + "workflow-automation", + "code-review", + "rails", + "ruby", + "python", + "typescript", + "knowledge-management", + "image-generation", + "agent-browser", + "browser-automation" + ], + "mcpServers": ".mcp.json" +} diff --git a/plugins/compound-engineering/.mcp.json b/plugins/compound-engineering/.mcp.json new file mode 100644 index 0000000..4290fa6 --- /dev/null +++ b/plugins/compound-engineering/.mcp.json @@ -0,0 +1,11 @@ +{ + "mcpServers": { + "context7": { + "type": "http", + "url": "https://mcp.context7.com/mcp", + "headers": { + "x-api-key": "${CONTEXT7_API_KEY:-}" + } + } + } +} diff --git a/plugins/compound-engineering/CHANGELOG.md b/plugins/compound-engineering/CHANGELOG.md index 7b8ad39..869ad97 100644 --- 
a/plugins/compound-engineering/CHANGELOG.md +++ b/plugins/compound-engineering/CHANGELOG.md @@ -5,79 +5,120 @@ All notable changes to the compound-engineering plugin will be documented in thi The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -<<<<<<< Updated upstream ## [2.39.0] - 2026-03-10 ### Added -- **`/essay-edit` command** - Expert essay editor that polishes written work through two passes: structural review using the Saunders framework (via `story-lens` skill) and granular line-level editing. Preserves the author's voice (via `john-voice` skill) and guards against "timid scribe" syndrome — edits sharpen language without softening intent or defanging strong claims. - -## [2.38.0] - 2026-03-08 - -### Added - -- **`/essay-outline` command** - Transform a brain dump into a story-structured essay outline. Pressure tests ideas for a real thesis and payoff, applies the Saunders storytelling framework (via `story-lens` skill) to validate hook, escalation, and conclusion, then produces a tight outline written to file. - -## [2.37.0] - 2026-03-08 - -### Added - -- **`story-lens` skill** - Evaluate prose quality using George Saunders's storytelling framework (beat causality, escalation, three E's, character accumulation, moral/technical unity) -- **`sync-confluence` skill** - Sync local markdown docs to Confluence Cloud pages via REST API. Handles first-time setup, page creation, and bulk updates with automatic mapping file management. 
- -## [2.36.0] - 2026-02-27 - -### Added - -- **`john-voice` skill** - Write content in John Lamb's authentic voice across all venues and audiences -- **`jira-ticket-writer` skill** - Draft and create Jira tickets with tone pressure-testing and user approval via Atlassian MCP -- **`excalidraw-png-export` skill** - Create diagrams, architecture visuals, and flowcharts as PNG files using Excalidraw MCP and Playwright +- **ce:compound context budget precheck** — Warns when context is constrained and offers compact-safe mode to avoid compaction mid-compound ([#235](https://github.com/EveryInc/compound-engineering-plugin/pull/235)) +- **ce:plan daily sequence numbers** — Plan filenames now include a 3-digit daily sequence number (e.g., `2026-03-10-001-feat-...`) to prevent collisions ([#238](https://github.com/EveryInc/compound-engineering-plugin/pull/238)) +- **ce:review serial mode** — Pass `--serial` flag (or auto-detects when 6+ agents configured) to run review agents sequentially, preventing context limit crashes ([#237](https://github.com/EveryInc/compound-engineering-plugin/pull/237)) +- **agent-browser inspection & debugging commands** — Added JS eval, console/errors, network, storage, device emulation, element debugging, recording/tracing, tabs, and advanced mouse commands to agent-browser skill ([#236](https://github.com/EveryInc/compound-engineering-plugin/pull/236)) +- **test-browser port detection** — Auto-detects dev server port from CLAUDE.md, package.json, or .env files; supports `--port` flag ([#233](https://github.com/EveryInc/compound-engineering-plugin/pull/233)) +- **lfg phase gating** — Added explicit GATE checks between /lfg steps to enforce plan-before-work ordering ([#231](https://github.com/EveryInc/compound-engineering-plugin/pull/231)) ### Fixed -- Updated component counts across plugin.json, marketplace.json, and README to reflect actual file counts (22 skills) -- Synced marketplace.json version to match plugin.json - -## [2.35.1] - 
2026-02-17 - -### Added - -- **`upstream-merge` skill** - Structured workflow for incorporating upstream git changes while preserving local fork intent. Integrates with file-todos system for triage tracking. +- **Context7 API key auth** — MCP server config now passes `CONTEXT7_API_KEY` via `x-api-key` header to avoid anonymous rate limits ([#232](https://github.com/EveryInc/compound-engineering-plugin/pull/232)) +- **CLI: MCP server merge order** — `sync` now correctly overwrites same-named MCP servers with plugin values instead of preserving stale entries ### Removed -- **`dspy-python` skill** - Deleted per triage decision (project uses LangChain/LangGraph, not DSPy) +- **every-style-editor agent** — Removed duplicate agent; functionality already exists as `every-style-editor` skill ([#234](https://github.com/EveryInc/compound-engineering-plugin/pull/234)) -## [2.35.0] - 2026-02-16 +### Contributors + +- Matt Van Horn ([@mvanhorn](https://x.com/mvanhorn)) — PRs #231–#238 + +--- + +## [2.38.1] - 2026-03-01 + +### Fixed + +- **Cross-platform `AskUserQuestion` fallback** — `setup` skill and `create-new-skill`/`add-workflow` workflows now include an "Interaction Method" preamble that instructs non-Claude LLMs (Codex, Gemini, Copilot, Kiro) to use numbered lists instead of `AskUserQuestion`, preventing silent auto-configuration. ([#204](https://github.com/EveryInc/compound-engineering-plugin/issues/204)) +- **Codex AGENTS.md `AskUserQuestion` mapping** — Strengthened from "ask the user in chat" to structured numbered-list guidance with multi-select support and a "never skip or auto-configure" rule. +- **Skill compliance checklist** — Added `AskUserQuestion` lint rule to `CLAUDE.md` to prevent recurrence. 
+ +--- + +## [2.38.0] - 2026-03-01 + +### Changed +- `workflows:plan`, `workflows:work`, `workflows:review`, `workflows:brainstorm`, `workflows:compound` renamed to `ce:plan`, `ce:work`, `ce:review`, `ce:brainstorm`, `ce:compound` for clarity — the `ce:` prefix unambiguously identifies these as compound-engineering commands + +### Deprecated +- `workflows:*` commands — all five remain functional as aliases that forward to their `ce:*` equivalents with a deprecation notice. Will be removed in a future version. + +--- + +## [2.37.2] - 2026-03-01 + +### Added + +- **CLI: auto-detect install targets** — `bunx @every-env/compound-plugin install compound-engineering --to all` auto-detects installed AI coding tools and installs to all of them in one command. ([#191](https://github.com/EveryInc/compound-engineering-plugin/pull/191)) +- **CLI: Gemini sync** — `sync --target gemini` symlinks personal skills to `.gemini/skills/` and merges MCP servers into `.gemini/settings.json`. ([#191](https://github.com/EveryInc/compound-engineering-plugin/pull/191)) +- **CLI: sync defaults to `--target all`** — Running `sync` with no target now syncs to all detected tools automatically. ([#191](https://github.com/EveryInc/compound-engineering-plugin/pull/191)) + +--- + +## [2.37.1] - 2026-03-01 + +### Fixed + +- **`/workflows:review` rendering** — Fixed broken markdown output: "Next Steps" items 3 & 4 and Severity Breakdown no longer leak outside the Summary Report template, section numbering fixed (was jumping 5→7, now correct), removed orphaned fenced code block delimiters that caused the entire End-to-End Testing section to render as a code block, and fixed unclosed quoted string in section 1. ([#214](https://github.com/EveryInc/compound-engineering-plugin/pull/214)) — thanks [@XSAM](https://github.com/XSAM)! +- **`.worktrees` gitignore** — Added `.worktrees/` to `.gitignore` to prevent worktree directories created by the `git-worktree` skill from being tracked. 
([#213](https://github.com/EveryInc/compound-engineering-plugin/pull/213)) — thanks [@XSAM](https://github.com/XSAM)! + +--- + +## [2.37.0] - 2026-03-01 + +### Added + +- **`proof` skill** — Create, edit, comment on, and share markdown documents via Proof's web API and local bridge. Supports document creation, track-changes suggestions, comments, and bulk rewrites. No authentication required for creating shared documents. +- **Optional Proof sharing in `/workflows:brainstorm`** — "Share to Proof" is now a menu option in Phase 4 handoff, letting you upload the brainstorm document when you want to, rather than automatically on every run. +- **Optional Proof sharing in `/workflows:plan`** — "Share to Proof" is now a menu option in Post-Generation Options, letting you upload the plan file on demand rather than automatically. + +--- + +## [2.36.0] - 2026-03-01 + +### Added + +- **OpenClaw install target** — `bunx @every-env/compound-plugin install compound-engineering --to openclaw` now installs the plugin to OpenClaw's extensions directory. ([#217](https://github.com/EveryInc/compound-engineering-plugin/pull/217)) — thanks [@TrendpilotAI](https://github.com/TrendpilotAI)! +- **Qwen Code install target** — `bunx @every-env/compound-plugin install compound-engineering --to qwen` now installs the plugin to Qwen Code's extensions directory. ([#220](https://github.com/EveryInc/compound-engineering-plugin/pull/220)) — thanks [@rlam3](https://github.com/rlam3)! +- **Windsurf install target** — `bunx @every-env/compound-plugin install compound-engineering --to windsurf` converts plugins to Windsurf format. Agents become Windsurf skills, commands become flat workflows, and MCP servers write to `mcp_config.json`. Defaults to global scope (`~/.codeium/windsurf/`); use `--scope workspace` for project-level output. ([#202](https://github.com/EveryInc/compound-engineering-plugin/pull/202)) — thanks [@rburnham52](https://github.com/rburnham52)! 
+ +### Fixed + +- **`create-agent-skill` / `heal-skill` YAML crash** — `argument-hint` values containing special characters now properly quoted to prevent YAML parse errors in the Claude Code TUI. ([#219](https://github.com/EveryInc/compound-engineering-plugin/pull/219)) — thanks [@solon](https://github.com/solon)! +- **`resolve-pr-parallel` skill name** — Renamed from `resolve_pr_parallel` (underscore) to `resolve-pr-parallel` (hyphen) to match the standard naming convention. ([#202](https://github.com/EveryInc/compound-engineering-plugin/pull/202)) — thanks [@rburnham52](https://github.com/rburnham52)! + +--- + +## [2.35.2] - 2026-02-20 ### Changed -- **Backend focus shift: Ruby/Rails -> Python/FastAPI** - Comprehensive conversion of backend-focused components - - All backend-related agents and skills now target Python/FastAPI instead of Ruby/Rails - - TypeScript/React frontend components remain unchanged +- **`/workflows:plan` brainstorm integration** — When plan finds a brainstorm document, it now heavily references it throughout. Added `origin:` frontmatter field to plan templates, brainstorm cross-check in final review, and "Sources" section at the bottom of all three plan templates (MINIMAL, MORE, A LOT). Brainstorm decisions are carried forward with explicit references (`see brainstorm: `) and a mandatory scan before finalizing ensures nothing is dropped. 
-### Added +--- -- **`tiangolo-fastapi-reviewer` agent** - FastAPI code review from Sebastián Ramírez's perspective -- **`python-package-readme-writer` agent** - Create concise READMEs for Python packages -- **`fastapi-style` skill** - Write FastAPI code following opinionated best practices -- **`python-package-writer` skill** - Write Python packages following production-ready patterns -- **Enhanced `kieran-python-reviewer` agent** - Now includes 9 FastAPI-specific convention sections -- **Updated `lint` agent** - Now targets Python files -- **`/pr-comments-to-todos` command** - Fetch PR review comments and convert them into todo files for triage -- **Pressure Test framework** in workflows:review - Critical evaluation of agent findings before creating todos +## [2.35.1] - 2026-02-18 -### Removed +### Changed -- **`dhh-rails-reviewer` agent** - Replaced by tiangolo-fastapi-reviewer -- **`kieran-rails-reviewer` agent** - Functionality merged into kieran-python-reviewer -- **`ankane-readme-writer` agent** - Replaced by python-package-readme-writer -- **3 design agents** - design-implementation-reviewer, design-iterator, figma-design-sync -- **`dhh-rails-style` skill** - Replaced by fastapi-style -- **`andrew-kane-gem-writer` skill** - Replaced by python-package-writer -- **`dspy-ruby` skill** - Removed (not used; LangChain/LangGraph is the actual stack) -- **`dspy-python` skill** - Removed (not used; LangChain/LangGraph is the actual stack) -- **`/plan_review` command** - Absorbed into workflows/plan via document-review skill +- **`/workflows:work` system-wide test check** — Added "System-Wide Test Check" to the task execution loop. Before marking a task done, forces five questions: what callbacks/middleware fire when this runs? Do tests exercise the real chain or just mocked isolation? Can failure leave orphaned state? What other interfaces need the same change? Do error strategies align across layers? Includes skip criteria for leaf-node changes. 
Also added integration test guidance to the "Test Continuously" section. +- **`/workflows:plan` system-wide impact templates** — Added "System-Wide Impact" section to MORE and A LOT plan templates (interaction graph, error propagation, state lifecycle, API surface parity, integration test scenarios) as lightweight prompts to flag risks during planning. + +--- + +## [2.35.0] - 2026-02-17 + +### Fixed + +- **`/lfg` and `/slfg` first-run failures** — Made ralph-loop step optional with graceful fallback when `ralph-wiggum` skill is not installed (#154). Added explicit "do not stop" instruction across all steps (#134). +- **`/workflows:plan` not writing file in pipeline** — Added mandatory "Write Plan File" step with explicit Write tool instructions before Post-Generation Options. The file is now always written to disk before any interactive prompts (#155). Also adds pipeline-mode note to skip AskUserQuestion calls when invoked from LFG/SLFG (#134). +- **Agent namespace typo in `/workflows:plan`** — `Task spec-flow-analyzer(...)` now uses the full qualified name `Task compound-engineering:workflow:spec-flow-analyzer(...)` to prevent Claude from prepending the wrong `workflows:` prefix (#193). --- @@ -149,7 +190,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - All 29 agent descriptions trimmed from ~1,400 to ~180 chars avg (examples moved to agent body) - 18 manual commands marked `disable-model-invocation: true` (side-effect commands like `/lfg`, `/deploy-docs`, `/triage`, etc.) 
- 6 manual skills marked `disable-model-invocation: true` (`orchestrating-swarms`, `git-worktree`, `skill-creator`, `compound-docs`, `file-todos`, `resolve-pr-parallel`) -- **git-worktree**: Remove confirmation prompt for worktree creation ([@Sam Xie](https://github.com/samxie)) +- **git-worktree**: Remove confirmation prompt for worktree creation ([@Sam Xie](https://github.com/XSAM)) - **Prevent subagents from writing intermediary files** in compound workflow ([@Trevin Chow](https://github.com/trevin)) ### Fixed diff --git a/plugins/compound-engineering/CLAUDE.md b/plugins/compound-engineering/CLAUDE.md index dc34c27..339b062 100644 --- a/plugins/compound-engineering/CLAUDE.md +++ b/plugins/compound-engineering/CLAUDE.md @@ -2,24 +2,24 @@ ## Versioning Requirements -**IMPORTANT**: Every change to this plugin MUST include updates to all three files: +**IMPORTANT**: Routine PRs should not cut releases for this plugin. -1. **`.claude-plugin/plugin.json`** - Bump version using semver -2. **`CHANGELOG.md`** - Document changes using Keep a Changelog format -3. **`README.md`** - Verify/update component counts and tables +The repo uses an automated release process to prepare plugin releases, including version selection and changelog generation. Because multiple PRs may merge before the next release, contributors cannot know the final released version from within an individual PR. -### Version Bumping Rules +### Contributor Rules -- **MAJOR** (1.0.0 → 2.0.0): Breaking changes, major reorganization -- **MINOR** (1.0.0 → 1.1.0): New agents, commands, or skills -- **PATCH** (1.0.0 → 1.0.1): Bug fixes, doc updates, minor improvements +- Do **not** manually bump `.claude-plugin/plugin.json` version in a normal feature PR. +- Do **not** manually bump `.claude-plugin/marketplace.json` plugin version in a normal feature PR. +- Do **not** cut a release section in `CHANGELOG.md` for a normal feature PR.
+- Do update substantive docs that are part of the actual change, such as `README.md`, component tables, usage instructions, or counts when they would otherwise become inaccurate. ### Pre-Commit Checklist Before committing ANY changes: -- [ ] Version bumped in `.claude-plugin/plugin.json` -- [ ] CHANGELOG.md updated with changes +- [ ] No manual release-version bump in `.claude-plugin/plugin.json` +- [ ] No manual release-version bump in `.claude-plugin/marketplace.json` +- [ ] No manual release entry added to `CHANGELOG.md` - [ ] README.md component counts verified - [ ] README.md tables accurate (agents, commands, skills) - [ ] plugin.json description matches current counts @@ -34,23 +34,26 @@ agents/ ├── workflow/ # Workflow automation agents └── docs/ # Documentation agents -commands/ -├── workflows/ # Core workflow commands (workflows:plan, workflows:review, etc.) -└── *.md # Utility commands - skills/ -└── *.md # All skills at root level +├── ce-*/ # Core workflow skills (ce:plan, ce:review, etc.) +├── workflows-*/ # Deprecated aliases for ce:* skills +└── */ # All other skills ``` +> **Note:** Commands were migrated to skills in v2.39.0. All former +> `/command-name` slash commands now live under `skills/command-name/SKILL.md` +> and work identically (Claude Code 2.1.3+ merged the two formats). 
+ ## Command Naming Convention -**Workflow commands** use `workflows:` prefix to avoid collisions with built-in commands: -- `/workflows:plan` - Create implementation plans -- `/workflows:review` - Run comprehensive code reviews -- `/workflows:work` - Execute work items systematically -- `/workflows:compound` - Document solved problems +**Workflow commands** use `ce:` prefix to unambiguously identify them as compound-engineering commands: +- `/ce:plan` - Create implementation plans +- `/ce:review` - Run comprehensive code reviews +- `/ce:work` - Execute work items systematically +- `/ce:compound` - Document solved problems +- `/ce:brainstorm` - Explore requirements and approaches before planning -**Why `workflows:`?** Claude Code has built-in `/plan` and `/review` commands. Using `name: workflows:plan` in frontmatter creates a unique `/workflows:plan` command with no collision. +**Why `ce:`?** Claude Code has built-in `/plan` and `/review` commands. The `ce:` namespace (short for compound-engineering) makes it immediately clear these commands belong to this plugin. The legacy `workflows:` prefix is still supported as deprecated aliases that forward to the `ce:*` equivalents. 
## Skill Compliance Checklist @@ -73,6 +76,11 @@ When adding or modifying skills, verify compliance with skill-creator spec: - [ ] Use imperative/infinitive form (verb-first instructions) - [ ] Avoid second person ("you should") - use objective language ("To accomplish X, do Y") +### AskUserQuestion Usage + +- [ ] If the skill uses `AskUserQuestion`, it must include an "Interaction Method" preamble explaining the numbered-list fallback for non-Claude environments +- [ ] Prefer avoiding `AskUserQuestion` entirely (see `brainstorming/SKILL.md` pattern) for skills intended to run cross-platform + ### Quick Validation Command ```bash diff --git a/plugins/compound-engineering/README.md b/plugins/compound-engineering/README.md index 289c7bb..c6fd2d5 100644 --- a/plugins/compound-engineering/README.md +++ b/plugins/compound-engineering/README.md @@ -6,16 +6,16 @@ AI-powered development tools that get smarter with every use. Make each unit of | Component | Count | |-----------|-------| -| Agents | 25 | -| Commands | 25 | -| Skills | 24 | +| Agents | 28 | +| Commands | 22 | +| Skills | 20 | | MCP Servers | 1 | ## Agents Agents are organized into categories for easier discovery. -### Review (14) +### Review (15) | Agent | Description | |-------|-------------| @@ -25,14 +25,15 @@ Agents are organized into categories for easier discovery. 
| `data-integrity-guardian` | Database migrations and data integrity | | `data-migration-expert` | Validate ID mappings match production, check for swapped values | | `deployment-verification-agent` | Create Go/No-Go deployment checklists for risky data changes | +| `dhh-rails-reviewer` | Rails review from DHH's perspective | | `julik-frontend-races-reviewer` | Review JavaScript/Stimulus code for race conditions | +| `kieran-rails-reviewer` | Rails code review with strict conventions | | `kieran-python-reviewer` | Python code review with strict conventions | | `kieran-typescript-reviewer` | TypeScript code review with strict conventions | | `pattern-recognition-specialist` | Analyze code for patterns and anti-patterns | | `performance-oracle` | Performance analysis and optimization | -| `schema-drift-detector` | Detect unrelated schema changes in PRs | +| `schema-drift-detector` | Detect unrelated schema.rb changes in PRs | | `security-sentinel` | Security audits and vulnerability assessments | -| `tiangolo-fastapi-reviewer` | FastAPI code review from tiangolo's perspective | ### Research (5) @@ -44,13 +45,20 @@ Agents are organized into categories for easier discovery. 
| `learnings-researcher` | Search institutional learnings for relevant past solutions | | `repo-research-analyst` | Research repository structure and conventions | -### Workflow (5) +### Design (3) + +| Agent | Description | +|-------|-------------| +| `design-implementation-reviewer` | Verify UI implementations match Figma designs | +| `design-iterator` | Iteratively refine UI through systematic design iterations | +| `figma-design-sync` | Synchronize web implementations with Figma designs | + +### Workflow (4) | Agent | Description | |-------|-------------| | `bug-reproduction-validator` | Systematically reproduce and validate bug reports | -| `every-style-editor` | Edit content to conform to Every's style guide | -| `lint` | Run linting and code quality checks on Python files | +| `lint` | Run linting and code quality checks on Ruby and ERB files | | `pr-comment-resolver` | Address PR comments and implement fixes | | `spec-flow-analyzer` | Analyze user flows and identify gaps in specifications | @@ -58,21 +66,23 @@ Agents are organized into categories for easier discovery. 
| Agent | Description | |-------|-------------| -| `python-package-readme-writer` | Create READMEs following concise documentation style for Python packages | +| `ankane-readme-writer` | Create READMEs following Ankane-style template for Ruby gems | ## Commands ### Workflow Commands -Core workflow commands use `workflows:` prefix to avoid collisions with built-in commands: +Core workflow commands use `ce:` prefix to unambiguously identify them as compound-engineering commands: | Command | Description | |---------|-------------| -| `/workflows:brainstorm` | Explore requirements and approaches before planning | -| `/workflows:plan` | Create implementation plans | -| `/workflows:review` | Run comprehensive code reviews | -| `/workflows:work` | Execute work items systematically | -| `/workflows:compound` | Document solved problems to compound team knowledge | +| `/ce:brainstorm` | Explore requirements and approaches before planning | +| `/ce:plan` | Create implementation plans | +| `/ce:review` | Run comprehensive code reviews | +| `/ce:work` | Execute work items systematically | +| `/ce:compound` | Document solved problems to compound team knowledge | + +> **Deprecated aliases:** `/workflows:plan`, `/workflows:work`, `/workflows:review`, `/workflows:brainstorm`, `/workflows:compound` still work but show a deprecation warning. Use `ce:*` equivalents. 
### Utility Commands @@ -85,19 +95,16 @@ Core workflow commands use `workflows:` prefix to avoid collisions with built-in | `/create-agent-skill` | Create or edit Claude Code skills | | `/generate_command` | Generate new slash commands | | `/heal-skill` | Fix skill documentation issues | +| `/sync` | Sync Claude Code config across machines | | `/report-bug` | Report a bug in the plugin | | `/reproduce-bug` | Reproduce bugs using logs and console | | `/resolve_parallel` | Resolve TODO comments in parallel | +| `/resolve_pr_parallel` | Resolve PR comments in parallel | | `/resolve_todo_parallel` | Resolve todos in parallel | | `/triage` | Triage and prioritize issues | | `/test-browser` | Run browser tests on PR-affected pages | -| `/test-xcode` | Build and test iOS apps on simulator | +| `/xcode-test` | Build and test iOS apps on simulator | | `/feature-video` | Record video walkthroughs and add to PR description | -| `/agent-native-audit` | Run comprehensive agent-native architecture review | -| `/deploy-docs` | Validate and prepare documentation for GitHub Pages | -| `/pr-comments-to-todos` | Fetch PR comments and convert to todo files | -| `/essay-outline` | Transform a brain dump into a story-structured essay outline | -| `/essay-edit` | Polish written essays with expert structural and line-level editing | ## Skills @@ -111,12 +118,13 @@ Core workflow commands use `workflows:` prefix to avoid collisions with built-in | Skill | Description | |-------|-------------| +| `andrew-kane-gem-writer` | Write Ruby gems following Andrew Kane's patterns | | `compound-docs` | Capture solved problems as categorized documentation | | `create-agent-skills` | Expert guidance for creating Claude Code skills | -| `fastapi-style` | Write Python/FastAPI code following opinionated best practices | +| `dhh-rails-style` | Write Ruby/Rails code in DHH's 37signals style | +| `dspy-ruby` | Build type-safe LLM applications with DSPy.rb | | `frontend-design` | Create production-grade 
frontend interfaces | -| `python-package-writer` | Write Python packages following production-ready patterns | -| `skill-creator` | Guide for creating effective Claude Code skills | + ### Content & Workflow @@ -127,12 +135,9 @@ Core workflow commands use `workflows:` prefix to avoid collisions with built-in | `every-style-editor` | Review copy for Every's style guide compliance | | `file-todos` | File-based todo tracking system | | `git-worktree` | Manage Git worktrees for parallel development | -| `jira-ticket-writer` | Draft and create Jira tickets with tone review and user approval | -| `john-voice` | Write content in John Lamb's authentic voice across all venues | +| `proof` | Create, edit, and share documents via Proof collaborative editor | | `resolve-pr-parallel` | Resolve PR review comments in parallel | | `setup` | Configure which review agents run for your project | -| `story-lens` | Evaluate prose quality using George Saunders's storytelling framework | -| `upstream-merge` | Incorporate upstream git changes while preserving local fork intent | ### Multi-Agent Orchestration @@ -152,12 +157,6 @@ Core workflow commands use `workflows:` prefix to avoid collisions with built-in |-------|-------------| | `agent-browser` | CLI-based browser automation using Vercel's agent-browser | -### Diagramming & Visualization - -| Skill | Description | -|-------|-------------| -| `excalidraw-png-export` | Create diagrams and flowcharts as PNG using Excalidraw MCP | - ### Image Generation | Skill | Description | @@ -186,10 +185,12 @@ Core workflow commands use `workflows:` prefix to avoid collisions with built-in - `resolve-library-id` - Find library ID for a framework/package - `get-library-docs` - Get documentation for a specific library -Supports 100+ frameworks including FastAPI, React, Next.js, Vue, Django, SQLAlchemy, and more. +Supports 100+ frameworks including Rails, React, Next.js, Vue, Django, Laravel, and more. 
MCP servers start automatically when the plugin is enabled. +**Authentication:** To avoid anonymous rate limits, set the `CONTEXT7_API_KEY` environment variable with your Context7 API key. The plugin passes this automatically via the `x-api-key` header. Without it, requests go unauthenticated and will quickly hit the anonymous quota limit. + ## Browser Automation This plugin uses **agent-browser CLI** for browser automation tasks. Install it globally: @@ -220,13 +221,16 @@ claude /plugin install compound-engineering "mcpServers": { "context7": { "type": "http", - "url": "https://mcp.context7.com/mcp" + "url": "https://mcp.context7.com/mcp", + "headers": { + "x-api-key": "${CONTEXT7_API_KEY:-}" + } } } } ``` -Or add it globally in `~/.claude/settings.json` for all projects. +Set `CONTEXT7_API_KEY` in your environment to authenticate. Or add it globally in `~/.claude/settings.json` for all projects. ## Version History diff --git a/plugins/compound-engineering/agents/research/git-history-analyzer.md b/plugins/compound-engineering/agents/research/git-history-analyzer.md index fca36ca..296e480 100644 --- a/plugins/compound-engineering/agents/research/git-history-analyzer.md +++ b/plugins/compound-engineering/agents/research/git-history-analyzer.md @@ -56,4 +56,4 @@ When analyzing, consider: Your insights should help developers understand not just what the code does, but why it evolved to its current state, informing better decisions for future changes. -Note that files in `docs/plans/` and `docs/solutions/` are compound-engineering pipeline artifacts created by `/workflows:plan`. They are intentional, permanent living documents — do not recommend their removal or characterize them as unnecessary. +Note that files in `docs/plans/` and `docs/solutions/` are compound-engineering pipeline artifacts created by `/ce:plan`. They are intentional, permanent living documents — do not recommend their removal or characterize them as unnecessary. 
diff --git a/plugins/compound-engineering/agents/research/learnings-researcher.md b/plugins/compound-engineering/agents/research/learnings-researcher.md index a53a260..a681242 100644 --- a/plugins/compound-engineering/agents/research/learnings-researcher.md +++ b/plugins/compound-engineering/agents/research/learnings-researcher.md @@ -1,7 +1,7 @@ --- name: learnings-researcher description: "Searches docs/solutions/ for relevant past solutions by frontmatter metadata. Use before implementing features or fixing problems to surface institutional knowledge and prevent repeated mistakes." -model: haiku +model: inherit --- @@ -257,7 +257,7 @@ Structure your findings as: ## Integration Points This agent is designed to be invoked by: -- `/workflows:plan` - To inform planning with institutional knowledge +- `/ce:plan` - To inform planning with institutional knowledge - `/deepen-plan` - To add depth with relevant learnings - Manual invocation before starting work on a feature diff --git a/plugins/compound-engineering/agents/review/code-simplicity-reviewer.md b/plugins/compound-engineering/agents/review/code-simplicity-reviewer.md index d7e01ff..0627822 100644 --- a/plugins/compound-engineering/agents/review/code-simplicity-reviewer.md +++ b/plugins/compound-engineering/agents/review/code-simplicity-reviewer.md @@ -48,7 +48,7 @@ When reviewing code, you will: - Eliminate extensibility points without clear use cases - Question generic solutions for specific problems - Remove "just in case" code - - Never flag `docs/plans/*.md` or `docs/solutions/*.md` for removal — these are compound-engineering pipeline artifacts created by `/workflows:plan` and used as living documents by `/workflows:work` + - Never flag `docs/plans/*.md` or `docs/solutions/*.md` for removal — these are compound-engineering pipeline artifacts created by `/ce:plan` and used as living documents by `/ce:work` 6. 
**Optimize for Readability**: - Prefer self-documenting code over comments diff --git a/plugins/compound-engineering/agents/workflow/every-style-editor.md b/plugins/compound-engineering/agents/workflow/every-style-editor.md deleted file mode 100644 index 061375d..0000000 --- a/plugins/compound-engineering/agents/workflow/every-style-editor.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -name: every-style-editor -description: "Reviews and edits text content to conform to Every's editorial style guide. Use when written content needs style compliance checks for headlines, punctuation, voice, and formatting." -tools: Task, Glob, Grep, LS, ExitPlanMode, Read, Edit, MultiEdit, Write, NotebookRead, NotebookEdit, WebFetch, TodoWrite, WebSearch -model: inherit ---- - -You are an expert copy editor specializing in Every's house style guide. Your role is to meticulously review text content and suggest edits to ensure compliance with Every's specific editorial standards. - -When reviewing content, you will: - -1. **Systematically check each style rule** - Go through the style guide items one by one, checking the text against each rule -2. **Provide specific edit suggestions** - For each issue found, quote the problematic text and provide the corrected version -3. **Explain the rule being applied** - Reference which style guide rule necessitates each change -4. 
**Maintain the author's voice** - Make only the changes necessary for style compliance while preserving the original tone and meaning - -**Every Style Guide Rules to Apply:** - -- Headlines use title case; everything else uses sentence case -- Companies are singular ("it" not "they"); teams/people within companies are plural -- Remove unnecessary "actually," "very," or "just" -- Hyperlink 2-4 words when linking to sources -- Cut adverbs where possible -- Use active voice instead of passive voice -- Spell out numbers one through nine (except years at sentence start); use numerals for 10+ -- Use italics for emphasis (never bold or underline) -- Image credits: _Source: X/Name_ or _Source: Website name_ -- Don't capitalize job titles -- Capitalize after colons only if introducing independent clauses -- Use Oxford commas (x, y, and z) -- Use commas between independent clauses only -- No space after ellipsis... -- Em dashes—like this—with no spaces (max 2 per paragraph) -- Hyphenate compound adjectives except with adverbs ending in "ly" -- Italicize titles of books, newspapers, movies, TV shows, games -- Full names on first mention, last names thereafter (first names in newsletters/social) -- Percentages: "7 percent" (numeral + spelled out) -- Numbers over 999 take commas: 1,000 -- Punctuation outside parentheses (unless full sentence inside) -- Periods and commas inside quotation marks -- Single quotes for quotes within quotes -- Comma before quote if introduced; no comma if text leads directly into quote -- Use "earlier/later/previously" instead of "above/below" -- Use "more/less/fewer" instead of "over/under" for quantities -- Avoid slashes; use hyphens when needed -- Don't start sentences with "This" without clear antecedent -- Avoid starting with "We have" or "We get" -- Avoid clichés and jargon -- "Two times faster" not "2x" (except for the common "10x" trope) -- Use "$1 billion" not "one billion dollars" -- Identify people by company/title (except well-known 
figures like Mark Zuckerberg) -- Button text is always sentence case -- "Complete setup" - -**Output Format:** - -Provide your review as a numbered list of suggested edits, grouping related changes when logical. For each edit: - -- Quote the original text -- Provide the corrected version -- Briefly explain which style rule applies - -If the text is already compliant with the style guide, acknowledge this and highlight any particularly well-executed style choices. - -Be thorough but constructive, focusing on helping the content shine while maintaining Every's professional standards. diff --git a/plugins/compound-engineering/commands/lfg.md b/plugins/compound-engineering/commands/lfg.md deleted file mode 100644 index 5d971fc..0000000 --- a/plugins/compound-engineering/commands/lfg.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: lfg -description: Full autonomous engineering workflow -argument-hint: "[feature description]" -disable-model-invocation: true ---- - -Run these slash commands in order. Do not do anything else. - -1. `/ralph-wiggum:ralph-loop "finish all slash commands" --completion-promise "DONE"` -2. `/workflows:plan $ARGUMENTS` -3. `/compound-engineering:deepen-plan` -4. `/workflows:work` -5. `/workflows:review` -6. `/compound-engineering:resolve_todo_parallel` -7. `/compound-engineering:test-browser` -8. `/compound-engineering:feature-video` -9. Output `DONE` when video is in PR - -Start with step 1 now. diff --git a/plugins/compound-engineering/skills/agent-browser/SKILL.md b/plugins/compound-engineering/skills/agent-browser/SKILL.md index 3ff264e..9c9879d 100644 --- a/plugins/compound-engineering/skills/agent-browser/SKILL.md +++ b/plugins/compound-engineering/skills/agent-browser/SKILL.md @@ -3,9 +3,9 @@ name: agent-browser description: Browser automation using Vercel's agent-browser CLI. Use when you need to interact with web pages, fill forms, take screenshots, or scrape data. 
Alternative to Playwright MCP - uses Bash commands with ref-based element selection. Triggers on "browse website", "fill form", "click button", "take screenshot", "scrape page", "web automation". --- -# agent-browser: CLI Browser Automation +# Browser Automation with agent-browser -Vercel's headless browser automation CLI designed for AI agents. Uses ref-based selection (@e1, @e2) from accessibility snapshots. +The CLI uses Chrome/Chromium via CDP directly. Install via `npm i -g agent-browser`, `brew install agent-browser`, or `cargo install agent-browser`. Run `agent-browser install` to download Chrome. ## Setup Check @@ -23,185 +23,627 @@ agent-browser install # Downloads Chromium ## Core Workflow -**The snapshot + ref pattern is optimal for LLMs:** +Every browser automation follows this pattern: -1. **Navigate** to URL -2. **Snapshot** to get interactive elements with refs -3. **Interact** using refs (@e1, @e2, etc.) -4. **Re-snapshot** after navigation or DOM changes +1. **Navigate**: `agent-browser open ` +2. **Snapshot**: `agent-browser snapshot -i` (get element refs like `@e1`, `@e2`) +3. **Interact**: Use refs to click, fill, select +4. 
**Re-snapshot**: After navigation or DOM changes, get fresh refs ```bash -# Step 1: Open URL -agent-browser open https://example.com - -# Step 2: Get interactive elements with refs -agent-browser snapshot -i --json - -# Step 3: Interact using refs -agent-browser click @e1 -agent-browser fill @e2 "search query" - -# Step 4: Re-snapshot after changes +agent-browser open https://example.com/form agent-browser snapshot -i -``` +# Output: @e1 [input type="email"], @e2 [input type="password"], @e3 [button] "Submit" -## Key Commands - -### Navigation - -```bash -agent-browser open # Navigate to URL -agent-browser back # Go back -agent-browser forward # Go forward -agent-browser reload # Reload page -agent-browser close # Close browser -``` - -### Snapshots (Essential for AI) - -```bash -agent-browser snapshot # Full accessibility tree -agent-browser snapshot -i # Interactive elements only (recommended) -agent-browser snapshot -i --json # JSON output for parsing -agent-browser snapshot -c # Compact (remove empty elements) -agent-browser snapshot -d 3 # Limit depth -``` - -### Interactions - -```bash -agent-browser click @e1 # Click element -agent-browser dblclick @e1 # Double-click -agent-browser fill @e1 "text" # Clear and fill input -agent-browser type @e1 "text" # Type without clearing -agent-browser press Enter # Press key -agent-browser hover @e1 # Hover element -agent-browser check @e1 # Check checkbox -agent-browser uncheck @e1 # Uncheck checkbox -agent-browser select @e1 "option" # Select dropdown option -agent-browser scroll down 500 # Scroll (up/down/left/right) -agent-browser scrollintoview @e1 # Scroll element into view -``` - -### Get Information - -```bash -agent-browser get text @e1 # Get element text -agent-browser get html @e1 # Get element HTML -agent-browser get value @e1 # Get input value -agent-browser get attr href @e1 # Get attribute -agent-browser get title # Get page title -agent-browser get url # Get current URL -agent-browser get count "button" # 
Count matching elements -``` - -### Screenshots & PDFs - -```bash -agent-browser screenshot # Viewport screenshot -agent-browser screenshot --full # Full page -agent-browser screenshot output.png # Save to file -agent-browser screenshot --full output.png # Full page to file -agent-browser pdf output.pdf # Save as PDF -``` - -### Wait - -```bash -agent-browser wait @e1 # Wait for element -agent-browser wait 2000 # Wait milliseconds -agent-browser wait "text" # Wait for text to appear -``` - -## Semantic Locators (Alternative to Refs) - -```bash -agent-browser find role button click --name "Submit" -agent-browser find text "Sign up" click -agent-browser find label "Email" fill "user@example.com" -agent-browser find placeholder "Search..." fill "query" -``` - -## Sessions (Parallel Browsers) - -```bash -# Run multiple independent browser sessions -agent-browser --session browser1 open https://site1.com -agent-browser --session browser2 open https://site2.com - -# List active sessions -agent-browser session list -``` - -## Examples - -### Login Flow - -```bash -agent-browser open https://app.example.com/login -agent-browser snapshot -i -# Output shows: textbox "Email" [ref=e1], textbox "Password" [ref=e2], button "Sign in" [ref=e3] agent-browser fill @e1 "user@example.com" agent-browser fill @e2 "password123" agent-browser click @e3 -agent-browser wait 2000 -agent-browser snapshot -i # Verify logged in +agent-browser wait --load networkidle +agent-browser snapshot -i # Check result ``` -### Search and Extract +## Command Chaining + +Commands can be chained with `&&` in a single shell invocation. The browser persists between commands via a background daemon, so chaining is safe and more efficient than separate calls. 
```bash -agent-browser open https://news.ycombinator.com -agent-browser snapshot -i --json -# Parse JSON to find story links -agent-browser get text @e12 # Get headline text -agent-browser click @e12 # Click to open story +# Chain open + wait + snapshot in one call +agent-browser open https://example.com && agent-browser wait --load networkidle && agent-browser snapshot -i + +# Chain multiple interactions +agent-browser fill @e1 "user@example.com" && agent-browser fill @e2 "password123" && agent-browser click @e3 + +# Navigate and capture +agent-browser open https://example.com && agent-browser wait --load networkidle && agent-browser screenshot page.png ``` -### Form Filling +**When to chain:** Use `&&` when you don't need to read the output of an intermediate command before proceeding (e.g., open + wait + screenshot). Run commands separately when you need to parse the output first (e.g., snapshot to discover refs, then interact using those refs). + +## Handling Authentication + +When automating a site that requires login, choose the approach that fits: + +**Option 1: Import auth from the user's browser (fastest for one-off tasks)** ```bash -agent-browser open https://forms.example.com +# Connect to the user's running Chrome (they're already logged in) +agent-browser --auto-connect state save ./auth.json +# Use that auth state +agent-browser --state ./auth.json open https://app.example.com/dashboard +``` + +State files contain session tokens in plaintext -- add to `.gitignore` and delete when no longer needed. Set `AGENT_BROWSER_ENCRYPTION_KEY` for encryption at rest. + +**Option 2: Persistent profile (simplest for recurring tasks)** + +```bash +# First run: login manually or via automation +agent-browser --profile ~/.myapp open https://app.example.com/login +# ... fill credentials, submit ... 
+ +# All future runs: already authenticated +agent-browser --profile ~/.myapp open https://app.example.com/dashboard +``` + +**Option 3: Session name (auto-save/restore cookies + localStorage)** + +```bash +agent-browser --session-name myapp open https://app.example.com/login +# ... login flow ... +agent-browser close # State auto-saved + +# Next time: state auto-restored +agent-browser --session-name myapp open https://app.example.com/dashboard +``` + +**Option 4: Auth vault (credentials stored encrypted, login by name)** + +```bash +echo "$PASSWORD" | agent-browser auth save myapp --url https://app.example.com/login --username user --password-stdin +agent-browser auth login myapp +``` + +**Option 5: State file (manual save/load)** + +```bash +# After logging in: +agent-browser state save ./auth.json +# In a future session: +agent-browser state load ./auth.json +agent-browser open https://app.example.com/dashboard +``` + +See [references/authentication.md](references/authentication.md) for OAuth, 2FA, cookie-based auth, and token refresh patterns. 
+ +## Essential Commands + +```bash +# Navigation +agent-browser open # Navigate (aliases: goto, navigate) +agent-browser close # Close browser + +# Snapshot +agent-browser snapshot -i # Interactive elements with refs (recommended) +agent-browser snapshot -i -C # Include cursor-interactive elements (divs with onclick, cursor:pointer) +agent-browser snapshot -s "#selector" # Scope to CSS selector + +# Interaction (use @refs from snapshot) +agent-browser click @e1 # Click element +agent-browser click @e1 --new-tab # Click and open in new tab +agent-browser fill @e2 "text" # Clear and type text +agent-browser type @e2 "text" # Type without clearing +agent-browser select @e1 "option" # Select dropdown option +agent-browser check @e1 # Check checkbox +agent-browser press Enter # Press key +agent-browser keyboard type "text" # Type at current focus (no selector) +agent-browser keyboard inserttext "text" # Insert without key events +agent-browser scroll down 500 # Scroll page +agent-browser scroll down 500 --selector "div.content" # Scroll within a specific container + +# Get information +agent-browser get text @e1 # Get element text +agent-browser get url # Get current URL +agent-browser get title # Get page title +agent-browser get cdp-url # Get CDP WebSocket URL + +# Wait +agent-browser wait @e1 # Wait for element +agent-browser wait --load networkidle # Wait for network idle +agent-browser wait --url "**/page" # Wait for URL pattern +agent-browser wait 2000 # Wait milliseconds +agent-browser wait --text "Welcome" # Wait for text to appear (substring match) +agent-browser wait --fn "!document.body.innerText.includes('Loading...')" # Wait for text to disappear +agent-browser wait "#spinner" --state hidden # Wait for element to disappear + +# Downloads +agent-browser download @e1 ./file.pdf # Click element to trigger download +agent-browser wait --download ./output.zip # Wait for any download to complete +agent-browser --download-path ./downloads open # Set default 
download directory + +# Viewport & Device Emulation +agent-browser set viewport 1920 1080 # Set viewport size (default: 1280x720) +agent-browser set viewport 1920 1080 2 # 2x retina (same CSS size, higher res screenshots) +agent-browser set device "iPhone 14" # Emulate device (viewport + user agent) + +# Capture +agent-browser screenshot # Screenshot to temp dir +agent-browser screenshot --full # Full page screenshot +agent-browser screenshot --annotate # Annotated screenshot with numbered element labels +agent-browser screenshot --screenshot-dir ./shots # Save to custom directory +agent-browser screenshot --screenshot-format jpeg --screenshot-quality 80 +agent-browser pdf output.pdf # Save as PDF + +# Clipboard +agent-browser clipboard read # Read text from clipboard +agent-browser clipboard write "Hello, World!" # Write text to clipboard +agent-browser clipboard copy # Copy current selection +agent-browser clipboard paste # Paste from clipboard + +# Diff (compare page states) +agent-browser diff snapshot # Compare current vs last snapshot +agent-browser diff snapshot --baseline before.txt # Compare current vs saved file +agent-browser diff screenshot --baseline before.png # Visual pixel diff +agent-browser diff url # Compare two pages +agent-browser diff url --wait-until networkidle # Custom wait strategy +agent-browser diff url --selector "#main" # Scope to element +``` + +## Common Patterns + +### Form Submission + +```bash +agent-browser open https://example.com/signup agent-browser snapshot -i -agent-browser fill @e1 "John Doe" -agent-browser fill @e2 "john@example.com" -agent-browser select @e3 "United States" -agent-browser check @e4 # Agree to terms -agent-browser click @e5 # Submit button -agent-browser screenshot confirmation.png +agent-browser fill @e1 "Jane Doe" +agent-browser fill @e2 "jane@example.com" +agent-browser select @e3 "California" +agent-browser check @e4 +agent-browser click @e5 +agent-browser wait --load networkidle ``` -### Debug Mode 
+### Authentication with Auth Vault (Recommended) ```bash -# Run with visible browser window -agent-browser --headed open https://example.com -agent-browser --headed snapshot -i -agent-browser --headed click @e1 +# Save credentials once (encrypted with AGENT_BROWSER_ENCRYPTION_KEY) +# Recommended: pipe password via stdin to avoid shell history exposure +echo "pass" | agent-browser auth save github --url https://github.com/login --username user --password-stdin + +# Login using saved profile (LLM never sees password) +agent-browser auth login github + +# List/show/delete profiles +agent-browser auth list +agent-browser auth show github +agent-browser auth delete github ``` -## JSON Output - -Add `--json` for structured output: +### Authentication with State Persistence ```bash +# Login once and save state +agent-browser open https://app.example.com/login +agent-browser snapshot -i +agent-browser fill @e1 "$USERNAME" +agent-browser fill @e2 "$PASSWORD" +agent-browser click @e3 +agent-browser wait --url "**/dashboard" +agent-browser state save auth.json + +# Reuse in future sessions +agent-browser state load auth.json +agent-browser open https://app.example.com/dashboard +``` + +### Session Persistence + +```bash +# Auto-save/restore cookies and localStorage across browser restarts +agent-browser --session-name myapp open https://app.example.com/login +# ... login flow ... 
+agent-browser close # State auto-saved to ~/.agent-browser/sessions/ + +# Next time, state is auto-loaded +agent-browser --session-name myapp open https://app.example.com/dashboard + +# Encrypt state at rest +export AGENT_BROWSER_ENCRYPTION_KEY=$(openssl rand -hex 32) +agent-browser --session-name secure open https://app.example.com + +# Manage saved states +agent-browser state list +agent-browser state show myapp-default.json +agent-browser state clear myapp +agent-browser state clean --older-than 7 +``` + +### Data Extraction + +```bash +agent-browser open https://example.com/products +agent-browser snapshot -i +agent-browser get text @e5 # Get specific element text +agent-browser get text body > page.txt # Get all page text + +# JSON output for parsing agent-browser snapshot -i --json +agent-browser get text @e1 --json ``` -Returns: +### Parallel Sessions + +```bash +agent-browser --session site1 open https://site-a.com +agent-browser --session site2 open https://site-b.com + +agent-browser --session site1 snapshot -i +agent-browser --session site2 snapshot -i + +agent-browser session list +``` + +### Connect to Existing Chrome + +```bash +# Auto-discover running Chrome with remote debugging enabled +agent-browser --auto-connect open https://example.com +agent-browser --auto-connect snapshot + +# Or with explicit CDP port +agent-browser --cdp 9222 snapshot +``` + +### Color Scheme (Dark Mode) + +```bash +# Persistent dark mode via flag (applies to all pages and new tabs) +agent-browser --color-scheme dark open https://example.com + +# Or via environment variable +AGENT_BROWSER_COLOR_SCHEME=dark agent-browser open https://example.com + +# Or set during session (persists for subsequent commands) +agent-browser set media dark +``` + +### Viewport & Responsive Testing + +```bash +# Set a custom viewport size (default is 1280x720) +agent-browser set viewport 1920 1080 +agent-browser screenshot desktop.png + +# Test mobile-width layout +agent-browser set viewport 375 
812 +agent-browser screenshot mobile.png + +# Retina/HiDPI: same CSS layout at 2x pixel density +# Screenshots stay at logical viewport size, but content renders at higher DPI +agent-browser set viewport 1920 1080 2 +agent-browser screenshot retina.png + +# Device emulation (sets viewport + user agent in one step) +agent-browser set device "iPhone 14" +agent-browser screenshot device.png +``` + +The `scale` parameter (3rd argument) sets `window.devicePixelRatio` without changing CSS layout. Use it when testing retina rendering or capturing higher-resolution screenshots. + +### Visual Browser (Debugging) + +```bash +agent-browser --headed open https://example.com +agent-browser highlight @e1 # Highlight element +agent-browser inspect # Open Chrome DevTools for the active page +agent-browser record start demo.webm # Record session +agent-browser profiler start # Start Chrome DevTools profiling +agent-browser profiler stop trace.json # Stop and save profile (path optional) +``` + +Use `AGENT_BROWSER_HEADED=1` to enable headed mode via environment variable. Browser extensions work in both headed and headless mode. 
+ +### Local Files (PDFs, HTML) + +```bash +# Open local files with file:// URLs +agent-browser --allow-file-access open file:///path/to/document.pdf +agent-browser --allow-file-access open file:///path/to/page.html +agent-browser screenshot output.png +``` + +### iOS Simulator (Mobile Safari) + +```bash +# List available iOS simulators +agent-browser device list + +# Launch Safari on a specific device +agent-browser -p ios --device "iPhone 16 Pro" open https://example.com + +# Same workflow as desktop - snapshot, interact, re-snapshot +agent-browser -p ios snapshot -i +agent-browser -p ios tap @e1 # Tap (alias for click) +agent-browser -p ios fill @e2 "text" +agent-browser -p ios swipe up # Mobile-specific gesture + +# Take screenshot +agent-browser -p ios screenshot mobile.png + +# Close session (shuts down simulator) +agent-browser -p ios close +``` + +**Requirements:** macOS with Xcode, Appium (`npm install -g appium && appium driver install xcuitest`) + +**Real devices:** Works with physical iOS devices if pre-configured. Use `--device ""` where UDID is from `xcrun xctrace list devices`. + +## Security + +All security features are opt-in. By default, agent-browser imposes no restrictions on navigation, actions, or output. + +### Content Boundaries (Recommended for AI Agents) + +Enable `--content-boundaries` to wrap page-sourced output in markers that help LLMs distinguish tool output from untrusted page content: + +```bash +export AGENT_BROWSER_CONTENT_BOUNDARIES=1 +agent-browser snapshot +# Output: +# --- AGENT_BROWSER_PAGE_CONTENT nonce= origin=https://example.com --- +# [accessibility tree] +# --- END_AGENT_BROWSER_PAGE_CONTENT nonce= --- +``` + +### Domain Allowlist + +Restrict navigation to trusted domains. Wildcards like `*.example.com` also match the bare domain `example.com`. Sub-resource requests, WebSocket, and EventSource connections to non-allowed domains are also blocked. 
Include CDN domains your target pages depend on: + +```bash +export AGENT_BROWSER_ALLOWED_DOMAINS="example.com,*.example.com" +agent-browser open https://example.com # OK +agent-browser open https://malicious.com # Blocked +``` + +### Action Policy + +Use a policy file to gate destructive actions: + +```bash +export AGENT_BROWSER_ACTION_POLICY=./policy.json +``` + +Example `policy.json`: + +```json +{ "default": "deny", "allow": ["navigate", "snapshot", "click", "scroll", "wait", "get"] } +``` + +Auth vault operations (`auth login`, etc.) bypass action policy but domain allowlist still applies. + +### Output Limits + +Prevent context flooding from large pages: + +```bash +export AGENT_BROWSER_MAX_OUTPUT=50000 +``` + +## Diffing (Verifying Changes) + +Use `diff snapshot` after performing an action to verify it had the intended effect. This compares the current accessibility tree against the last snapshot taken in the session. + +```bash +# Typical workflow: snapshot -> action -> diff +agent-browser snapshot -i # Take baseline snapshot +agent-browser click @e2 # Perform action +agent-browser diff snapshot # See what changed (auto-compares to last snapshot) +``` + +For visual regression testing or monitoring: + +```bash +# Save a baseline screenshot, then compare later +agent-browser screenshot baseline.png +# ... time passes or changes are made ... +agent-browser diff screenshot --baseline baseline.png + +# Compare staging vs production +agent-browser diff url https://staging.example.com https://prod.example.com --screenshot +``` + +`diff snapshot` output uses `+` for additions and `-` for removals, similar to git diff. `diff screenshot` produces a diff image with changed pixels highlighted in red, plus a mismatch percentage. + +## Timeouts and Slow Pages + +The default timeout is 25 seconds. This can be overridden with the `AGENT_BROWSER_DEFAULT_TIMEOUT` environment variable (value in milliseconds). 
For slow websites or large pages, use explicit waits instead of relying on the default timeout: + +```bash +# Wait for network activity to settle (best for slow pages) +agent-browser wait --load networkidle + +# Wait for a specific element to appear +agent-browser wait "#content" +agent-browser wait @e1 + +# Wait for a specific URL pattern (useful after redirects) +agent-browser wait --url "**/dashboard" + +# Wait for a JavaScript condition +agent-browser wait --fn "document.readyState === 'complete'" + +# Wait a fixed duration (milliseconds) as a last resort +agent-browser wait 5000 +``` + +When dealing with consistently slow websites, use `wait --load networkidle` after `open` to ensure the page is fully loaded before taking a snapshot. If a specific element is slow to render, wait for it directly with `wait ` or `wait @ref`. + +## Session Management and Cleanup + +When running multiple agents or automations concurrently, always use named sessions to avoid conflicts: + +```bash +# Each agent gets its own isolated session +agent-browser --session agent1 open site-a.com +agent-browser --session agent2 open site-b.com + +# Check active sessions +agent-browser session list +``` + +Always close your browser session when done to avoid leaked processes: + +```bash +agent-browser close # Close default session +agent-browser --session agent1 close # Close specific session +``` + +If a previous session was not closed properly, the daemon may still be running. Use `agent-browser close` to clean it up before starting new work. + +To auto-shutdown the daemon after a period of inactivity (useful for ephemeral/CI environments): + +```bash +AGENT_BROWSER_IDLE_TIMEOUT_MS=60000 agent-browser open example.com +``` + +## Ref Lifecycle (Important) + +Refs (`@e1`, `@e2`, etc.) are invalidated when the page changes. 
Always re-snapshot after: + +- Clicking links or buttons that navigate +- Form submissions +- Dynamic content loading (dropdowns, modals) + +```bash +agent-browser click @e5 # Navigates to new page +agent-browser snapshot -i # MUST re-snapshot +agent-browser click @e1 # Use new refs +``` + +## Annotated Screenshots (Vision Mode) + +Use `--annotate` to take a screenshot with numbered labels overlaid on interactive elements. Each label `[N]` maps to ref `@eN`. This also caches refs, so you can interact with elements immediately without a separate snapshot. + +```bash +agent-browser screenshot --annotate +# Output includes the image path and a legend: +# [1] @e1 button "Submit" +# [2] @e2 link "Home" +# [3] @e3 textbox "Email" +agent-browser click @e2 # Click using ref from annotated screenshot +``` + +Use annotated screenshots when: + +- The page has unlabeled icon buttons or visual-only elements +- You need to verify visual layout or styling +- Canvas or chart elements are present (invisible to text snapshots) +- You need spatial reasoning about element positions + +## Semantic Locators (Alternative to Refs) + +When refs are unavailable or unreliable, use semantic locators: + +```bash +agent-browser find text "Sign In" click +agent-browser find label "Email" fill "user@test.com" +agent-browser find role button click --name "Submit" +agent-browser find placeholder "Search" type "query" +agent-browser find testid "submit-btn" click +``` + +## JavaScript Evaluation (eval) + +Use `eval` to run JavaScript in the browser context. **Shell quoting can corrupt complex expressions** -- use `--stdin` or `-b` to avoid issues. 
+ +```bash +# Simple expressions work with regular quoting +agent-browser eval 'document.title' +agent-browser eval 'document.querySelectorAll("img").length' + +# Complex JS: use --stdin with heredoc (RECOMMENDED) +agent-browser eval --stdin <<'EVALEOF' +JSON.stringify( + Array.from(document.querySelectorAll("img")) + .filter(i => !i.alt) + .map(i => ({ src: i.src.split("/").pop(), width: i.width })) +) +EVALEOF + +# Alternative: base64 encoding (avoids all shell escaping issues) +agent-browser eval -b "$(echo -n 'Array.from(document.querySelectorAll("a")).map(a => a.href)' | base64)" +``` + +**Why this matters:** When the shell processes your command, inner double quotes, `!` characters (history expansion), backticks, and `$()` can all corrupt the JavaScript before it reaches agent-browser. The `--stdin` and `-b` flags bypass shell interpretation entirely. + +**Rules of thumb:** + +- Single-line, no nested quotes -> regular `eval 'expression'` with single quotes is fine +- Nested quotes, arrow functions, template literals, or multiline -> use `eval --stdin <<'EVALEOF'` +- Programmatic/generated scripts -> use `eval -b` with base64 + +## Configuration File + +Create `agent-browser.json` in the project root for persistent settings: + ```json { - "success": true, - "data": { - "refs": { - "e1": {"name": "Submit", "role": "button"}, - "e2": {"name": "Email", "role": "textbox"} - }, - "snapshot": "- button \"Submit\" [ref=e1]\n- textbox \"Email\" [ref=e2]" - } + "headed": true, + "proxy": "http://localhost:8080", + "profile": "./browser-data" } ``` +Priority (lowest to highest): `~/.agent-browser/config.json` < `./agent-browser.json` < env vars < CLI flags. Use `--config ` or `AGENT_BROWSER_CONFIG` env var for a custom config file (exits with error if missing/invalid). All CLI options map to camelCase keys (e.g., `--executable-path` -> `"executablePath"`). Boolean flags accept `true`/`false` values (e.g., `--headed false` overrides config). 
Extensions from user and project configs are merged, not replaced. + +## Browser Engine Selection + +Use `--engine` to choose a local browser engine. The default is `chrome`. + +```bash +# Use Lightpanda (fast headless browser, requires separate install) +agent-browser --engine lightpanda open example.com + +# Via environment variable +export AGENT_BROWSER_ENGINE=lightpanda +agent-browser open example.com + +# With custom binary path +agent-browser --engine lightpanda --executable-path /path/to/lightpanda open example.com +``` + +Supported engines: +- `chrome` (default) -- Chrome/Chromium via CDP +- `lightpanda` -- Lightpanda headless browser via CDP (10x faster, 10x less memory than Chrome) + +Lightpanda does not support `--extension`, `--profile`, `--state`, or `--allow-file-access`. Install Lightpanda from https://lightpanda.io/docs/open-source/installation. + +## Deep-Dive Documentation + +| Reference | When to Use | +| -------------------------------------------------------------------- | --------------------------------------------------------- | +| [references/commands.md](references/commands.md) | Full command reference with all options | +| [references/snapshot-refs.md](references/snapshot-refs.md) | Ref lifecycle, invalidation rules, troubleshooting | +| [references/session-management.md](references/session-management.md) | Parallel sessions, state persistence, concurrent scraping | +| [references/authentication.md](references/authentication.md) | Login flows, OAuth, 2FA handling, state reuse | +| [references/video-recording.md](references/video-recording.md) | Recording workflows for debugging and documentation | +| [references/profiling.md](references/profiling.md) | Chrome DevTools profiling for performance analysis | +| [references/proxy-support.md](references/proxy-support.md) | Proxy configuration, geo-testing, rotating proxies | + +## Ready-to-Use Templates + +| Template | Description | +| 
------------------------------------------------------------------------ | ----------------------------------- | +| [templates/form-automation.sh](templates/form-automation.sh) | Form filling with validation | +| [templates/authenticated-session.sh](templates/authenticated-session.sh) | Login once, reuse state | +| [templates/capture-workflow.sh](templates/capture-workflow.sh) | Content extraction with screenshots | + +```bash +./templates/form-automation.sh https://example.com/form +./templates/authenticated-session.sh https://app.example.com/login +./templates/capture-workflow.sh https://example.com ./output +``` + ## vs Playwright MCP | Feature | agent-browser (CLI) | Playwright MCP | diff --git a/plugins/compound-engineering/skills/agent-browser/references/authentication.md b/plugins/compound-engineering/skills/agent-browser/references/authentication.md new file mode 100644 index 0000000..cb300ce --- /dev/null +++ b/plugins/compound-engineering/skills/agent-browser/references/authentication.md @@ -0,0 +1,303 @@ +# Authentication Patterns + +Login flows, session persistence, OAuth, 2FA, and authenticated browsing. + +**Related**: [commands.md](commands.md) for full command reference, [SKILL.md](../SKILL.md) for quick start. 
+ +## Contents + +- [Import Auth from Your Browser](#import-auth-from-your-browser) +- [Persistent Profiles](#persistent-profiles) +- [Session Persistence](#session-persistence) +- [Basic Login Flow](#basic-login-flow) +- [Saving Authentication State](#saving-authentication-state) +- [Restoring Authentication](#restoring-authentication) +- [OAuth / SSO Flows](#oauth--sso-flows) +- [Two-Factor Authentication](#two-factor-authentication) +- [HTTP Basic Auth](#http-basic-auth) +- [Cookie-Based Auth](#cookie-based-auth) +- [Token Refresh Handling](#token-refresh-handling) +- [Security Best Practices](#security-best-practices) + +## Import Auth from Your Browser + +The fastest way to authenticate is to reuse cookies from a Chrome session you are already logged into. + +**Step 1: Start Chrome with remote debugging** + +```bash +# macOS +"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome" --remote-debugging-port=9222 + +# Linux +google-chrome --remote-debugging-port=9222 + +# Windows +"C:\Program Files\Google\Chrome\Application\chrome.exe" --remote-debugging-port=9222 +``` + +Log in to your target site(s) in this Chrome window as you normally would. + +> **Security note:** `--remote-debugging-port` exposes full browser control on localhost. Any local process can connect and read cookies, execute JS, etc. Only use on trusted machines and close Chrome when done. + +**Step 2: Grab the auth state** + +```bash +# Auto-discover the running Chrome and save its cookies + localStorage +agent-browser --auto-connect state save ./my-auth.json +``` + +**Step 3: Reuse in automation** + +```bash +# Load auth at launch +agent-browser --state ./my-auth.json open https://app.example.com/dashboard + +# Or load into an existing session +agent-browser state load ./my-auth.json +agent-browser open https://app.example.com/dashboard +``` + +This works for any site, including those with complex OAuth flows, SSO, or 2FA -- as long as Chrome already has valid session cookies. 
+ +> **Security note:** State files contain session tokens in plaintext. Add them to `.gitignore`, delete when no longer needed, and set `AGENT_BROWSER_ENCRYPTION_KEY` for encryption at rest. See [Security Best Practices](#security-best-practices). + +**Tip:** Combine with `--session-name` so the imported auth auto-persists across restarts: + +```bash +agent-browser --session-name myapp state load ./my-auth.json +# From now on, state is auto-saved/restored for "myapp" +``` + +## Persistent Profiles + +Use `--profile` to point agent-browser at a Chrome user data directory. This persists everything (cookies, IndexedDB, service workers, cache) across browser restarts without explicit save/load: + +```bash +# First run: login once +agent-browser --profile ~/.myapp-profile open https://app.example.com/login +# ... complete login flow ... + +# All subsequent runs: already authenticated +agent-browser --profile ~/.myapp-profile open https://app.example.com/dashboard +``` + +Use different paths for different projects or test users: + +```bash +agent-browser --profile ~/.profiles/admin open https://app.example.com +agent-browser --profile ~/.profiles/viewer open https://app.example.com +``` + +Or set via environment variable: + +```bash +export AGENT_BROWSER_PROFILE=~/.myapp-profile +agent-browser open https://app.example.com/dashboard +``` + +## Session Persistence + +Use `--session-name` to auto-save and restore cookies + localStorage by name, without managing files: + +```bash +# Auto-saves state on close, auto-restores on next launch +agent-browser --session-name twitter open https://twitter.com +# ... login flow ... 
+agent-browser close # state saved to ~/.agent-browser/sessions/ + +# Next time: state is automatically restored +agent-browser --session-name twitter open https://twitter.com +``` + +Encrypt state at rest: + +```bash +export AGENT_BROWSER_ENCRYPTION_KEY=$(openssl rand -hex 32) +agent-browser --session-name secure open https://app.example.com +``` + +## Basic Login Flow + +```bash +# Navigate to login page +agent-browser open https://app.example.com/login +agent-browser wait --load networkidle + +# Get form elements +agent-browser snapshot -i +# Output: @e1 [input type="email"], @e2 [input type="password"], @e3 [button] "Sign In" + +# Fill credentials +agent-browser fill @e1 "user@example.com" +agent-browser fill @e2 "password123" + +# Submit +agent-browser click @e3 +agent-browser wait --load networkidle + +# Verify login succeeded +agent-browser get url # Should be dashboard, not login +``` + +## Saving Authentication State + +After logging in, save state for reuse: + +```bash +# Login first (see above) +agent-browser open https://app.example.com/login +agent-browser snapshot -i +agent-browser fill @e1 "user@example.com" +agent-browser fill @e2 "password123" +agent-browser click @e3 +agent-browser wait --url "**/dashboard" + +# Save authenticated state +agent-browser state save ./auth-state.json +``` + +## Restoring Authentication + +Skip login by loading saved state: + +```bash +# Load saved auth state +agent-browser state load ./auth-state.json + +# Navigate directly to protected page +agent-browser open https://app.example.com/dashboard + +# Verify authenticated +agent-browser snapshot -i +``` + +## OAuth / SSO Flows + +For OAuth redirects: + +```bash +# Start OAuth flow +agent-browser open https://app.example.com/auth/google + +# Handle redirects automatically +agent-browser wait --url "**/accounts.google.com**" +agent-browser snapshot -i + +# Fill Google credentials +agent-browser fill @e1 "user@gmail.com" +agent-browser click @e2 # Next button 
+agent-browser wait 2000 +agent-browser snapshot -i +agent-browser fill @e3 "password" +agent-browser click @e4 # Sign in + +# Wait for redirect back +agent-browser wait --url "**/app.example.com**" +agent-browser state save ./oauth-state.json +``` + +## Two-Factor Authentication + +Handle 2FA with manual intervention: + +```bash +# Login with credentials +agent-browser open https://app.example.com/login --headed # Show browser +agent-browser snapshot -i +agent-browser fill @e1 "user@example.com" +agent-browser fill @e2 "password123" +agent-browser click @e3 + +# Wait for user to complete 2FA manually +echo "Complete 2FA in the browser window..." +agent-browser wait --url "**/dashboard" --timeout 120000 + +# Save state after 2FA +agent-browser state save ./2fa-state.json +``` + +## HTTP Basic Auth + +For sites using HTTP Basic Authentication: + +```bash +# Set credentials before navigation +agent-browser set credentials username password + +# Navigate to protected resource +agent-browser open https://protected.example.com/api +``` + +## Cookie-Based Auth + +Manually set authentication cookies: + +```bash +# Set auth cookie +agent-browser cookies set session_token "abc123xyz" + +# Navigate to protected page +agent-browser open https://app.example.com/dashboard +``` + +## Token Refresh Handling + +For sessions with expiring tokens: + +```bash +#!/bin/bash +# Wrapper that handles token refresh + +STATE_FILE="./auth-state.json" + +# Try loading existing state +if [[ -f "$STATE_FILE" ]]; then + agent-browser state load "$STATE_FILE" + agent-browser open https://app.example.com/dashboard + + # Check if session is still valid + URL=$(agent-browser get url) + if [[ "$URL" == *"/login"* ]]; then + echo "Session expired, re-authenticating..." 
+ # Perform fresh login + agent-browser snapshot -i + agent-browser fill @e1 "$USERNAME" + agent-browser fill @e2 "$PASSWORD" + agent-browser click @e3 + agent-browser wait --url "**/dashboard" + agent-browser state save "$STATE_FILE" + fi +else + # First-time login + agent-browser open https://app.example.com/login + # ... login flow ... +fi +``` + +## Security Best Practices + +1. **Never commit state files** - They contain session tokens + ```bash + echo "*.auth-state.json" >> .gitignore + ``` + +2. **Use environment variables for credentials** + ```bash + agent-browser fill @e1 "$APP_USERNAME" + agent-browser fill @e2 "$APP_PASSWORD" + ``` + +3. **Clean up after automation** + ```bash + agent-browser cookies clear + rm -f ./auth-state.json + ``` + +4. **Use short-lived sessions for CI/CD** + ```bash + # Don't persist state in CI + agent-browser open https://app.example.com/login + # ... login and perform actions ... + agent-browser close # Session ends, nothing persisted + ``` diff --git a/plugins/compound-engineering/skills/agent-browser/references/commands.md b/plugins/compound-engineering/skills/agent-browser/references/commands.md new file mode 100644 index 0000000..383a748 --- /dev/null +++ b/plugins/compound-engineering/skills/agent-browser/references/commands.md @@ -0,0 +1,266 @@ +# Command Reference + +Complete reference for all agent-browser commands. For quick start and common patterns, see SKILL.md. 
+ +## Navigation + +```bash +agent-browser open # Navigate to URL (aliases: goto, navigate) + # Supports: https://, http://, file://, about:, data:// + # Auto-prepends https:// if no protocol given +agent-browser back # Go back +agent-browser forward # Go forward +agent-browser reload # Reload page +agent-browser close # Close browser (aliases: quit, exit) +agent-browser connect 9222 # Connect to browser via CDP port +``` + +## Snapshot (page analysis) + +```bash +agent-browser snapshot # Full accessibility tree +agent-browser snapshot -i # Interactive elements only (recommended) +agent-browser snapshot -c # Compact output +agent-browser snapshot -d 3 # Limit depth to 3 +agent-browser snapshot -s "#main" # Scope to CSS selector +``` + +## Interactions (use @refs from snapshot) + +```bash +agent-browser click @e1 # Click +agent-browser click @e1 --new-tab # Click and open in new tab +agent-browser dblclick @e1 # Double-click +agent-browser focus @e1 # Focus element +agent-browser fill @e2 "text" # Clear and type +agent-browser type @e2 "text" # Type without clearing +agent-browser press Enter # Press key (alias: key) +agent-browser press Control+a # Key combination +agent-browser keydown Shift # Hold key down +agent-browser keyup Shift # Release key +agent-browser hover @e1 # Hover +agent-browser check @e1 # Check checkbox +agent-browser uncheck @e1 # Uncheck checkbox +agent-browser select @e1 "value" # Select dropdown option +agent-browser select @e1 "a" "b" # Select multiple options +agent-browser scroll down 500 # Scroll page (default: down 300px) +agent-browser scrollintoview @e1 # Scroll element into view (alias: scrollinto) +agent-browser drag @e1 @e2 # Drag and drop +agent-browser upload @e1 file.pdf # Upload files +``` + +## Get Information + +```bash +agent-browser get text @e1 # Get element text +agent-browser get html @e1 # Get innerHTML +agent-browser get value @e1 # Get input value +agent-browser get attr @e1 href # Get attribute +agent-browser get 
title # Get page title +agent-browser get url # Get current URL +agent-browser get cdp-url # Get CDP WebSocket URL +agent-browser get count ".item" # Count matching elements +agent-browser get box @e1 # Get bounding box +agent-browser get styles @e1 # Get computed styles (font, color, bg, etc.) +``` + +## Check State + +```bash +agent-browser is visible @e1 # Check if visible +agent-browser is enabled @e1 # Check if enabled +agent-browser is checked @e1 # Check if checked +``` + +## Screenshots and PDF + +```bash +agent-browser screenshot # Save to temporary directory +agent-browser screenshot path.png # Save to specific path +agent-browser screenshot --full # Full page +agent-browser pdf output.pdf # Save as PDF +``` + +## Video Recording + +```bash +agent-browser record start ./demo.webm # Start recording +agent-browser click @e1 # Perform actions +agent-browser record stop # Stop and save video +agent-browser record restart ./take2.webm # Stop current + start new +``` + +## Wait + +```bash +agent-browser wait @e1 # Wait for element +agent-browser wait 2000 # Wait milliseconds +agent-browser wait --text "Success" # Wait for text (or -t) +agent-browser wait --url "**/dashboard" # Wait for URL pattern (or -u) +agent-browser wait --load networkidle # Wait for network idle (or -l) +agent-browser wait --fn "window.ready" # Wait for JS condition (or -f) +``` + +## Mouse Control + +```bash +agent-browser mouse move 100 200 # Move mouse +agent-browser mouse down left # Press button +agent-browser mouse up left # Release button +agent-browser mouse wheel 100 # Scroll wheel +``` + +## Semantic Locators (alternative to refs) + +```bash +agent-browser find role button click --name "Submit" +agent-browser find text "Sign In" click +agent-browser find text "Sign In" click --exact # Exact match only +agent-browser find label "Email" fill "user@test.com" +agent-browser find placeholder "Search" type "query" +agent-browser find alt "Logo" click +agent-browser find title "Close" 
click +agent-browser find testid "submit-btn" click +agent-browser find first ".item" click +agent-browser find last ".item" click +agent-browser find nth 2 "a" hover +``` + +## Browser Settings + +```bash +agent-browser set viewport 1920 1080 # Set viewport size +agent-browser set viewport 1920 1080 2 # 2x retina (same CSS size, higher res screenshots) +agent-browser set device "iPhone 14" # Emulate device +agent-browser set geo 37.7749 -122.4194 # Set geolocation (alias: geolocation) +agent-browser set offline on # Toggle offline mode +agent-browser set headers '{"X-Key":"v"}' # Extra HTTP headers +agent-browser set credentials user pass # HTTP basic auth (alias: auth) +agent-browser set media dark # Emulate color scheme +agent-browser set media light reduced-motion # Light mode + reduced motion +``` + +## Cookies and Storage + +```bash +agent-browser cookies # Get all cookies +agent-browser cookies set name value # Set cookie +agent-browser cookies clear # Clear cookies +agent-browser storage local # Get all localStorage +agent-browser storage local key # Get specific key +agent-browser storage local set k v # Set value +agent-browser storage local clear # Clear all +``` + +## Network + +```bash +agent-browser network route # Intercept requests +agent-browser network route --abort # Block requests +agent-browser network route --body '{}' # Mock response +agent-browser network unroute [url] # Remove routes +agent-browser network requests # View tracked requests +agent-browser network requests --filter api # Filter requests +``` + +## Tabs and Windows + +```bash +agent-browser tab # List tabs +agent-browser tab new [url] # New tab +agent-browser tab 2 # Switch to tab by index +agent-browser tab close # Close current tab +agent-browser tab close 2 # Close tab by index +agent-browser window new # New window +``` + +## Frames + +```bash +agent-browser frame "#iframe" # Switch to iframe +agent-browser frame main # Back to main frame +``` + +## Dialogs + +```bash 
+agent-browser dialog accept [text] # Accept dialog
+agent-browser dialog dismiss # Dismiss dialog
+```
+
+## JavaScript
+
+```bash
+agent-browser eval "document.title" # Simple expressions only
+agent-browser eval -b "<base64>" # Any JavaScript (base64 encoded)
+agent-browser eval --stdin # Read script from stdin
+```
+
+Use `-b`/`--base64` or `--stdin` for reliable execution. Shell escaping with nested quotes and special characters is error-prone.
+
+```bash
+# Base64 encode your script, then:
+agent-browser eval -b "ZG9jdW1lbnQucXVlcnlTZWxlY3RvcignW3NyYyo9Il9uZXh0Il0nKQ=="
+
+# Or use stdin with heredoc for multiline scripts:
+cat <<'EOF' | agent-browser eval --stdin
+const links = document.querySelectorAll('a');
+Array.from(links).map(a => a.href);
+EOF
+```
+
+## State Management
+
+```bash
+agent-browser state save auth.json # Save cookies, storage, auth state
+agent-browser state load auth.json # Restore saved state
+```
+
+## Global Options
+
+```bash
+agent-browser --session ... # Isolated browser session
+agent-browser --json ... # JSON output for parsing
+agent-browser --headed ... # Show browser window (not headless)
+agent-browser --full ... # Full page screenshot (-f)
+agent-browser --cdp ... # Connect via Chrome DevTools Protocol
+agent-browser -p ... # Cloud browser provider (--provider)
+agent-browser --proxy ... # Use proxy server
+agent-browser --proxy-bypass # Hosts to bypass proxy
+agent-browser --headers ... # HTTP headers scoped to URL's origin
+agent-browser --executable-path # Custom browser executable
+agent-browser --extension ... # Load browser extension (repeatable)
+agent-browser --ignore-https-errors # Ignore SSL certificate errors
+agent-browser --help # Show help (-h)
+agent-browser --version # Show version (-V)
+agent-browser <command> --help # Show detailed help for a command
+```
+
+## Debugging
+
+```bash
+agent-browser --headed open example.com # Show browser window
+agent-browser --cdp 9222 snapshot # Connect via CDP port
+agent-browser connect 9222 # Alternative: connect command
+agent-browser console # View console messages
+agent-browser console --clear # Clear console
+agent-browser errors # View page errors
+agent-browser errors --clear # Clear errors
+agent-browser highlight @e1 # Highlight element
+agent-browser inspect # Open Chrome DevTools for this session
+agent-browser trace start # Start recording trace
+agent-browser trace stop trace.zip # Stop and save trace
+agent-browser profiler start # Start Chrome DevTools profiling
+agent-browser profiler stop trace.json # Stop and save profile
+```
+
+## Environment Variables
+
+```bash
+AGENT_BROWSER_SESSION="mysession" # Default session name
+AGENT_BROWSER_EXECUTABLE_PATH="/path/chrome" # Custom browser path
+AGENT_BROWSER_EXTENSIONS="/ext1,/ext2" # Comma-separated extension paths
+AGENT_BROWSER_PROVIDER="browserbase" # Cloud browser provider
+AGENT_BROWSER_STREAM_PORT="9223" # WebSocket streaming port
+AGENT_BROWSER_HOME="/path/to/agent-browser" # Custom install location
+```
diff --git a/plugins/compound-engineering/skills/agent-browser/references/profiling.md b/plugins/compound-engineering/skills/agent-browser/references/profiling.md
new file mode 100644
index 0000000..9e80d4c
--- /dev/null
+++ b/plugins/compound-engineering/skills/agent-browser/references/profiling.md
@@ -0,0 +1,120 @@
+# Profiling
+
+Capture Chrome DevTools performance profiles during browser automation for performance analysis.
+ +**Related**: [commands.md](commands.md) for full command reference, [SKILL.md](../SKILL.md) for quick start. + +## Contents + +- [Basic Profiling](#basic-profiling) +- [Profiler Commands](#profiler-commands) +- [Categories](#categories) +- [Use Cases](#use-cases) +- [Output Format](#output-format) +- [Viewing Profiles](#viewing-profiles) +- [Limitations](#limitations) + +## Basic Profiling + +```bash +# Start profiling +agent-browser profiler start + +# Perform actions +agent-browser navigate https://example.com +agent-browser click "#button" +agent-browser wait 1000 + +# Stop and save +agent-browser profiler stop ./trace.json +``` + +## Profiler Commands + +```bash +# Start profiling with default categories +agent-browser profiler start + +# Start with custom trace categories +agent-browser profiler start --categories "devtools.timeline,v8.execute,blink.user_timing" + +# Stop profiling and save to file +agent-browser profiler stop ./trace.json +``` + +## Categories + +The `--categories` flag accepts a comma-separated list of Chrome trace categories. Default categories include: + +- `devtools.timeline` -- standard DevTools performance traces +- `v8.execute` -- time spent running JavaScript +- `blink` -- renderer events +- `blink.user_timing` -- `performance.mark()` / `performance.measure()` calls +- `latencyInfo` -- input-to-latency tracking +- `renderer.scheduler` -- task scheduling and execution +- `toplevel` -- broad-spectrum basic events + +Several `disabled-by-default-*` categories are also included for detailed timeline, call stack, and V8 CPU profiling data. 
+ +## Use Cases + +### Diagnosing Slow Page Loads + +```bash +agent-browser profiler start +agent-browser navigate https://app.example.com +agent-browser wait --load networkidle +agent-browser profiler stop ./page-load-profile.json +``` + +### Profiling User Interactions + +```bash +agent-browser navigate https://app.example.com +agent-browser profiler start +agent-browser click "#submit" +agent-browser wait 2000 +agent-browser profiler stop ./interaction-profile.json +``` + +### CI Performance Regression Checks + +```bash +#!/bin/bash +agent-browser profiler start +agent-browser navigate https://app.example.com +agent-browser wait --load networkidle +agent-browser profiler stop "./profiles/build-${BUILD_ID}.json" +``` + +## Output Format + +The output is a JSON file in Chrome Trace Event format: + +```json +{ + "traceEvents": [ + { "cat": "devtools.timeline", "name": "RunTask", "ph": "X", "ts": 12345, "dur": 100 }, + ... + ], + "metadata": { + "clock-domain": "LINUX_CLOCK_MONOTONIC" + } +} +``` + +The `metadata.clock-domain` field is set based on the host platform (Linux or macOS). On Windows it is omitted. + +## Viewing Profiles + +Load the output JSON file in any of these tools: + +- **Chrome DevTools**: Performance panel > Load profile (Ctrl+Shift+I > Performance) +- **Perfetto UI**: https://ui.perfetto.dev/ -- drag and drop the JSON file +- **Trace Viewer**: `chrome://tracing` in any Chromium browser + +## Limitations + +- Only works with Chromium-based browsers (Chrome, Edge). Not supported on Firefox or WebKit. +- Trace data accumulates in memory while profiling is active (capped at 5 million events). Stop profiling promptly after the area of interest. +- Data collection on stop has a 30-second timeout. If the browser is unresponsive, the stop command may fail. 
diff --git a/plugins/compound-engineering/skills/agent-browser/references/proxy-support.md b/plugins/compound-engineering/skills/agent-browser/references/proxy-support.md new file mode 100644 index 0000000..e86a8fe --- /dev/null +++ b/plugins/compound-engineering/skills/agent-browser/references/proxy-support.md @@ -0,0 +1,194 @@ +# Proxy Support + +Proxy configuration for geo-testing, rate limiting avoidance, and corporate environments. + +**Related**: [commands.md](commands.md) for global options, [SKILL.md](../SKILL.md) for quick start. + +## Contents + +- [Basic Proxy Configuration](#basic-proxy-configuration) +- [Authenticated Proxy](#authenticated-proxy) +- [SOCKS Proxy](#socks-proxy) +- [Proxy Bypass](#proxy-bypass) +- [Common Use Cases](#common-use-cases) +- [Verifying Proxy Connection](#verifying-proxy-connection) +- [Troubleshooting](#troubleshooting) +- [Best Practices](#best-practices) + +## Basic Proxy Configuration + +Use the `--proxy` flag or set proxy via environment variable: + +```bash +# Via CLI flag +agent-browser --proxy "http://proxy.example.com:8080" open https://example.com + +# Via environment variable +export HTTP_PROXY="http://proxy.example.com:8080" +agent-browser open https://example.com + +# HTTPS proxy +export HTTPS_PROXY="https://proxy.example.com:8080" +agent-browser open https://example.com + +# Both +export HTTP_PROXY="http://proxy.example.com:8080" +export HTTPS_PROXY="http://proxy.example.com:8080" +agent-browser open https://example.com +``` + +## Authenticated Proxy + +For proxies requiring authentication: + +```bash +# Include credentials in URL +export HTTP_PROXY="http://username:password@proxy.example.com:8080" +agent-browser open https://example.com +``` + +## SOCKS Proxy + +```bash +# SOCKS5 proxy +export ALL_PROXY="socks5://proxy.example.com:1080" +agent-browser open https://example.com + +# SOCKS5 with auth +export ALL_PROXY="socks5://user:pass@proxy.example.com:1080" +agent-browser open https://example.com +``` + +## 
Proxy Bypass
+
+Skip proxy for specific domains using `--proxy-bypass` or `NO_PROXY`:
+
+```bash
+# Via CLI flag
+agent-browser --proxy "http://proxy.example.com:8080" --proxy-bypass "localhost,*.internal.com" open https://example.com
+
+# Via environment variable
+export NO_PROXY="localhost,127.0.0.1,.internal.company.com"
+agent-browser open https://internal.company.com # Direct connection
+agent-browser open https://external.com # Via proxy
+```
+
+## Common Use Cases
+
+### Geo-Location Testing
+
+```bash
+#!/bin/bash
+# Test site from different regions using geo-located proxies
+
+PROXIES=(
+  "http://us-proxy.example.com:8080"
+  "http://eu-proxy.example.com:8080"
+  "http://asia-proxy.example.com:8080"
+)
+
+for proxy in "${PROXIES[@]}"; do
+  export HTTP_PROXY="$proxy"
+  export HTTPS_PROXY="$proxy"
+
+  # Extract region prefix, e.g. "us" from "http://us-proxy.example.com:8080"
+  region=$(echo "$proxy" | sed -E 's|https?://([a-z]+)-.*|\1|')
+  echo "Testing from: $region"
+
+  agent-browser --session "$region" open https://example.com
+  agent-browser --session "$region" screenshot "./screenshots/$region.png"
+  agent-browser --session "$region" close
+done
+```
+
+### Rotating Proxies for Scraping
+
+```bash
+#!/bin/bash
+# Rotate through proxy list to avoid rate limiting
+
+PROXY_LIST=(
+  "http://proxy1.example.com:8080"
+  "http://proxy2.example.com:8080"
+  "http://proxy3.example.com:8080"
+)
+
+URLS=(
+  "https://site.com/page1"
+  "https://site.com/page2"
+  "https://site.com/page3"
+)
+
+for i in "${!URLS[@]}"; do
+  proxy_index=$((i % ${#PROXY_LIST[@]}))
+  export HTTP_PROXY="${PROXY_LIST[$proxy_index]}"
+  export HTTPS_PROXY="${PROXY_LIST[$proxy_index]}"
+
+  agent-browser open "${URLS[$i]}"
+  agent-browser get text body > "output-$i.txt"
+  agent-browser close
+
+  sleep 1 # Polite delay
+done
+```
+
+### Corporate Network Access
+
+```bash
+#!/bin/bash
+# Access internal sites via corporate proxy
+
+export HTTP_PROXY="http://corpproxy.company.com:8080"
+export HTTPS_PROXY="http://corpproxy.company.com:8080"
+export NO_PROXY="localhost,127.0.0.1,.company.com"
+ +# External sites go through proxy +agent-browser open https://external-vendor.com + +# Internal sites bypass proxy +agent-browser open https://intranet.company.com +``` + +## Verifying Proxy Connection + +```bash +# Check your apparent IP +agent-browser open https://httpbin.org/ip +agent-browser get text body +# Should show proxy's IP, not your real IP +``` + +## Troubleshooting + +### Proxy Connection Failed + +```bash +# Test proxy connectivity first +curl -x http://proxy.example.com:8080 https://httpbin.org/ip + +# Check if proxy requires auth +export HTTP_PROXY="http://user:pass@proxy.example.com:8080" +``` + +### SSL/TLS Errors Through Proxy + +Some proxies perform SSL inspection. If you encounter certificate errors: + +```bash +# For testing only - not recommended for production +agent-browser open https://example.com --ignore-https-errors +``` + +### Slow Performance + +```bash +# Use proxy only when necessary +export NO_PROXY="*.cdn.com,*.static.com" # Direct CDN access +``` + +## Best Practices + +1. **Use environment variables** - Don't hardcode proxy credentials +2. **Set NO_PROXY appropriately** - Avoid routing local traffic through proxy +3. **Test proxy before automation** - Verify connectivity with simple requests +4. **Handle proxy failures gracefully** - Implement retry logic for unstable proxies +5. **Rotate proxies for large scraping jobs** - Distribute load and avoid bans diff --git a/plugins/compound-engineering/skills/agent-browser/references/session-management.md b/plugins/compound-engineering/skills/agent-browser/references/session-management.md new file mode 100644 index 0000000..bb5312d --- /dev/null +++ b/plugins/compound-engineering/skills/agent-browser/references/session-management.md @@ -0,0 +1,193 @@ +# Session Management + +Multiple isolated browser sessions with state persistence and concurrent browsing. + +**Related**: [authentication.md](authentication.md) for login patterns, [SKILL.md](../SKILL.md) for quick start. 
+ +## Contents + +- [Named Sessions](#named-sessions) +- [Session Isolation Properties](#session-isolation-properties) +- [Session State Persistence](#session-state-persistence) +- [Common Patterns](#common-patterns) +- [Default Session](#default-session) +- [Session Cleanup](#session-cleanup) +- [Best Practices](#best-practices) + +## Named Sessions + +Use `--session` flag to isolate browser contexts: + +```bash +# Session 1: Authentication flow +agent-browser --session auth open https://app.example.com/login + +# Session 2: Public browsing (separate cookies, storage) +agent-browser --session public open https://example.com + +# Commands are isolated by session +agent-browser --session auth fill @e1 "user@example.com" +agent-browser --session public get text body +``` + +## Session Isolation Properties + +Each session has independent: +- Cookies +- LocalStorage / SessionStorage +- IndexedDB +- Cache +- Browsing history +- Open tabs + +## Session State Persistence + +### Save Session State + +```bash +# Save cookies, storage, and auth state +agent-browser state save /path/to/auth-state.json +``` + +### Load Session State + +```bash +# Restore saved state +agent-browser state load /path/to/auth-state.json + +# Continue with authenticated session +agent-browser open https://app.example.com/dashboard +``` + +### State File Contents + +```json +{ + "cookies": [...], + "localStorage": {...}, + "sessionStorage": {...}, + "origins": [...] 
+} +``` + +## Common Patterns + +### Authenticated Session Reuse + +```bash +#!/bin/bash +# Save login state once, reuse many times + +STATE_FILE="/tmp/auth-state.json" + +# Check if we have saved state +if [[ -f "$STATE_FILE" ]]; then + agent-browser state load "$STATE_FILE" + agent-browser open https://app.example.com/dashboard +else + # Perform login + agent-browser open https://app.example.com/login + agent-browser snapshot -i + agent-browser fill @e1 "$USERNAME" + agent-browser fill @e2 "$PASSWORD" + agent-browser click @e3 + agent-browser wait --load networkidle + + # Save for future use + agent-browser state save "$STATE_FILE" +fi +``` + +### Concurrent Scraping + +```bash +#!/bin/bash +# Scrape multiple sites concurrently + +# Start all sessions +agent-browser --session site1 open https://site1.com & +agent-browser --session site2 open https://site2.com & +agent-browser --session site3 open https://site3.com & +wait + +# Extract from each +agent-browser --session site1 get text body > site1.txt +agent-browser --session site2 get text body > site2.txt +agent-browser --session site3 get text body > site3.txt + +# Cleanup +agent-browser --session site1 close +agent-browser --session site2 close +agent-browser --session site3 close +``` + +### A/B Testing Sessions + +```bash +# Test different user experiences +agent-browser --session variant-a open "https://app.com?variant=a" +agent-browser --session variant-b open "https://app.com?variant=b" + +# Compare +agent-browser --session variant-a screenshot /tmp/variant-a.png +agent-browser --session variant-b screenshot /tmp/variant-b.png +``` + +## Default Session + +When `--session` is omitted, commands use the default session: + +```bash +# These use the same default session +agent-browser open https://example.com +agent-browser snapshot -i +agent-browser close # Closes default session +``` + +## Session Cleanup + +```bash +# Close specific session +agent-browser --session auth close + +# List active sessions 
+agent-browser session list +``` + +## Best Practices + +### 1. Name Sessions Semantically + +```bash +# GOOD: Clear purpose +agent-browser --session github-auth open https://github.com +agent-browser --session docs-scrape open https://docs.example.com + +# AVOID: Generic names +agent-browser --session s1 open https://github.com +``` + +### 2. Always Clean Up + +```bash +# Close sessions when done +agent-browser --session auth close +agent-browser --session scrape close +``` + +### 3. Handle State Files Securely + +```bash +# Don't commit state files (contain auth tokens!) +echo "*.auth-state.json" >> .gitignore + +# Delete after use +rm /tmp/auth-state.json +``` + +### 4. Timeout Long Sessions + +```bash +# Set timeout for automated scripts +timeout 60 agent-browser --session long-task get text body +``` diff --git a/plugins/compound-engineering/skills/agent-browser/references/snapshot-refs.md b/plugins/compound-engineering/skills/agent-browser/references/snapshot-refs.md new file mode 100644 index 0000000..22b242c --- /dev/null +++ b/plugins/compound-engineering/skills/agent-browser/references/snapshot-refs.md @@ -0,0 +1,194 @@ +# Snapshot and Refs + +Compact element references that reduce context usage dramatically for AI agents. + +**Related**: [commands.md](commands.md) for full command reference, [SKILL.md](../SKILL.md) for quick start. 
+ +## Contents + +- [How Refs Work](#how-refs-work) +- [Snapshot Command](#the-snapshot-command) +- [Using Refs](#using-refs) +- [Ref Lifecycle](#ref-lifecycle) +- [Best Practices](#best-practices) +- [Ref Notation Details](#ref-notation-details) +- [Troubleshooting](#troubleshooting) + +## How Refs Work + +Traditional approach: +``` +Full DOM/HTML -> AI parses -> CSS selector -> Action (~3000-5000 tokens) +``` + +agent-browser approach: +``` +Compact snapshot -> @refs assigned -> Direct interaction (~200-400 tokens) +``` + +## The Snapshot Command + +```bash +# Basic snapshot (shows page structure) +agent-browser snapshot + +# Interactive snapshot (-i flag) - RECOMMENDED +agent-browser snapshot -i +``` + +### Snapshot Output Format + +``` +Page: Example Site - Home +URL: https://example.com + +@e1 [header] + @e2 [nav] + @e3 [a] "Home" + @e4 [a] "Products" + @e5 [a] "About" + @e6 [button] "Sign In" + +@e7 [main] + @e8 [h1] "Welcome" + @e9 [form] + @e10 [input type="email"] placeholder="Email" + @e11 [input type="password"] placeholder="Password" + @e12 [button type="submit"] "Log In" + +@e13 [footer] + @e14 [a] "Privacy Policy" +``` + +## Using Refs + +Once you have refs, interact directly: + +```bash +# Click the "Sign In" button +agent-browser click @e6 + +# Fill email input +agent-browser fill @e10 "user@example.com" + +# Fill password +agent-browser fill @e11 "password123" + +# Submit the form +agent-browser click @e12 +``` + +## Ref Lifecycle + +**IMPORTANT**: Refs are invalidated when the page changes! + +```bash +# Get initial snapshot +agent-browser snapshot -i +# @e1 [button] "Next" + +# Click triggers page change +agent-browser click @e1 + +# MUST re-snapshot to get new refs! +agent-browser snapshot -i +# @e1 [h1] "Page 2" <- Different element now! +``` + +## Best Practices + +### 1. 
Always Snapshot Before Interacting + +```bash +# CORRECT +agent-browser open https://example.com +agent-browser snapshot -i # Get refs first +agent-browser click @e1 # Use ref + +# WRONG +agent-browser open https://example.com +agent-browser click @e1 # Ref doesn't exist yet! +``` + +### 2. Re-Snapshot After Navigation + +```bash +agent-browser click @e5 # Navigates to new page +agent-browser snapshot -i # Get new refs +agent-browser click @e1 # Use new refs +``` + +### 3. Re-Snapshot After Dynamic Changes + +```bash +agent-browser click @e1 # Opens dropdown +agent-browser snapshot -i # See dropdown items +agent-browser click @e7 # Select item +``` + +### 4. Snapshot Specific Regions + +For complex pages, snapshot specific areas: + +```bash +# Snapshot just the form +agent-browser snapshot @e9 +``` + +## Ref Notation Details + +``` +@e1 [tag type="value"] "text content" placeholder="hint" +| | | | | +| | | | +- Additional attributes +| | | +- Visible text +| | +- Key attributes shown +| +- HTML tag name ++- Unique ref ID +``` + +### Common Patterns + +``` +@e1 [button] "Submit" # Button with text +@e2 [input type="email"] # Email input +@e3 [input type="password"] # Password input +@e4 [a href="/page"] "Link Text" # Anchor link +@e5 [select] # Dropdown +@e6 [textarea] placeholder="Message" # Text area +@e7 [div class="modal"] # Container (when relevant) +@e8 [img alt="Logo"] # Image +@e9 [checkbox] checked # Checked checkbox +@e10 [radio] selected # Selected radio +``` + +## Troubleshooting + +### "Ref not found" Error + +```bash +# Ref may have changed - re-snapshot +agent-browser snapshot -i +``` + +### Element Not Visible in Snapshot + +```bash +# Scroll down to reveal element +agent-browser scroll down 1000 +agent-browser snapshot -i + +# Or wait for dynamic content +agent-browser wait 1000 +agent-browser snapshot -i +``` + +### Too Many Elements + +```bash +# Snapshot specific container +agent-browser snapshot @e5 + +# Or use get text for content-only 
extraction +agent-browser get text @e5 +``` diff --git a/plugins/compound-engineering/skills/agent-browser/references/video-recording.md b/plugins/compound-engineering/skills/agent-browser/references/video-recording.md new file mode 100644 index 0000000..e6a9fb4 --- /dev/null +++ b/plugins/compound-engineering/skills/agent-browser/references/video-recording.md @@ -0,0 +1,173 @@ +# Video Recording + +Capture browser automation as video for debugging, documentation, or verification. + +**Related**: [commands.md](commands.md) for full command reference, [SKILL.md](../SKILL.md) for quick start. + +## Contents + +- [Basic Recording](#basic-recording) +- [Recording Commands](#recording-commands) +- [Use Cases](#use-cases) +- [Best Practices](#best-practices) +- [Output Format](#output-format) +- [Limitations](#limitations) + +## Basic Recording + +```bash +# Start recording +agent-browser record start ./demo.webm + +# Perform actions +agent-browser open https://example.com +agent-browser snapshot -i +agent-browser click @e1 +agent-browser fill @e2 "test input" + +# Stop and save +agent-browser record stop +``` + +## Recording Commands + +```bash +# Start recording to file +agent-browser record start ./output.webm + +# Stop current recording +agent-browser record stop + +# Restart with new file (stops current + starts new) +agent-browser record restart ./take2.webm +``` + +## Use Cases + +### Debugging Failed Automation + +```bash +#!/bin/bash +# Record automation for debugging + +agent-browser record start ./debug-$(date +%Y%m%d-%H%M%S).webm + +# Run your automation +agent-browser open https://app.example.com +agent-browser snapshot -i +agent-browser click @e1 || { + echo "Click failed - check recording" + agent-browser record stop + exit 1 +} + +agent-browser record stop +``` + +### Documentation Generation + +```bash +#!/bin/bash +# Record workflow for documentation + +agent-browser record start ./docs/how-to-login.webm + +agent-browser open 
https://app.example.com/login +agent-browser wait 1000 # Pause for visibility + +agent-browser snapshot -i +agent-browser fill @e1 "demo@example.com" +agent-browser wait 500 + +agent-browser fill @e2 "password" +agent-browser wait 500 + +agent-browser click @e3 +agent-browser wait --load networkidle +agent-browser wait 1000 # Show result + +agent-browser record stop +``` + +### CI/CD Test Evidence + +```bash +#!/bin/bash +# Record E2E test runs for CI artifacts + +TEST_NAME="${1:-e2e-test}" +RECORDING_DIR="./test-recordings" +mkdir -p "$RECORDING_DIR" + +agent-browser record start "$RECORDING_DIR/$TEST_NAME-$(date +%s).webm" + +# Run test +if run_e2e_test; then + echo "Test passed" +else + echo "Test failed - recording saved" +fi + +agent-browser record stop +``` + +## Best Practices + +### 1. Add Pauses for Clarity + +```bash +# Slow down for human viewing +agent-browser click @e1 +agent-browser wait 500 # Let viewer see result +``` + +### 2. Use Descriptive Filenames + +```bash +# Include context in filename +agent-browser record start ./recordings/login-flow-2024-01-15.webm +agent-browser record start ./recordings/checkout-test-run-42.webm +``` + +### 3. Handle Recording in Error Cases + +```bash +#!/bin/bash +set -e + +cleanup() { + agent-browser record stop 2>/dev/null || true + agent-browser close 2>/dev/null || true +} +trap cleanup EXIT + +agent-browser record start ./automation.webm +# ... automation steps ... +``` + +### 4. 
Combine with Screenshots
+
+```bash
+# Record video AND capture key frames
+agent-browser record start ./flow.webm
+
+agent-browser open https://example.com
+agent-browser screenshot ./screenshots/step1-homepage.png
+
+agent-browser click @e1
+agent-browser screenshot ./screenshots/step2-after-click.png
+
+agent-browser record stop
+```
+
+## Output Format
+
+- Default format: WebM (VP8/VP9 codec)
+- Compatible with all modern browsers and video players
+- Compressed but high quality
+
+## Limitations
+
+- Recording adds slight overhead to automation
+- Large recordings can consume significant disk space
+- Some headless environments may have codec limitations
diff --git a/plugins/compound-engineering/skills/agent-browser/templates/authenticated-session.sh b/plugins/compound-engineering/skills/agent-browser/templates/authenticated-session.sh
new file mode 100755
index 0000000..b66c928
--- /dev/null
+++ b/plugins/compound-engineering/skills/agent-browser/templates/authenticated-session.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+# Template: Authenticated Session Workflow
+# Purpose: Login once, save state, reuse for subsequent runs
+# Usage: ./authenticated-session.sh <login-url> [state-file]
+#
+# RECOMMENDED: Use the auth vault instead of this template:
+# echo "<password>" | agent-browser auth save myapp --url <url> --username <username> --password-stdin
+# agent-browser auth login myapp
+# The auth vault stores credentials securely and the LLM never sees passwords.
+#
+# Environment variables:
+# APP_USERNAME - Login username/email
+# APP_PASSWORD - Login password
+#
+# Two modes:
+# 1. Discovery mode (default): Shows form structure so you can identify refs
+# 2. Login mode: Performs actual login after you update the refs
+#
+# Setup steps:
+# 1. Run once to see form structure (discovery mode)
+# 2. Update refs in LOGIN FLOW section below
+# 3. Set APP_USERNAME and APP_PASSWORD
+# 4. 
Delete the DISCOVERY section
+
+set -euo pipefail
+
+LOGIN_URL="${1:?Usage: $0 <login-url> [state-file]}"
+STATE_FILE="${2:-./auth-state.json}"
+
+echo "Authentication workflow: $LOGIN_URL"
+
+# ================================================================
+# SAVED STATE: Skip login if valid saved state exists
+# ================================================================
+if [[ -f "$STATE_FILE" ]]; then
+ echo "Loading saved state from $STATE_FILE..."
+ if agent-browser --state "$STATE_FILE" open "$LOGIN_URL" 2>/dev/null; then
+ agent-browser wait --load networkidle
+
+ CURRENT_URL=$(agent-browser get url)
+ if [[ "$CURRENT_URL" != *"login"* ]] && [[ "$CURRENT_URL" != *"signin"* ]]; then
+ echo "Session restored successfully"
+ agent-browser snapshot -i
+ exit 0
+ fi
+ echo "Session expired, performing fresh login..."
+ agent-browser close 2>/dev/null || true
+ else
+ echo "Failed to load state, re-authenticating..."
+ fi
+ rm -f "$STATE_FILE"
+fi
+
+# ================================================================
+# DISCOVERY MODE: Shows form structure (delete after setup)
+# ================================================================
+echo "Opening login page..."
+agent-browser open "$LOGIN_URL"
+agent-browser wait --load networkidle
+
+echo ""
+echo "Login form structure:"
+echo "---"
+agent-browser snapshot -i
+echo "---"
+echo ""
+echo "Next steps:"
+echo " 1. Note the refs: username=@e?, password=@e?, submit=@e?"
+echo " 2. Update the LOGIN FLOW section below with your refs"
+echo " 3. Set: export APP_USERNAME='...' APP_PASSWORD='...'"
+echo " 4. 
Delete this DISCOVERY MODE section"
+echo ""
+agent-browser close
+exit 0
+
+# ================================================================
+# LOGIN FLOW: Uncomment and customize after discovery
+# ================================================================
+# : "${APP_USERNAME:?Set APP_USERNAME environment variable}"
+# : "${APP_PASSWORD:?Set APP_PASSWORD environment variable}"
+#
+# agent-browser open "$LOGIN_URL"
+# agent-browser wait --load networkidle
+# agent-browser snapshot -i
+#
+# # Fill credentials (update refs to match your form)
+# agent-browser fill @e1 "$APP_USERNAME"
+# agent-browser fill @e2 "$APP_PASSWORD"
+# agent-browser click @e3
+# agent-browser wait --load networkidle
+#
+# # Verify login succeeded
+# FINAL_URL=$(agent-browser get url)
+# if [[ "$FINAL_URL" == *"login"* ]] || [[ "$FINAL_URL" == *"signin"* ]]; then
+# echo "Login failed - still on login page"
+# agent-browser screenshot /tmp/login-failed.png
+# agent-browser close
+# exit 1
+# fi
+#
+# # Save state for future runs
+# echo "Saving state to $STATE_FILE"
+# agent-browser state save "$STATE_FILE"
+# echo "Login successful"
+# agent-browser snapshot -i
diff --git a/plugins/compound-engineering/skills/agent-browser/templates/capture-workflow.sh b/plugins/compound-engineering/skills/agent-browser/templates/capture-workflow.sh
new file mode 100755
index 0000000..3bc93ad
--- /dev/null
+++ b/plugins/compound-engineering/skills/agent-browser/templates/capture-workflow.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# Template: Content Capture Workflow
+# Purpose: Extract content from web pages (text, screenshots, PDF)
+# Usage: ./capture-workflow.sh <url> [output-dir]
+#
+# Outputs:
+# - page-full.png: Full page screenshot
+# - page-structure.txt: Page element structure with refs
+# - page-text.txt: All text content
+# - page.pdf: PDF version
+#
+# Optional: Load auth state for protected pages
+
+set -euo pipefail
+
+TARGET_URL="${1:?Usage: $0 <url> [output-dir]}"
+
+echo 
"Capturing: $TARGET_URL" +mkdir -p "$OUTPUT_DIR" + +# Optional: Load authentication state +# if [[ -f "./auth-state.json" ]]; then +# echo "Loading authentication state..." +# agent-browser state load "./auth-state.json" +# fi + +# Navigate to target +agent-browser open "$TARGET_URL" +agent-browser wait --load networkidle + +# Get metadata +TITLE=$(agent-browser get title) +URL=$(agent-browser get url) +echo "Title: $TITLE" +echo "URL: $URL" + +# Capture full page screenshot +agent-browser screenshot --full "$OUTPUT_DIR/page-full.png" +echo "Saved: $OUTPUT_DIR/page-full.png" + +# Get page structure with refs +agent-browser snapshot -i > "$OUTPUT_DIR/page-structure.txt" +echo "Saved: $OUTPUT_DIR/page-structure.txt" + +# Extract all text content +agent-browser get text body > "$OUTPUT_DIR/page-text.txt" +echo "Saved: $OUTPUT_DIR/page-text.txt" + +# Save as PDF +agent-browser pdf "$OUTPUT_DIR/page.pdf" +echo "Saved: $OUTPUT_DIR/page.pdf" + +# Optional: Extract specific elements using refs from structure +# agent-browser get text @e5 > "$OUTPUT_DIR/main-content.txt" + +# Optional: Handle infinite scroll pages +# for i in {1..5}; do +# agent-browser scroll down 1000 +# agent-browser wait 1000 +# done +# agent-browser screenshot --full "$OUTPUT_DIR/page-scrolled.png" + +# Cleanup +agent-browser close + +echo "" +echo "Capture complete:" +ls -la "$OUTPUT_DIR" diff --git a/plugins/compound-engineering/skills/agent-browser/templates/form-automation.sh b/plugins/compound-engineering/skills/agent-browser/templates/form-automation.sh new file mode 100755 index 0000000..6784fcd --- /dev/null +++ b/plugins/compound-engineering/skills/agent-browser/templates/form-automation.sh @@ -0,0 +1,62 @@ +#!/bin/bash +# Template: Form Automation Workflow +# Purpose: Fill and submit web forms with validation +# Usage: ./form-automation.sh +# +# This template demonstrates the snapshot-interact-verify pattern: +# 1. Navigate to form +# 2. Snapshot to get element refs +# 3. 
Fill fields using refs
+# 4. Submit and verify result
+#
+# Customize: Update the refs (@e1, @e2, etc.) based on your form's snapshot output

+set -euo pipefail
+
+FORM_URL="${1:?Usage: $0 <form-url>}"
+
+echo "Form automation: $FORM_URL"
+
+# Step 1: Navigate to form
+agent-browser open "$FORM_URL"
+agent-browser wait --load networkidle
+
+# Step 2: Snapshot to discover form elements
+echo ""
+echo "Form structure:"
+agent-browser snapshot -i
+
+# Step 3: Fill form fields (customize these refs based on snapshot output)
+#
+# Common field types:
+# agent-browser fill @e1 "John Doe" # Text input
+# agent-browser fill @e2 "user@example.com" # Email input
+# agent-browser fill @e3 "SecureP@ss123" # Password input
+# agent-browser select @e4 "Option Value" # Dropdown
+# agent-browser check @e5 # Checkbox
+# agent-browser click @e6 # Radio button
+# agent-browser fill @e7 "Multi-line text" # Textarea
+# agent-browser upload @e8 /path/to/file.pdf # File upload
+#
+# Uncomment and modify:
+# agent-browser fill @e1 "Test User"
+# agent-browser fill @e2 "test@example.com"
+# agent-browser click @e3 # Submit button
+
+# Step 4: Wait for submission
+# agent-browser wait --load networkidle
+# agent-browser wait --url "**/success" # Or wait for redirect
+
+# Step 5: Verify result
+echo ""
+echo "Result:"
+agent-browser get url
+agent-browser snapshot -i
+
+# Optional: Capture evidence
+agent-browser screenshot /tmp/form-result.png
+echo "Screenshot saved: /tmp/form-result.png"
+
+# Cleanup
+agent-browser close
+echo "Done"
diff --git a/plugins/compound-engineering/commands/agent-native-audit.md b/plugins/compound-engineering/skills/agent-native-audit/SKILL.md
similarity index 100%
rename from plugins/compound-engineering/commands/agent-native-audit.md
rename to plugins/compound-engineering/skills/agent-native-audit/SKILL.md
diff --git a/plugins/compound-engineering/skills/brainstorming/SKILL.md b/plugins/compound-engineering/skills/brainstorming/SKILL.md
index 0a994dd..5a092cd 100644
--- a/plugins/compound-engineering/skills/brainstorming/SKILL.md +++ b/plugins/compound-engineering/skills/brainstorming/SKILL.md @@ -131,7 +131,7 @@ topic: - [Any unresolved questions for the planning phase] ## Next Steps -→ `/workflows:plan` for implementation details +→ `/ce:plan` for implementation details ``` **Output Location:** `docs/brainstorms/YYYY-MM-DD--brainstorm.md` @@ -140,7 +140,7 @@ topic: Present clear options for what to do next: -1. **Proceed to planning** → Run `/workflows:plan` +1. **Proceed to planning** → Run `/ce:plan` 2. **Refine further** → Continue exploring the design 3. **Done for now** → User will return later @@ -187,4 +187,4 @@ Planning answers **HOW** to build it: - Technical details and code patterns - Testing strategy and verification -When brainstorm output exists, `/workflows:plan` should detect it and use it as input, skipping its own idea refinement phase. +When brainstorm output exists, `/ce:plan` should detect it and use it as input, skipping its own idea refinement phase. diff --git a/plugins/compound-engineering/commands/workflows/brainstorm.md b/plugins/compound-engineering/skills/ce-brainstorm/SKILL.md similarity index 77% rename from plugins/compound-engineering/commands/workflows/brainstorm.md rename to plugins/compound-engineering/skills/ce-brainstorm/SKILL.md index b4f3a0f..2649c15 100644 --- a/plugins/compound-engineering/commands/workflows/brainstorm.md +++ b/plugins/compound-engineering/skills/ce-brainstorm/SKILL.md @@ -1,5 +1,5 @@ --- -name: workflows:brainstorm +name: ce:brainstorm description: Explore requirements and approaches through collaborative dialogue before planning implementation argument-hint: "[feature idea or problem to explore]" --- @@ -8,7 +8,7 @@ argument-hint: "[feature idea or problem to explore]" **Note: The current year is 2026.** Use this when dating brainstorm documents. -Brainstorming helps answer **WHAT** to build through collaborative dialogue. 
It precedes `/workflows:plan`, which answers **HOW** to build it. +Brainstorming helps answer **WHAT** to build through collaborative dialogue. It precedes `/ce:plan`, which answers **HOW** to build it. **Process knowledge:** Load the `brainstorming` skill for detailed question techniques, approach exploration patterns, and YAGNI principles. @@ -33,7 +33,7 @@ Evaluate whether brainstorming is needed based on the feature description. - Constrained, well-defined scope **If requirements are already clear:** -Use **AskUserQuestion tool** to suggest: "Your requirements seem detailed enough to proceed directly to planning. Should I run `/workflows:plan` instead, or would you like to explore the idea further?" +Use **AskUserQuestion tool** to suggest: "Your requirements seem detailed enough to proceed directly to planning. Should I run `/ce:plan` instead, or would you like to explore the idea further?" ### Phase 1: Understand the Idea @@ -41,7 +41,7 @@ Use **AskUserQuestion tool** to suggest: "Your requirements seem detailed enough Run a quick repo scan to understand existing patterns: -- Task repo-research-analyst("Understand existing patterns related to: ") +- Task compound-engineering:research:repo-research-analyst("Understand existing patterns related to: ") Focus on: similar features, established patterns, CLAUDE.md guidance. @@ -88,9 +88,25 @@ Use **AskUserQuestion tool** to present next steps: **Options:** 1. **Review and refine** - Improve the document through structured self-review -2. **Proceed to planning** - Run `/workflows:plan` (will auto-detect this brainstorm) -3. **Ask more questions** - I have more questions to clarify before moving on -4. **Done for now** - Return later +2. **Proceed to planning** - Run `/ce:plan` (will auto-detect this brainstorm) +3. **Share to Proof** - Upload to Proof for collaborative review and sharing +4. **Ask more questions** - I have more questions to clarify before moving on +5. 
**Done for now** - Return later
+
+**If user selects "Share to Proof":**
+
+```bash
+CONTENT=$(cat docs/brainstorms/YYYY-MM-DD--brainstorm.md)
+TITLE="Brainstorm: <topic>"
+RESPONSE=$(curl -s -X POST https://www.proofeditor.ai/share/markdown \
+  -H "Content-Type: application/json" \
+  -d "$(jq -n --arg title "$TITLE" --arg markdown "$CONTENT" --arg by "ai:compound" '{title: $title, markdown: $markdown, by: $by}')")
+PROOF_URL=$(echo "$RESPONSE" | jq -r '.tokenUrl')
+```
+
+Display the URL prominently: `View & collaborate in Proof: <PROOF_URL>`
+
+If the curl fails, skip silently. Then return to the Phase 4 options.
 
 **If user selects "Ask more questions":** YOU (Claude) return to Phase 1.2 (Collaborative Dialogue) and continue asking the USER questions one at a time to further refine the design. The user wants YOU to probe deeper - ask about edge cases, constraints, preferences, or areas not yet explored. Continue until the user is satisfied, then return to Phase 4.
@@ -100,8 +116,8 @@ Load the `document-review` skill and apply it to the brainstorm document.
 
 When document-review returns "Review complete", present next steps:
 
-1. **Move to planning** - Continue to `/workflows:plan` with this document
-2. **Done for now** - Brainstorming complete. To start planning later: `/workflows:plan [document-path]`
+1. **Move to planning** - Continue to `/ce:plan` with this document
+2. **Done for now** - Brainstorming complete. To start planning later: `/ce:plan [document-path]`
 
 ## Output Summary
 
@@ -116,7 +132,7 @@ Key decisions:
 
 - [Decision 1]
 - [Decision 2]
 
-Next: Run `/workflows:plan` when ready to implement.
+Next: Run `/ce:plan` when ready to implement.
``` ## Important Guidelines diff --git a/plugins/compound-engineering/commands/workflows/compound.md b/plugins/compound-engineering/skills/ce-compound/SKILL.md similarity index 78% rename from plugins/compound-engineering/commands/workflows/compound.md rename to plugins/compound-engineering/skills/ce-compound/SKILL.md index 9dffc1a..d06b63b 100644 --- a/plugins/compound-engineering/commands/workflows/compound.md +++ b/plugins/compound-engineering/skills/ce-compound/SKILL.md @@ -1,5 +1,5 @@ --- -name: workflows:compound +name: ce:compound description: Document a recently solved problem to compound your team's knowledge argument-hint: "[optional: brief context about the fix]" --- @@ -17,11 +17,19 @@ Captures problem solutions while context is fresh, creating structured documenta ## Usage ```bash -/workflows:compound # Document the most recent fix -/workflows:compound [brief context] # Provide additional context hint +/ce:compound # Document the most recent fix +/ce:compound [brief context] # Provide additional context hint ``` -## Execution Strategy: Two-Phase Orchestration +## Execution Strategy + +**Always run full mode by default.** Proceed directly to Phase 1 unless the user explicitly requests compact-safe mode (e.g., `/ce:compound --compact` or "use compact mode"). + +Compact-safe mode exists as a lightweight alternative — see the **Compact-Safe Mode** section below. It's there if the user wants it, not something to push. + +--- + +### Full Mode **Only ONE file gets written - the final documentation.** @@ -99,6 +107,44 @@ Based on problem type, optionally invoke specialized agents to review the docume +--- + +### Compact-Safe Mode + + +**Single-pass alternative for context-constrained sessions.** + +When context budget is tight, this mode skips parallel subagents entirely. The orchestrator performs all work in a single pass, producing a minimal but complete solution document. 
+ + +The orchestrator (main conversation) performs ALL of the following in one sequential pass: + +1. **Extract from conversation**: Identify the problem, root cause, and solution from conversation history +2. **Classify**: Determine category and filename (same categories as full mode) +3. **Write minimal doc**: Create `docs/solutions/[category]/[filename].md` with: + - YAML frontmatter (title, category, date, tags) + - Problem description (1-2 sentences) + - Root cause (1-2 sentences) + - Solution with key code snippets + - One prevention tip +4. **Skip specialized agent reviews** (Phase 3) to conserve context + +**Compact-safe output:** +``` +✓ Documentation complete (compact-safe mode) + +File created: +- docs/solutions/[category]/[filename].md + +Note: This was created in compact-safe mode. For richer documentation +(cross-references, detailed prevention strategies, specialized reviews), +re-run /compound in a fresh session. +``` + +**No subagents are launched. No parallel tasks. One file written.** + +--- + ## What It Captures - **Problem symptom**: Exact error messages, observable behavior @@ -203,7 +249,7 @@ Build → Test → Find Issue → Research → Improve → Document → Validate - "that worked" - "it's fixed" - "working now" - "problem solved" - Use /workflows:compound [context] to document immediately without waiting for auto-detection. + Use /ce:compound [context] to document immediately without waiting for auto-detection. 
## Routes To
 
@@ -231,10 +277,10 @@ Based on problem type, these agents can enhance documentation:
 
 ### When to Invoke
 
 - **Auto-triggered** (optional): Agents can run post-documentation for enhancement
-- **Manual trigger**: User can invoke agents after /workflows:compound completes for deeper review
+- **Manual trigger**: User can invoke agents after /ce:compound completes for deeper review
 - **Customize agents**: Edit `compound-engineering.local.md` or invoke the `setup` skill to configure which review agents are used across all workflows
 
 ## Related Commands
 
 - `/research [topic]` - Deep investigation (searches docs/solutions/ for patterns)
-- `/workflows:plan` - Planning workflow (references documented solutions)
+- `/ce:plan` - Planning workflow (references documented solutions)
diff --git a/plugins/compound-engineering/skills/ce-plan/SKILL.md b/plugins/compound-engineering/skills/ce-plan/SKILL.md
new file mode 100644
index 0000000..b5d7e1e
--- /dev/null
+++ b/plugins/compound-engineering/skills/ce-plan/SKILL.md
@@ -0,0 +1,641 @@
+---
+name: ce:plan
+description: Transform feature descriptions into well-structured project plans following conventions
+argument-hint: "[feature description, bug report, or improvement idea]"
+---
+
+# Create a plan for a new feature or bug fix
+
+## Introduction
+
+**Note: The current year is 2026.** Use this when dating plans and searching for recent documentation.
+
+Transform feature descriptions, bug reports, or improvement ideas into well-structured markdown issue files that follow project conventions and best practices. This command provides flexible detail levels to match your needs.
+
+## Feature Description
+
+ #$ARGUMENTS
+
+**If the feature description above is empty, ask the user:** "What would you like to plan? Please describe the feature, bug fix, or improvement you have in mind."
+
+Do not proceed until you have a clear feature description from the user.
+
+### 0. 
Idea Refinement + +**Check for brainstorm output first:** + +Before asking questions, look for recent brainstorm documents in `docs/brainstorms/` that match this feature: + +```bash +ls -la docs/brainstorms/*.md 2>/dev/null | head -10 +``` + +**Relevance criteria:** A brainstorm is relevant if: +- The topic (from filename or YAML frontmatter) semantically matches the feature description +- Created within the last 14 days +- If multiple candidates match, use the most recent one + +**If a relevant brainstorm exists:** +1. Read the brainstorm document **thoroughly** — every section matters +2. Announce: "Found brainstorm from [date]: [topic]. Using as foundation for planning." +3. Extract and carry forward **ALL** of the following into the plan: + - Key decisions and their rationale + - Chosen approach and why alternatives were rejected + - Constraints and requirements discovered during brainstorming + - Open questions (flag these for resolution during planning) + - Success criteria and scope boundaries + - Any specific technical choices or patterns discussed +4. **Skip the idea refinement questions below** — the brainstorm already answered WHAT to build +5. Use brainstorm content as the **primary input** to research and planning phases +6. **Critical: The brainstorm is the origin document.** Throughout the plan, reference specific decisions with `(see brainstorm: docs/brainstorms/)` when carrying forward conclusions. Do not paraphrase decisions in a way that loses their original context — link back to the source. +7. **Do not omit brainstorm content** — if the brainstorm discussed it, the plan must address it (even if briefly). Scan each brainstorm section before finalizing the plan to verify nothing was dropped. + +**If multiple brainstorms could match:** +Use **AskUserQuestion tool** to ask which brainstorm to use, or whether to proceed without one. 
+ +**If no brainstorm found (or not relevant), run idea refinement:** + +Refine the idea through collaborative dialogue using the **AskUserQuestion tool**: + +- Ask questions one at a time to understand the idea fully +- Prefer multiple choice questions when natural options exist +- Focus on understanding: purpose, constraints and success criteria +- Continue until the idea is clear OR user says "proceed" + +**Gather signals for research decision.** During refinement, note: + +- **User's familiarity**: Do they know the codebase patterns? Are they pointing to examples? +- **User's intent**: Speed vs thoroughness? Exploration vs execution? +- **Topic risk**: Security, payments, external APIs warrant more caution +- **Uncertainty level**: Is the approach clear or open-ended? + +**Skip option:** If the feature description is already detailed, offer: +"Your description is clear. Should I proceed with research, or would you like to refine it further?" + +## Main Tasks + +### 1. Local Research (Always Runs - Parallel) + + +First, I need to understand the project's conventions, existing patterns, and any documented learnings. This is fast and local - it informs whether external research is needed. + + +Run these agents **in parallel** to gather local context: + +- Task compound-engineering:research:repo-research-analyst(feature_description) +- Task compound-engineering:research:learnings-researcher(feature_description) + +**What to look for:** +- **Repo research:** existing patterns, CLAUDE.md guidance, technology familiarity, pattern consistency +- **Learnings:** documented solutions in `docs/solutions/` that might apply (gotchas, patterns, lessons learned) + +These findings inform the next step. + +### 1.5. Research Decision + +Based on signals from Step 0 and findings from Step 1, decide on external research. + +**High-risk topics → always research.** Security, payments, external APIs, data privacy. The cost of missing something is too high. 
This takes precedence over speed signals. + +**Strong local context → skip external research.** Codebase has good patterns, CLAUDE.md has guidance, user knows what they want. External research adds little value. + +**Uncertainty or unfamiliar territory → research.** User is exploring, codebase has no examples, new technology. External perspective is valuable. + +**Announce the decision and proceed.** Brief explanation, then continue. User can redirect if needed. + +Examples: +- "Your codebase has solid patterns for this. Proceeding without external research." +- "This involves payment processing, so I'll research current best practices first." + +### 1.5b. External Research (Conditional) + +**Only run if Step 1.5 indicates external research is valuable.** + +Run these agents in parallel: + +- Task compound-engineering:research:best-practices-researcher(feature_description) +- Task compound-engineering:research:framework-docs-researcher(feature_description) + +### 1.6. Consolidate Research + +After all research steps complete, consolidate findings: + +- Document relevant file paths from repo research (e.g., `app/services/example_service.rb:42`) +- **Include relevant institutional learnings** from `docs/solutions/` (key insights, gotchas to avoid) +- Note external documentation URLs and best practices (if external research was done) +- List related issues or PRs discovered +- Capture CLAUDE.md conventions + +**Optional validation:** Briefly summarize findings and ask if anything looks off or missing before proceeding to planning. + +### 2. Issue Planning & Structure + + +Think like a product manager - what would make this issue clear and actionable? 
Consider multiple perspectives + + +**Title & Categorization:** + +- [ ] Draft clear, searchable issue title using conventional format (e.g., `feat: Add user authentication`, `fix: Cart total calculation`) +- [ ] Determine issue type: enhancement, bug, refactor +- [ ] Convert title to filename: add today's date prefix, determine daily sequence number, strip prefix colon, kebab-case, add `-plan` suffix + - Scan `docs/plans/` for files matching today's date pattern `YYYY-MM-DD-\d{3}-` + - Find the highest existing sequence number for today + - Increment by 1, zero-padded to 3 digits (001, 002, etc.) + - Example: `feat: Add User Authentication` → `2026-01-21-001-feat-add-user-authentication-plan.md` + - Keep it descriptive (3-5 words after prefix) so plans are findable by context + +**Stakeholder Analysis:** + +- [ ] Identify who will be affected by this issue (end users, developers, operations) +- [ ] Consider implementation complexity and required expertise + +**Content Planning:** + +- [ ] Choose appropriate detail level based on issue complexity and audience +- [ ] List all necessary sections for the chosen template +- [ ] Gather supporting materials (error logs, screenshots, design mockups) +- [ ] Prepare code examples or reproduction steps if applicable, name the mock filenames in the lists + +### 3. SpecFlow Analysis + +After planning the issue structure, run SpecFlow Analyzer to validate and refine the feature specification: + +- Task compound-engineering:workflow:spec-flow-analyzer(feature_description, research_findings) + +**SpecFlow Analyzer Output:** + +- [ ] Review SpecFlow analysis results +- [ ] Incorporate any identified gaps or edge cases into the issue +- [ ] Update acceptance criteria based on SpecFlow findings + +### 4. Choose Implementation Detail Level + +Select how comprehensive you want the issue to be, simpler is mostly better. 
+ +#### 📄 MINIMAL (Quick Issue) + +**Best for:** Simple bugs, small improvements, clear features + +**Includes:** + +- Problem statement or feature description +- Basic acceptance criteria +- Essential context only + +**Structure:** + +````markdown +--- +title: [Issue Title] +type: [feat|fix|refactor] +status: active +date: YYYY-MM-DD +origin: docs/brainstorms/YYYY-MM-DD--brainstorm.md # if originated from brainstorm, otherwise omit +--- + +# [Issue Title] + +[Brief problem/feature description] + +## Acceptance Criteria + +- [ ] Core requirement 1 +- [ ] Core requirement 2 + +## Context + +[Any critical information] + +## MVP + +### test.rb + +```ruby +class Test + def initialize + @name = "test" + end +end +``` + +## Sources + +- **Origin brainstorm:** [docs/brainstorms/YYYY-MM-DD--brainstorm.md](path) — include if plan originated from a brainstorm +- Related issue: #[issue_number] +- Documentation: [relevant_docs_url] +```` + +#### 📋 MORE (Standard Issue) + +**Best for:** Most features, complex bugs, team collaboration + +**Includes everything from MINIMAL plus:** + +- Detailed background and motivation +- Technical considerations +- Success metrics +- Dependencies and risks +- Basic implementation suggestions + +**Structure:** + +```markdown +--- +title: [Issue Title] +type: [feat|fix|refactor] +status: active +date: YYYY-MM-DD +origin: docs/brainstorms/YYYY-MM-DD--brainstorm.md # if originated from brainstorm, otherwise omit +--- + +# [Issue Title] + +## Overview + +[Comprehensive description] + +## Problem Statement / Motivation + +[Why this matters] + +## Proposed Solution + +[High-level approach] + +## Technical Considerations + +- Architecture impacts +- Performance implications +- Security considerations + +## System-Wide Impact + +- **Interaction graph**: [What callbacks/middleware/observers fire when this runs?] +- **Error propagation**: [How do errors flow across layers? Do retry strategies align?] 
+- **State lifecycle risks**: [Can partial failure leave orphaned/inconsistent state?] +- **API surface parity**: [What other interfaces expose similar functionality and need the same change?] +- **Integration test scenarios**: [Cross-layer scenarios that unit tests won't catch] + +## Acceptance Criteria + +- [ ] Detailed requirement 1 +- [ ] Detailed requirement 2 +- [ ] Testing requirements + +## Success Metrics + +[How we measure success] + +## Dependencies & Risks + +[What could block or complicate this] + +## Sources & References + +- **Origin brainstorm:** [docs/brainstorms/YYYY-MM-DD--brainstorm.md](path) — include if plan originated from a brainstorm +- Similar implementations: [file_path:line_number] +- Best practices: [documentation_url] +- Related PRs: #[pr_number] +``` + +#### 📚 A LOT (Comprehensive Issue) + +**Best for:** Major features, architectural changes, complex integrations + +**Includes everything from MORE plus:** + +- Detailed implementation plan with phases +- Alternative approaches considered +- Extensive technical specifications +- Resource requirements and timeline +- Future considerations and extensibility +- Risk mitigation strategies +- Documentation requirements + +**Structure:** + +```markdown +--- +title: [Issue Title] +type: [feat|fix|refactor] +status: active +date: YYYY-MM-DD +origin: docs/brainstorms/YYYY-MM-DD--brainstorm.md # if originated from brainstorm, otherwise omit +--- + +# [Issue Title] + +## Overview + +[Executive summary] + +## Problem Statement + +[Detailed problem analysis] + +## Proposed Solution + +[Comprehensive solution design] + +## Technical Approach + +### Architecture + +[Detailed technical design] + +### Implementation Phases + +#### Phase 1: [Foundation] + +- Tasks and deliverables +- Success criteria +- Estimated effort + +#### Phase 2: [Core Implementation] + +- Tasks and deliverables +- Success criteria +- Estimated effort + +#### Phase 3: [Polish & Optimization] + +- Tasks and deliverables +- Success 
criteria +- Estimated effort + +## Alternative Approaches Considered + +[Other solutions evaluated and why rejected] + +## System-Wide Impact + +### Interaction Graph + +[Map the chain reaction: what callbacks, middleware, observers, and event handlers fire when this code runs? Trace at least two levels deep. Document: "Action X triggers Y, which calls Z, which persists W."] + +### Error & Failure Propagation + +[Trace errors from lowest layer up. List specific error classes and where they're handled. Identify retry conflicts, unhandled error types, and silent failure swallowing.] + +### State Lifecycle Risks + +[Walk through each step that persists state. Can partial failure orphan rows, duplicate records, or leave caches stale? Document cleanup mechanisms or their absence.] + +### API Surface Parity + +[List all interfaces (classes, DSLs, endpoints) that expose equivalent functionality. Note which need updating and which share the code path.] + +### Integration Test Scenarios + +[3-5 cross-layer test scenarios that unit tests with mocks would never catch. Include expected behavior for each.] 
+ +## Acceptance Criteria + +### Functional Requirements + +- [ ] Detailed functional criteria + +### Non-Functional Requirements + +- [ ] Performance targets +- [ ] Security requirements +- [ ] Accessibility standards + +### Quality Gates + +- [ ] Test coverage requirements +- [ ] Documentation completeness +- [ ] Code review approval + +## Success Metrics + +[Detailed KPIs and measurement methods] + +## Dependencies & Prerequisites + +[Detailed dependency analysis] + +## Risk Analysis & Mitigation + +[Comprehensive risk assessment] + +## Resource Requirements + +[Team, time, infrastructure needs] + +## Future Considerations + +[Extensibility and long-term vision] + +## Documentation Plan + +[What docs need updating] + +## Sources & References + +### Origin + +- **Brainstorm document:** [docs/brainstorms/YYYY-MM-DD--brainstorm.md](path) — include if plan originated from a brainstorm. Key decisions carried forward: [list 2-3 major decisions from brainstorm] + +### Internal References + +- Architecture decisions: [file_path:line_number] +- Similar features: [file_path:line_number] +- Configuration: [file_path:line_number] + +### External References + +- Framework documentation: [url] +- Best practices guide: [url] +- Industry standards: [url] + +### Related Work + +- Previous PRs: #[pr_numbers] +- Related issues: #[issue_numbers] +- Design documents: [links] +``` + +### 5. Issue Creation & Formatting + + +Apply best practices for clarity and actionability, making the issue easy to scan and understand + + +**Content Formatting:** + +- [ ] Use clear, descriptive headings with proper hierarchy (##, ###) +- [ ] Include code examples in triple backticks with language syntax highlighting +- [ ] Add screenshots/mockups if UI-related (drag & drop or use image hosting) +- [ ] Use task lists (- [ ]) for trackable items that can be checked off +- [ ] Add collapsible sections for lengthy logs or optional details using `

` tags +- [ ] Apply appropriate emoji for visual scanning (🐛 bug, ✨ feature, 📚 docs, ♻️ refactor) + +**Cross-Referencing:** + +- [ ] Link to related issues/PRs using #number format +- [ ] Reference specific commits with SHA hashes when relevant +- [ ] Link to code using GitHub's permalink feature (press 'y' for permanent link) +- [ ] Mention relevant team members with @username if needed +- [ ] Add links to external resources with descriptive text + +**Code & Examples:** + +````markdown +# Good example with syntax highlighting and line references + + +```ruby +# app/services/user_service.rb:42 +def process_user(user) + +# Implementation here + +end +``` + +# Collapsible error logs + +
+Full error stacktrace + +`Error details here...` + +
+```` + +**AI-Era Considerations:** + +- [ ] Account for accelerated development with AI pair programming +- [ ] Include prompts or instructions that worked well during research +- [ ] Note which AI tools were used for initial exploration (Claude, Copilot, etc.) +- [ ] Emphasize comprehensive testing given rapid implementation +- [ ] Document any AI-generated code that needs human review + +### 6. Final Review & Submission + +**Brainstorm cross-check (if plan originated from a brainstorm):** + +Before finalizing, re-read the brainstorm document and verify: +- [ ] Every key decision from the brainstorm is reflected in the plan +- [ ] The chosen approach matches what was decided in the brainstorm +- [ ] Constraints and requirements from the brainstorm are captured in acceptance criteria +- [ ] Open questions from the brainstorm are either resolved or flagged +- [ ] The `origin:` frontmatter field points to the brainstorm file +- [ ] The Sources section includes the brainstorm with a summary of carried-forward decisions + +**Pre-submission Checklist:** + +- [ ] Title is searchable and descriptive +- [ ] Labels accurately categorize the issue +- [ ] All template sections are complete +- [ ] Links and references are working +- [ ] Acceptance criteria are measurable +- [ ] Add names of files in pseudo code examples and todo lists +- [ ] Add an ERD mermaid diagram if applicable for new model changes + +## Write Plan File + +**REQUIRED: Write the plan file to disk before presenting any options.** + +```bash +mkdir -p docs/plans/ +# Determine daily sequence number +today=$(date +%Y-%m-%d) +last_seq=$(ls docs/plans/${today}-*-plan.md 2>/dev/null | grep -oP "${today}-\K\d{3}" | sort -n | tail -1) +next_seq=$(printf "%03d" $(( ${last_seq:-0} + 1 ))) +``` + +Use the Write tool to save the complete plan to `docs/plans/YYYY-MM-DD-NNN---plan.md` (where NNN is `$next_seq` from the bash command above). 
This step is mandatory and cannot be skipped — even when running as part of LFG/SLFG or other automated pipelines. + +Confirm: "Plan written to docs/plans/[filename]" + +**Pipeline mode:** If invoked from an automated workflow (LFG, SLFG, or any `disable-model-invocation` context), skip all AskUserQuestion calls. Make decisions automatically and proceed to writing the plan without interactive prompts. + +## Output Format + +**Filename:** Use the date, daily sequence number, and kebab-case filename from Step 2 Title & Categorization. + +``` +docs/plans/YYYY-MM-DD-NNN---plan.md +``` + +Examples: +- ✅ `docs/plans/2026-01-15-001-feat-user-authentication-flow-plan.md` +- ✅ `docs/plans/2026-02-03-001-fix-checkout-race-condition-plan.md` +- ✅ `docs/plans/2026-03-10-002-refactor-api-client-extraction-plan.md` +- ❌ `docs/plans/2026-01-15-feat-thing-plan.md` (missing sequence number, not descriptive) +- ❌ `docs/plans/2026-01-15-001-feat-new-feature-plan.md` (too vague - what feature?) +- ❌ `docs/plans/2026-01-15-001-feat: user auth-plan.md` (invalid characters - colon and space) +- ❌ `docs/plans/feat-user-auth-plan.md` (missing date prefix and sequence number) + +## Post-Generation Options + +After writing the plan file, use the **AskUserQuestion tool** to present these options: + +**Question:** "Plan ready at `docs/plans/YYYY-MM-DD-NNN---plan.md`. What would you like to do next?" + +**Options:** +1. **Open plan in editor** - Open the plan file for review +2. **Run `/deepen-plan`** - Enhance each section with parallel research agents (best practices, performance, UI) +3. **Review and refine** - Improve the document through structured self-review +4. **Share to Proof** - Upload to Proof for collaborative review and sharing +5. **Start `/ce:work`** - Begin implementing this plan locally +6. **Start `/ce:work` on remote** - Begin implementing in Claude Code on the web (use `&` to run in background) +7. 
**Create Issue** - Create issue in project tracker (GitHub/Linear) + +Based on selection: +- **Open plan in editor** → Run `open docs/plans/.md` to open the file in the user's default editor +- **`/deepen-plan`** → Call the /deepen-plan command with the plan file path to enhance with research +- **Review and refine** → Load `document-review` skill. +- **Share to Proof** → Upload the plan to Proof: + ```bash + CONTENT=$(cat docs/plans/.md) + TITLE="Plan: " + RESPONSE=$(curl -s -X POST https://www.proofeditor.ai/share/markdown \ + -H "Content-Type: application/json" \ + -d "$(jq -n --arg title "$TITLE" --arg markdown "$CONTENT" --arg by "ai:compound" '{title: $title, markdown: $markdown, by: $by}')") + PROOF_URL=$(echo "$RESPONSE" | jq -r '.tokenUrl') + ``` + Display: `View & collaborate in Proof: ` — skip silently if curl fails. Then return to options. +- **`/ce:work`** → Call the /ce:work command with the plan file path +- **`/ce:work` on remote** → Run `/ce:work docs/plans/.md &` to start work in background for Claude Code web +- **Create Issue** → See "Issue Creation" section below +- **Other** (automatically provided) → Accept free text for rework or specific changes + +**Note:** If running `/ce:plan` with ultrathink enabled, automatically run `/deepen-plan` after plan creation for maximum depth and grounding. + +Loop back to options after Simplify or Other changes until user selects `/ce:work` or another action. + +## Issue Creation + +When user selects "Create Issue", detect their project tracker from CLAUDE.md: + +1. **Check for tracker preference** in user's CLAUDE.md (global or project): + - Look for `project_tracker: github` or `project_tracker: linear` + - Or look for mentions of "GitHub Issues" or "Linear" in their workflow section + +2. **If GitHub:** + + Use the title and type from Step 2 (already in context - no need to re-read the file): + + ```bash + gh issue create --title ": " --body-file <plan_path> + ``` + +3. 
**If Linear:** + + ```bash + linear issue create --title "<title>" --description "$(cat <plan_path>)" + ``` + +4. **If no tracker configured:** + Ask user: "Which project tracker do you use? (GitHub/Linear/Other)" + - Suggest adding `project_tracker: github` or `project_tracker: linear` to their CLAUDE.md + +5. **After creation:** + - Display the issue URL + - Ask if they want to proceed to `/ce:work` + +NEVER CODE! Just research and write the plan. diff --git a/plugins/compound-engineering/skills/ce-review/SKILL.md b/plugins/compound-engineering/skills/ce-review/SKILL.md new file mode 100644 index 0000000..e72d7b3 --- /dev/null +++ b/plugins/compound-engineering/skills/ce-review/SKILL.md @@ -0,0 +1,558 @@ +--- +name: ce:review +description: Perform exhaustive code reviews using multi-agent analysis, ultra-thinking, and worktrees +argument-hint: "[PR number, GitHub URL, branch name, or latest] [--serial]" +--- + +# Review Command + +<command_purpose> Perform exhaustive code reviews using multi-agent analysis, ultra-thinking, and Git worktrees for deep local inspection. </command_purpose> + +## Introduction + +<role>Senior Code Review Architect with expertise in security, performance, architecture, and quality assurance</role> + +## Prerequisites + +<requirements> +- Git repository with GitHub CLI (`gh`) installed and authenticated +- Clean main/master branch +- Proper permissions to create worktrees and access the repository +- For document reviews: Path to a markdown file or document +</requirements> + +## Main Tasks + +### 1. Determine Review Target & Setup (ALWAYS FIRST) + +<review_target> #$ARGUMENTS </review_target> + +<thinking> +First, I need to determine the review target type and set up the code for analysis. 
+</thinking> + +#### Immediate Actions: + +<task_list> + +- [ ] Determine review type: PR number (numeric), GitHub URL, file path (.md), or empty (current branch) +- [ ] Check current git branch +- [ ] If ALREADY on the target branch (PR branch, requested branch name, or the branch already checked out for review) → proceed with analysis on current branch +- [ ] If DIFFERENT branch than the review target → offer to use worktree: "Use git-worktree skill for isolated Call `skill: git-worktree` with branch name" +- [ ] Fetch PR metadata using `gh pr view --json` for title, body, files, linked issues +- [ ] Set up language-specific analysis tools +- [ ] Prepare security scanning environment +- [ ] Make sure we are on the branch we are reviewing. Use gh pr checkout to switch to the branch or manually checkout the branch. + +Ensure that the code is ready for analysis (either in worktree or on current branch). ONLY then proceed to the next step. + +</task_list> + +#### Protected Artifacts + +<protected_artifacts> +The following paths are compound-engineering pipeline artifacts and must never be flagged for deletion, removal, or gitignore by any review agent: + +- `docs/plans/*.md` — Plan files created by `/ce:plan`. These are living documents that track implementation progress (checkboxes are checked off by `/ce:work`). +- `docs/solutions/*.md` — Solution documents created during the pipeline. + +If a review agent flags any file in these directories for cleanup or removal, discard that finding during synthesis. Do not create a todo for it. +</protected_artifacts> + +#### Load Review Agents + +Read `compound-engineering.local.md` in the project root. If found, use `review_agents` from YAML frontmatter. If the markdown body contains review context, pass it to each agent as additional instructions. + +If no settings file exists, invoke the `setup` skill to create one. Then read the newly created file and continue. 
+ +#### Choose Execution Mode + +<execution_mode> + +Before launching review agents, check for context constraints: + +**If `--serial` flag is passed OR conversation is in a long session:** + +Run agents ONE AT A TIME in sequence. Wait for each agent to complete before starting the next. This uses less context but takes longer. + +**Default (parallel):** + +Run all agents simultaneously for speed. If you hit context limits, retry with `--serial` flag. + +**Auto-detect:** If more than 5 review agents are configured, automatically switch to serial mode and inform the user: +"Running review agents in serial mode (6+ agents configured). Use --parallel to override." + +</execution_mode> + +#### Parallel Agents to review the PR: + +<parallel_tasks> + +**Parallel mode (default for ≤5 agents):** + +Run all configured review agents in parallel using Task tool. For each agent in the `review_agents` list: + +``` +Task {agent-name}(PR content + review context from settings body) +``` + +**Serial mode (--serial flag, or auto for 6+ agents):** + +Run configured review agents ONE AT A TIME. For each agent in the `review_agents` list, wait for it to complete before starting the next: + +``` +For each agent in review_agents: + 1. Task {agent-name}(PR content + review context) + 2. Wait for completion + 3. Collect findings + 4. Proceed to next agent +``` + +Always run these last regardless of mode: +- Task compound-engineering:review:agent-native-reviewer(PR content) - Verify new features are agent-accessible +- Task compound-engineering:research:learnings-researcher(PR content) - Search docs/solutions/ for past issues related to this PR's modules and patterns + +</parallel_tasks> + +#### Conditional Agents (Run if applicable): + +<conditional_agents> + +These agents are run ONLY when the PR matches specific criteria. 
Check the PR files list to determine if they apply: + +**MIGRATIONS: If PR contains database migrations, schema.rb, or data backfills:** + +- Task compound-engineering:review:schema-drift-detector(PR content) - Detects unrelated schema.rb changes by cross-referencing against included migrations (run FIRST) +- Task compound-engineering:review:data-migration-expert(PR content) - Validates ID mappings match production, checks for swapped values, verifies rollback safety +- Task compound-engineering:review:deployment-verification-agent(PR content) - Creates Go/No-Go deployment checklist with SQL verification queries + +**When to run:** +- PR includes files matching `db/migrate/*.rb` or `db/schema.rb` +- PR modifies columns that store IDs, enums, or mappings +- PR includes data backfill scripts or rake tasks +- PR title/body mentions: migration, backfill, data transformation, ID mapping + +**What these agents check:** +- `schema-drift-detector`: Cross-references schema.rb changes against PR migrations to catch unrelated columns/indexes from local database state +- `data-migration-expert`: Verifies hard-coded mappings match production reality (prevents swapped IDs), checks for orphaned associations, validates dual-write patterns +- `deployment-verification-agent`: Produces executable pre/post-deploy checklists with SQL queries, rollback procedures, and monitoring plans + +</conditional_agents> + +### 2. Ultra-Thinking Deep Dive Phases + +<ultrathink_instruction> For each phase below, spend maximum cognitive effort. Think step by step. Consider all angles. Question assumptions. And bring all reviews in a synthesis to the user.</ultrathink_instruction> + +<deliverable> +Complete system context map with component interactions +</deliverable> + +#### Phase 1: Stakeholder Perspective Analysis + +<thinking_prompt> ULTRA-THINK: Put yourself in each stakeholder's shoes. What matters to them? What are their pain points? </thinking_prompt> + +<stakeholder_perspectives> + +1. 
**Developer Perspective** <questions> + + - How easy is this to understand and modify? + - Are the APIs intuitive? + - Is debugging straightforward? + - Can I test this easily? </questions> + +2. **Operations Perspective** <questions> + + - How do I deploy this safely? + - What metrics and logs are available? + - How do I troubleshoot issues? + - What are the resource requirements? </questions> + +3. **End User Perspective** <questions> + + - Is the feature intuitive? + - Are error messages helpful? + - Is performance acceptable? + - Does it solve my problem? </questions> + +4. **Security Team Perspective** <questions> + + - What's the attack surface? + - Are there compliance requirements? + - How is data protected? + - What are the audit capabilities? </questions> + +5. **Business Perspective** <questions> + - What's the ROI? + - Are there legal/compliance risks? + - How does this affect time-to-market? + - What's the total cost of ownership? </questions> </stakeholder_perspectives> + +#### Phase 2: Scenario Exploration + +<thinking_prompt> ULTRA-THINK: Explore edge cases and failure scenarios. What could go wrong? How does the system behave under stress? </thinking_prompt> + +<scenario_checklist> + +- [ ] **Happy Path**: Normal operation with valid inputs +- [ ] **Invalid Inputs**: Null, empty, malformed data +- [ ] **Boundary Conditions**: Min/max values, empty collections +- [ ] **Concurrent Access**: Race conditions, deadlocks +- [ ] **Scale Testing**: 10x, 100x, 1000x normal load +- [ ] **Network Issues**: Timeouts, partial failures +- [ ] **Resource Exhaustion**: Memory, disk, connections +- [ ] **Security Attacks**: Injection, overflow, DoS +- [ ] **Data Corruption**: Partial writes, inconsistency +- [ ] **Cascading Failures**: Downstream service issues </scenario_checklist> + +### 3. 
Multi-Angle Review Perspectives + +#### Technical Excellence Angle + +- Code craftsmanship evaluation +- Engineering best practices +- Technical documentation quality +- Tooling and automation assessment + +#### Business Value Angle + +- Feature completeness validation +- Performance impact on users +- Cost-benefit analysis +- Time-to-market considerations + +#### Risk Management Angle + +- Security risk assessment +- Operational risk evaluation +- Compliance risk verification +- Technical debt accumulation + +#### Team Dynamics Angle + +- Code review etiquette +- Knowledge sharing effectiveness +- Collaboration patterns +- Mentoring opportunities + +### 4. Simplification and Minimalism Review + +Run the Task compound-engineering:review:code-simplicity-reviewer() to see if we can simplify the code. + +### 5. Findings Synthesis and Todo Creation Using file-todos Skill + +<critical_requirement> ALL findings MUST be stored in the todos/ directory using the file-todos skill. Create todo files immediately after synthesis - do NOT present findings for user approval first. Use the skill for structured todo management. </critical_requirement> + +#### Step 1: Synthesize All Findings + +<thinking> +Consolidate all agent reports into a categorized list of findings. +Remove duplicates, prioritize by severity and impact. +</thinking> + +<synthesis_tasks> + +- [ ] Collect findings from all parallel agents +- [ ] Surface learnings-researcher results: if past solutions are relevant, flag them as "Known Pattern" with links to docs/solutions/ files +- [ ] Discard any findings that recommend deleting or gitignoring files in `docs/plans/` or `docs/solutions/` (see Protected Artifacts above) +- [ ] Categorize by type: security, performance, architecture, quality, etc. 
+- [ ] Assign severity levels: 🔴 CRITICAL (P1), 🟡 IMPORTANT (P2), 🔵 NICE-TO-HAVE (P3) +- [ ] Remove duplicate or overlapping findings +- [ ] Estimate effort for each finding (Small/Medium/Large) + +</synthesis_tasks> + +#### Step 2: Create Todo Files Using file-todos Skill + +<critical_instruction> Use the file-todos skill to create todo files for ALL findings immediately. Do NOT present findings one-by-one asking for user approval. Create all todo files in parallel using the skill, then summarize results to user. </critical_instruction> + +**Implementation Options:** + +**Option A: Direct File Creation (Fast)** + +- Create todo files directly using Write tool +- All findings in parallel for speed +- Use standard template from `.claude/skills/file-todos/assets/todo-template.md` +- Follow naming convention: `{issue_id}-pending-{priority}-{description}.md` + +**Option B: Sub-Agents in Parallel (Recommended for Scale)** For large PRs with 15+ findings, use sub-agents to create finding files in parallel: + +```bash +# Launch multiple finding-creator agents in parallel +Task() - Create todos for first finding +Task() - Create todos for second finding +Task() - Create todos for third finding +etc. for each finding. +``` + +Sub-agents can: + +- Process multiple findings simultaneously +- Write detailed todo files with all sections filled +- Organize findings by severity +- Create comprehensive Proposed Solutions +- Add acceptance criteria and work logs +- Complete much faster than sequential processing + +**Execution Strategy:** + +1. Synthesize all findings into categories (P1/P2/P3) +2. Group findings by severity +3. Launch 3 parallel sub-agents (one per severity level) +4. Each sub-agent creates its batch of todos using the file-todos skill +5. Consolidate results and present summary + +**Process (Using file-todos Skill):** + +1. 
For each finding: + + - Determine severity (P1/P2/P3) + - Write detailed Problem Statement and Findings + - Create 2-3 Proposed Solutions with pros/cons/effort/risk + - Estimate effort (Small/Medium/Large) + - Add acceptance criteria and work log + +2. Use file-todos skill for structured todo management: + + ```bash + skill: file-todos + ``` + + The skill provides: + + - Template location: `.claude/skills/file-todos/assets/todo-template.md` + - Naming convention: `{issue_id}-{status}-{priority}-{description}.md` + - YAML frontmatter structure: status, priority, issue_id, tags, dependencies + - All required sections: Problem Statement, Findings, Solutions, etc. + +3. Create todo files in parallel: + + ```bash + {next_id}-pending-{priority}-{description}.md + ``` + +4. Examples: + + ``` + 001-pending-p1-path-traversal-vulnerability.md + 002-pending-p1-api-response-validation.md + 003-pending-p2-concurrency-limit.md + 004-pending-p3-unused-parameter.md + ``` + +5. Follow template structure from file-todos skill: `.claude/skills/file-todos/assets/todo-template.md` + +**Todo File Structure (from template):** + +Each todo must include: + +- **YAML frontmatter**: status, priority, issue_id, tags, dependencies +- **Problem Statement**: What's broken/missing, why it matters +- **Findings**: Discoveries from agents with evidence/location +- **Proposed Solutions**: 2-3 options, each with pros/cons/effort/risk +- **Recommended Action**: (Filled during triage, leave blank initially) +- **Technical Details**: Affected files, components, database changes +- **Acceptance Criteria**: Testable checklist items +- **Work Log**: Dated record with actions and learnings +- **Resources**: Links to PR, issues, documentation, similar patterns + +**File naming convention:** + +``` +{issue_id}-{status}-{priority}-{description}.md + +Examples: +- 001-pending-p1-security-vulnerability.md +- 002-pending-p2-performance-optimization.md +- 003-pending-p3-code-cleanup.md +``` + +**Status values:** + 
+- `pending` - New findings, needs triage/decision +- `ready` - Approved by manager, ready to work +- `complete` - Work finished + +**Priority values:** + +- `p1` - Critical (blocks merge, security/data issues) +- `p2` - Important (should fix, architectural/performance) +- `p3` - Nice-to-have (enhancements, cleanup) + +**Tagging:** Always add `code-review` tag, plus: `security`, `performance`, `architecture`, `rails`, `quality`, etc. + +#### Step 3: Summary Report + +After creating all todo files, present comprehensive summary: + +````markdown +## ✅ Code Review Complete + +**Review Target:** PR #XXXX - [PR Title] **Branch:** [branch-name] + +### Findings Summary: + +- **Total Findings:** [X] +- **🔴 CRITICAL (P1):** [count] - BLOCKS MERGE +- **🟡 IMPORTANT (P2):** [count] - Should Fix +- **🔵 NICE-TO-HAVE (P3):** [count] - Enhancements + +### Created Todo Files: + +**P1 - Critical (BLOCKS MERGE):** + +- `001-pending-p1-{finding}.md` - {description} +- `002-pending-p1-{finding}.md` - {description} + +**P2 - Important:** + +- `003-pending-p2-{finding}.md` - {description} +- `004-pending-p2-{finding}.md` - {description} + +**P3 - Nice-to-Have:** + +- `005-pending-p3-{finding}.md` - {description} + +### Review Agents Used: + +- kieran-rails-reviewer +- security-sentinel +- performance-oracle +- architecture-strategist +- agent-native-reviewer +- [other agents] + +### Next Steps: + +1. **Address P1 Findings**: CRITICAL - must be fixed before merge + + - Review each P1 todo in detail + - Implement fixes or request exemption + - Verify fixes before merging PR + +2. **Triage All Todos**: + ```bash + ls todos/*-pending-*.md # View all pending todos + /triage # Use slash command for interactive triage + ``` + +3. **Work on Approved Todos**: + + ```bash + /resolve_todo_parallel # Fix all approved items efficiently + ``` + +4. 
**Track Progress**: + - Rename file when status changes: pending → ready → complete + - Update Work Log as you work + - Commit todos: `git add todos/ && git commit -m "refactor: add code review findings"` + +### Severity Breakdown: + +**🔴 P1 (Critical - Blocks Merge):** + +- Security vulnerabilities +- Data corruption risks +- Breaking changes +- Critical architectural issues + +**🟡 P2 (Important - Should Fix):** + +- Performance issues +- Significant architectural concerns +- Major code quality problems +- Reliability issues + +**🔵 P3 (Nice-to-Have):** + +- Minor improvements +- Code cleanup +- Optimization opportunities +- Documentation updates +```` + +### 6. End-to-End Testing (Optional) + +<detect_project_type> + +**First, detect the project type from PR files:** + +| Indicator | Project Type | +|-----------|--------------| +| `*.xcodeproj`, `*.xcworkspace`, `Package.swift` (iOS) | iOS/macOS | +| `Gemfile`, `package.json`, `app/views/*`, `*.html.*` | Web | +| Both iOS files AND web files | Hybrid (test both) | + +</detect_project_type> + +<offer_testing> + +After presenting the Summary Report, offer appropriate testing based on project type: + +**For Web Projects:** +```markdown +**"Want to run browser tests on the affected pages?"** +1. Yes - run `/test-browser` +2. No - skip +``` + +**For iOS Projects:** +```markdown +**"Want to run Xcode simulator tests on the app?"** +1. Yes - run `/xcode-test` +2. No - skip +``` + +**For Hybrid Projects (e.g., Rails + Hotwire Native):** +```markdown +**"Want to run end-to-end tests?"** +1. Web only - run `/test-browser` +2. iOS only - run `/xcode-test` +3. Both - run both commands +4. No - skip +``` + +</offer_testing> + +#### If User Accepts Web Testing: + +Spawn a subagent to run browser tests (preserves main context): + +``` +Task general-purpose("Run /test-browser for PR #[number]. Test all affected pages, check for console errors, handle failures by creating todos and fixing.") +``` + +The subagent will: +1. 
Identify pages affected by the PR +2. Navigate to each page and capture snapshots (using Playwright MCP or agent-browser CLI) +3. Check for console errors +4. Test critical interactions +5. Pause for human verification on OAuth/email/payment flows +6. Create P1 todos for any failures +7. Fix and retry until all tests pass + +**Standalone:** `/test-browser [PR number]` + +#### If User Accepts iOS Testing: + +Spawn a subagent to run Xcode tests (preserves main context): + +``` +Task general-purpose("Run /xcode-test for scheme [name]. Build for simulator, install, launch, take screenshots, check for crashes.") +``` + +The subagent will: +1. Verify XcodeBuildMCP is installed +2. Discover project and schemes +3. Build for iOS Simulator +4. Install and launch app +5. Take screenshots of key screens +6. Capture console logs for errors +7. Pause for human verification (Sign in with Apple, push, IAP) +8. Create P1 todos for any failures +9. Fix and retry until all tests pass + +**Standalone:** `/xcode-test [scheme]` + +### Important: P1 Findings Block Merge + +Any **🔴 P1 (CRITICAL)** findings must be addressed before merging the PR. Present these prominently and ensure they're resolved before accepting the PR. diff --git a/plugins/compound-engineering/skills/ce-work/SKILL.md b/plugins/compound-engineering/skills/ce-work/SKILL.md new file mode 100644 index 0000000..3e09c43 --- /dev/null +++ b/plugins/compound-engineering/skills/ce-work/SKILL.md @@ -0,0 +1,470 @@ +--- +name: ce:work +description: Execute work plans efficiently while maintaining quality and finishing features +argument-hint: "[plan file, specification, or todo file path]" +--- + +# Work Plan Execution Command + +Execute a work plan efficiently while maintaining quality and finishing features. + +## Introduction + +This command takes a work document (plan, specification, or todo file) and executes it systematically. 
The focus is on **shipping complete features** by understanding requirements quickly, following existing patterns, and maintaining quality throughout. + +## Input Document + +<input_document> #$ARGUMENTS </input_document> + +## Execution Workflow + +### Phase 1: Quick Start + +1. **Read Plan and Clarify** + + - Read the work document completely + - Review any references or links provided in the plan + - If anything is unclear or ambiguous, ask clarifying questions now + - Get user approval to proceed + - **Do not skip this** - better to ask questions now than build the wrong thing + +2. **Setup Environment** + + First, check the current branch: + + ```bash + current_branch=$(git branch --show-current) + default_branch=$(git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null | sed 's@^refs/remotes/origin/@@') + + # Fallback if remote HEAD isn't set + if [ -z "$default_branch" ]; then + default_branch=$(git rev-parse --verify origin/main >/dev/null 2>&1 && echo "main" || echo "master") + fi + ``` + + **If already on a feature branch** (not the default branch): + - Ask: "Continue working on `[current_branch]`, or create a new branch?" + - If continuing, proceed to step 3 + - If creating new, follow Option A or B below + + **If on the default branch**, choose how to proceed: + + **Option A: Create a new branch** + ```bash + git pull origin [default_branch] + git checkout -b feature-branch-name + ``` + Use a meaningful name based on the work (e.g., `feat/user-authentication`, `fix/email-validation`). 
+ + **Option B: Use a worktree (recommended for parallel development)** + ```bash + skill: git-worktree + # The skill will create a new branch from the default branch in an isolated worktree + ``` + + **Option C: Continue on the default branch** + - Requires explicit user confirmation + - Only proceed after user explicitly says "yes, commit to [default_branch]" + - Never commit directly to the default branch without explicit permission + + **Recommendation**: Use worktree if: + - You want to work on multiple features simultaneously + - You want to keep the default branch clean while experimenting + - You plan to switch between branches frequently + +3. **Create Todo List** + - Use TodoWrite to break plan into actionable tasks + - Include dependencies between tasks + - Prioritize based on what needs to be done first + - Include testing and quality check tasks + - Keep tasks specific and completable + +### Phase 2: Execute + +1. **Task Execution Loop** + + For each task in priority order: + + ``` + while (tasks remain): + - Mark task as in_progress in TodoWrite + - Read any referenced files from the plan + - Look for similar patterns in codebase + - Implement following existing conventions + - Write tests for new functionality + - Run System-Wide Test Check (see below) + - Run tests after changes + - Mark task as completed in TodoWrite + - Mark off the corresponding checkbox in the plan file ([ ] → [x]) + - Evaluate for incremental commit (see below) + ``` + + **System-Wide Test Check** — Before marking a task done, pause and ask: + + | Question | What to do | + |----------|------------| + | **What fires when this runs?** Callbacks, middleware, observers, event handlers — trace two levels out from your change. | Read the actual code (not docs) for callbacks on models you touch, middleware in the request chain, `after_*` hooks. 
| + | **Do my tests exercise the real chain?** If every dependency is mocked, the test proves your logic works *in isolation* — it says nothing about the interaction. | Write at least one integration test that uses real objects through the full callback/middleware chain. No mocks for the layers that interact. | + | **Can failure leave orphaned state?** If your code persists state (DB row, cache, file) before calling an external service, what happens when the service fails? Does retry create duplicates? | Trace the failure path with real objects. If state is created before the risky call, test that failure cleans up or that retry is idempotent. | + | **What other interfaces expose this?** Mixins, DSLs, alternative entry points (Agent vs Chat vs ChatMethods). | Grep for the method/behavior in related classes. If parity is needed, add it now — not as a follow-up. | + | **Do error strategies align across layers?** Retry middleware + application fallback + framework error handling — do they conflict or create double execution? | List the specific error classes at each layer. Verify your rescue list matches what the lower layer actually raises. | + + **When to skip:** Leaf-node changes with no callbacks, no state persistence, no parallel interfaces. If the change is purely additive (new helper method, new view partial), the check takes 10 seconds and the answer is "nothing fires, skip." + + **When this matters most:** Any change that touches models with callbacks, error handling with fallback/retry, or functionality exposed through multiple interfaces. + + **IMPORTANT**: Always update the original plan document by checking off completed items. Use the Edit tool to change `- [ ]` to `- [x]` for each task you finish. This keeps the plan as a living document showing progress and ensures no checkboxes are left unchecked. + +2. **Incremental Commits** + + After completing each task, evaluate whether to create an incremental commit: + + | Commit when... | Don't commit when... 
| + |----------------|---------------------| + | Logical unit complete (model, service, component) | Small part of a larger unit | + | Tests pass + meaningful progress | Tests failing | + | About to switch contexts (backend → frontend) | Purely scaffolding with no behavior | + | About to attempt risky/uncertain changes | Would need a "WIP" commit message | + + **Heuristic:** "Can I write a commit message that describes a complete, valuable change? If yes, commit. If the message would be 'WIP' or 'partial X', wait." + + **Commit workflow:** + ```bash + # 1. Verify tests pass (use project's test command) + # Examples: bin/rails test, npm test, pytest, go test, etc. + + # 2. Stage only files related to this logical unit (not `git add .`) + git add <files related to this logical unit> + + # 3. Commit with conventional message + git commit -m "feat(scope): description of this unit" + ``` + + **Handling merge conflicts:** If conflicts arise during rebasing or merging, resolve them immediately. Incremental commits make conflict resolution easier since each commit is small and focused. + + **Note:** Incremental commits use clean conventional messages without attribution footers. The final Phase 4 commit/PR includes the full attribution. + +3. **Follow Existing Patterns** + + - The plan should reference similar code - read those files first + - Match naming conventions exactly + - Reuse existing components where possible + - Follow project coding standards (see CLAUDE.md) + - When in doubt, grep for similar implementations + +4. **Test Continuously** + + - Run relevant tests after each significant change + - Don't wait until the end to test + - Fix failures immediately + - Add new tests for new functionality + - **Unit tests with mocks prove logic in isolation. Integration tests with real objects prove the layers work together.** If your change touches callbacks, middleware, or error handling — you need both. + +5. 
**Figma Design Sync** (if applicable) + + For UI work with Figma designs: + + - Implement components following design specs + - Use figma-design-sync agent iteratively to compare + - Fix visual differences identified + - Repeat until implementation matches design + +6. **Track Progress** + - Keep TodoWrite updated as you complete tasks + - Note any blockers or unexpected discoveries + - Create new tasks if scope expands + - Keep user informed of major milestones + +### Phase 3: Quality Check + +1. **Run Core Quality Checks** + + Always run before submitting: + + ```bash + # Run full test suite (use project's test command) + # Examples: bin/rails test, npm test, pytest, go test, etc. + + # Run linting (per CLAUDE.md) + # Use linting-agent before pushing to origin + ``` + +2. **Consider Reviewer Agents** (Optional) + + Use for complex, risky, or large changes. Read agents from `compound-engineering.local.md` frontmatter (`review_agents`). If no settings file, invoke the `setup` skill to create one. + + Run configured agents in parallel with Task tool. Present findings and address critical issues. + +3. **Final Validation** + - All TodoWrite tasks marked completed + - All tests pass + - Linting passes + - Code follows existing patterns + - Figma designs match (if applicable) + - No console errors or warnings + +4. **Prepare Operational Validation Plan** (REQUIRED) + - Add a `## Post-Deploy Monitoring & Validation` section to the PR description for every change. + - Include concrete: + - Log queries/search terms + - Metrics or dashboards to watch + - Expected healthy signals + - Failure signals and rollback/mitigation trigger + - Validation window and owner + - If there is truly no production/runtime impact, still include the section with: `No additional operational monitoring required` and a one-line reason. + +### Phase 4: Ship It + +1. **Create Commit** + + ```bash + git add . 
+ git status # Review what's being committed + git diff --staged # Check the changes + + # Commit with conventional format + git commit -m "$(cat <<'EOF' + feat(scope): description of what and why + + Brief explanation if needed. + + 🤖 Generated with [Claude Code](https://claude.com/claude-code) + + Co-Authored-By: Claude <noreply@anthropic.com> + EOF + )" + ``` + +2. **Capture and Upload Screenshots for UI Changes** (REQUIRED for any UI work) + + For **any** design changes, new views, or UI modifications, you MUST capture and upload screenshots: + + **Step 1: Start dev server** (if not running) + ```bash + bin/dev # Run in background + ``` + + **Step 2: Capture screenshots with agent-browser CLI** + ```bash + agent-browser open http://localhost:3000/[route] + agent-browser snapshot -i + agent-browser screenshot output.png + ``` + See the `agent-browser` skill for detailed usage. + + **Step 3: Upload using imgup skill** + ```bash + skill: imgup + # Then upload each screenshot: + imgup -h pixhost screenshot.png # pixhost works without API key + # Alternative hosts: catbox, imagebin, beeimg + ``` + + **What to capture:** + - **New screens**: Screenshot of the new UI + - **Modified screens**: Before AND after screenshots + - **Design implementation**: Screenshot showing Figma design match + + **IMPORTANT**: Always include uploaded image URLs in PR description. This provides visual context for reviewers and documents the change. + +3. 
**Create Pull Request** + + ```bash + git push -u origin feature-branch-name + + gh pr create --title "Feature: [Description]" --body "$(cat <<'EOF' + ## Summary + - What was built + - Why it was needed + - Key decisions made + + ## Testing + - Tests added/modified + - Manual testing performed + + ## Post-Deploy Monitoring & Validation + - **What to monitor/search** + - Logs: + - Metrics/Dashboards: + - **Validation checks (queries/commands)** + - `command or query here` + - **Expected healthy behavior** + - Expected signal(s) + - **Failure signal(s) / rollback trigger** + - Trigger + immediate action + - **Validation window & owner** + - Window: + - Owner: + - **If no operational impact** + - `No additional operational monitoring required: <reason>` + + ## Before / After Screenshots + | Before | After | + |--------|-------| + | ![before](URL) | ![after](URL) | + + ## Figma Design + [Link if applicable] + + --- + + [![Compound Engineered](https://img.shields.io/badge/Compound-Engineered-6366f1)](https://github.com/EveryInc/compound-engineering-plugin) 🤖 Generated with [Claude Code](https://claude.com/claude-code) + EOF + )" + ``` + +4. **Update Plan Status** + + If the input document has YAML frontmatter with a `status` field, update it to `completed`: + ``` + status: active → status: completed + ``` + +5. **Notify User** + - Summarize what was completed + - Link to PR + - Note any follow-up work needed + - Suggest next steps if applicable + +--- + +## Swarm Mode (Optional) + +For complex plans with multiple independent workstreams, enable swarm mode for parallel execution with coordinated agents. + +### When to Use Swarm Mode + +| Use Swarm Mode when... | Use Standard Mode when... 
| +|------------------------|---------------------------| +| Plan has 5+ independent tasks | Plan is linear/sequential | +| Multiple specialists needed (review + test + implement) | Single-focus work | +| Want maximum parallelism | Simpler mental model preferred | +| Large feature with clear phases | Small feature or bug fix | + +### Enabling Swarm Mode + +To trigger swarm execution, say: + +> "Make a Task list and launch an army of agent swarm subagents to build the plan" + +Or explicitly request: "Use swarm mode for this work" + +### Swarm Workflow + +When swarm mode is enabled, the workflow changes: + +1. **Create Team** + ``` + Teammate({ operation: "spawnTeam", team_name: "work-{timestamp}" }) + ``` + +2. **Create Task List with Dependencies** + - Parse plan into TaskCreate items + - Set up blockedBy relationships for sequential dependencies + - Independent tasks have no blockers (can run in parallel) + +3. **Spawn Specialized Teammates** + ``` + Task({ + team_name: "work-{timestamp}", + name: "implementer", + subagent_type: "general-purpose", + prompt: "Claim implementation tasks, execute, mark complete", + run_in_background: true + }) + + Task({ + team_name: "work-{timestamp}", + name: "tester", + subagent_type: "general-purpose", + prompt: "Claim testing tasks, run tests, mark complete", + run_in_background: true + }) + ``` + +4. **Coordinate and Monitor** + - Team lead monitors task completion + - Spawn additional workers as phases unblock + - Handle plan approval if required + +5. **Cleanup** + ``` + Teammate({ operation: "requestShutdown", target_agent_id: "implementer" }) + Teammate({ operation: "requestShutdown", target_agent_id: "tester" }) + Teammate({ operation: "cleanup" }) + ``` + +See the `orchestrating-swarms` skill for detailed swarm patterns and best practices. 
+ +--- + +## Key Principles + +### Start Fast, Execute Faster + +- Get clarification once at the start, then execute +- Don't wait for perfect understanding - ask questions and move +- The goal is to **finish the feature**, not create perfect process + +### The Plan is Your Guide + +- Work documents should reference similar code and patterns +- Load those references and follow them +- Don't reinvent - match what exists + +### Test As You Go + +- Run tests after each change, not at the end +- Fix failures immediately +- Continuous testing prevents big surprises + +### Quality is Built In + +- Follow existing patterns +- Write tests for new code +- Run linting before pushing +- Use reviewer agents for complex/risky changes only + +### Ship Complete Features + +- Mark all tasks completed before moving on +- Don't leave features 80% done +- A finished feature that ships beats a perfect feature that doesn't + +## Quality Checklist + +Before creating PR, verify: + +- [ ] All clarifying questions asked and answered +- [ ] All TodoWrite tasks marked completed +- [ ] Tests pass (run project's test command) +- [ ] Linting passes (use linting-agent) +- [ ] Code follows existing patterns +- [ ] Figma designs match implementation (if applicable) +- [ ] Before/after screenshots captured and uploaded (for UI changes) +- [ ] Commit messages follow conventional format +- [ ] PR description includes Post-Deploy Monitoring & Validation section (or explicit no-impact rationale) +- [ ] PR description includes summary, testing notes, and screenshots +- [ ] PR description includes Compound Engineered badge + +## When to Use Reviewer Agents + +**Don't use by default.** Use reviewer agents only when: + +- Large refactor affecting many files (10+) +- Security-sensitive changes (authentication, permissions, data access) +- Performance-critical code paths +- Complex algorithms or business logic +- User explicitly requests thorough review + +For most features: tests + linting + following 
patterns is sufficient. 
+
+## Common Pitfalls to Avoid
+
+- **Analysis paralysis** - Don't overthink, read the plan and execute
+- **Skipping clarifying questions** - Ask now, not after building the wrong thing
+- **Ignoring plan references** - The plan has links for a reason
+- **Testing at the end** - Test continuously or suffer later
+- **Forgetting TodoWrite** - Track progress or lose track of what's done
+- **80% done syndrome** - Finish the feature, don't move on early
+- **Over-reviewing simple changes** - Save reviewer agents for complex work
diff --git a/plugins/compound-engineering/commands/changelog.md b/plugins/compound-engineering/skills/changelog/SKILL.md
similarity index 100%
rename from plugins/compound-engineering/commands/changelog.md
rename to plugins/compound-engineering/skills/changelog/SKILL.md
diff --git a/plugins/compound-engineering/commands/create-agent-skill.md b/plugins/compound-engineering/skills/create-agent-skill/SKILL.md
similarity index 83%
rename from plugins/compound-engineering/commands/create-agent-skill.md
rename to plugins/compound-engineering/skills/create-agent-skill/SKILL.md
index 9ec53f9..2b3052b 100644
--- a/plugins/compound-engineering/commands/create-agent-skill.md
+++ b/plugins/compound-engineering/skills/create-agent-skill/SKILL.md
@@ -2,7 +2,7 @@
 name: create-agent-skill
 description: Create or edit Claude Code skills with expert guidance on structure and best practices
 allowed-tools: Skill(create-agent-skills)
-argument-hint: [skill description or requirements]
+argument-hint: "[skill description or requirements]"
 disable-model-invocation: true
 ---
diff --git a/plugins/compound-engineering/skills/create-agent-skills/SKILL.md b/plugins/compound-engineering/skills/create-agent-skills/SKILL.md
index efcbd1a..93eb32d 100644
--- a/plugins/compound-engineering/skills/create-agent-skills/SKILL.md
+++ b/plugins/compound-engineering/skills/create-agent-skills/SKILL.md
@@ -97,22 +97,11 @@ Access individual args: `$ARGUMENTS[0]` or
shorthand `$0`, `$1`, `$2`. ### Dynamic Context Injection -The `` !`command` `` syntax runs shell commands before content is sent to Claude: +Skills support dynamic context injection: prefix a backtick-wrapped shell command with an exclamation mark, and the preprocessor executes it at load time, replacing the directive with stdout. Write an exclamation mark immediately before the opening backtick of the command you want executed (for example, to inject the current git branch, write the exclamation mark followed by `git branch --show-current` wrapped in backticks). -```yaml ---- -name: pr-summary -description: Summarize changes in a pull request -context: fork -agent: Explore ---- +**Important:** The preprocessor scans the entire SKILL.md as plain text — it does not parse markdown. Directives inside fenced code blocks or inline code spans are still executed. If a skill documents this syntax with literal examples, the preprocessor will attempt to run them, causing load failures. To safely document this feature, describe it in prose (as done here) or place examples in a reference file, which is loaded on-demand by Claude and not preprocessed. -## Context -- PR diff: !`gh pr diff` -- Changed files: !`gh pr diff --name-only` - -Summarize this pull request... -``` +For a concrete example of dynamic context injection in a skill, see [official-spec.md](references/official-spec.md) § "Dynamic Context Injection". ### Running in a Subagent diff --git a/plugins/compound-engineering/skills/create-agent-skills/workflows/add-workflow.md b/plugins/compound-engineering/skills/create-agent-skills/workflows/add-workflow.md index f53e9cf..cfad9f8 100644 --- a/plugins/compound-engineering/skills/create-agent-skills/workflows/add-workflow.md +++ b/plugins/compound-engineering/skills/create-agent-skills/workflows/add-workflow.md @@ -1,5 +1,11 @@ # Workflow: Add a Workflow to Existing Skill +## Interaction Method + +If `AskUserQuestion` is available, use it for all prompts below. 
+ +If not, present each question as a numbered list and wait for a reply before proceeding to the next step. Never skip or auto-configure. + <required_reading> **Read these reference files NOW:** 1. references/recommended-structure.md diff --git a/plugins/compound-engineering/skills/create-agent-skills/workflows/create-new-skill.md b/plugins/compound-engineering/skills/create-agent-skills/workflows/create-new-skill.md index 1b01bbb..3ef8b4a 100644 --- a/plugins/compound-engineering/skills/create-agent-skills/workflows/create-new-skill.md +++ b/plugins/compound-engineering/skills/create-agent-skills/workflows/create-new-skill.md @@ -1,5 +1,11 @@ # Workflow: Create a New Skill +## Interaction Method + +If `AskUserQuestion` is available, use it for all prompts below. + +If not, present each question as a numbered list and wait for a reply before proceeding to the next step. For multiSelect questions, accept comma-separated numbers (e.g. `1, 3`). Never skip or auto-configure. + <required_reading> **Read these reference files NOW:** 1. references/recommended-structure.md diff --git a/plugins/compound-engineering/commands/deepen-plan.md b/plugins/compound-engineering/skills/deepen-plan/SKILL.md similarity index 95% rename from plugins/compound-engineering/commands/deepen-plan.md rename to plugins/compound-engineering/skills/deepen-plan/SKILL.md index a705476..5e20491 100644 --- a/plugins/compound-engineering/commands/deepen-plan.md +++ b/plugins/compound-engineering/skills/deepen-plan/SKILL.md @@ -10,7 +10,7 @@ argument-hint: "[path to plan file]" **Note: The current year is 2026.** Use this when searching for recent documentation and best practices. -This command takes an existing plan (from `/workflows:plan`) and enhances each section with parallel research agents. Each major element gets its own dedicated research sub-agent to find: +This command takes an existing plan (from `/ce:plan`) and enhances each section with parallel research agents. 
Each major element gets its own dedicated research sub-agent to find: - Best practices and industry patterns - Performance optimizations - UI/UX improvements (if applicable) @@ -145,13 +145,13 @@ Task general-purpose: "Use the security-patterns skill at ~/.claude/skills/secur ### 3. Discover and Apply Learnings/Solutions <thinking> -Check for documented learnings from /workflows:compound. These are solved problems stored as markdown files. Spawn a sub-agent for each learning to check if it's relevant. +Check for documented learnings from /ce:compound. These are solved problems stored as markdown files. Spawn a sub-agent for each learning to check if it's relevant. </thinking> **LEARNINGS LOCATION - Check these exact folders:** ``` -docs/solutions/ <-- PRIMARY: Project-level learnings (created by /workflows:compound) +docs/solutions/ <-- PRIMARY: Project-level learnings (created by /ce:compound) ├── performance-issues/ │ └── *.md ├── debugging-patterns/ @@ -370,7 +370,7 @@ Wait for ALL parallel agents to complete - skills, research agents, review agent **Collect outputs from ALL sources:** 1. **Skill-based sub-agents** - Each skill's full output (code examples, patterns, recommendations) -2. **Learnings/Solutions sub-agents** - Relevant documented learnings from /workflows:compound +2. **Learnings/Solutions sub-agents** - Relevant documented learnings from /ce:compound 3. **Research agents** - Best practices, documentation, real-world examples 4. **Review agents** - All feedback from every reviewer (architecture, security, performance, simplicity, etc.) 5. **Context7 queries** - Framework documentation and patterns @@ -480,15 +480,13 @@ After writing the enhanced plan, use the **AskUserQuestion tool** to present the **Options:** 1. **View diff** - Show what was added/changed -2. **Run `/technical_review`** - Get feedback from reviewers on enhanced plan -3. **Start `/workflows:work`** - Begin implementing this enhanced plan -4. 
**Deepen further** - Run another round of research on specific sections -5. **Revert** - Restore original plan (if backup exists) +2. **Start `/ce:work`** - Begin implementing this enhanced plan +3. **Deepen further** - Run another round of research on specific sections +4. **Revert** - Restore original plan (if backup exists) Based on selection: - **View diff** → Run `git diff [plan_path]` or show before/after -- **`/technical_review`** → Call the /technical_review command with the plan file path -- **`/workflows:work`** → Call the /workflows:work command with the plan file path +- **`/ce:work`** → Call the /ce:work command with the plan file path - **Deepen further** → Ask which sections need more research, then re-run those agents - **Revert** → Restore from git or backup diff --git a/plugins/compound-engineering/commands/deploy-docs.md b/plugins/compound-engineering/skills/deploy-docs/SKILL.md similarity index 94% rename from plugins/compound-engineering/commands/deploy-docs.md rename to plugins/compound-engineering/skills/deploy-docs/SKILL.md index a54b8ea..3185a20 100644 --- a/plugins/compound-engineering/commands/deploy-docs.md +++ b/plugins/compound-engineering/skills/deploy-docs/SKILL.md @@ -15,7 +15,6 @@ Run these checks: ```bash # Count components echo "Agents: $(ls plugins/compound-engineering/agents/*.md | wc -l)" -echo "Commands: $(ls plugins/compound-engineering/commands/*.md | wc -l)" echo "Skills: $(ls -d plugins/compound-engineering/skills/*/ 2>/dev/null | wc -l)" # Validate JSON @@ -109,5 +108,5 @@ Provide a summary: - [ ] Commit any pending changes - [ ] Push to main branch - [ ] Verify GitHub Pages workflow exists -- [ ] Check deployment at https://everyinc.github.io/every-marketplace/ +- [ ] Check deployment at https://everyinc.github.io/compound-engineering-plugin/ ``` diff --git a/plugins/compound-engineering/skills/document-review/SKILL.md b/plugins/compound-engineering/skills/document-review/SKILL.md index e9cb3b2..3376c32 100644 --- 
a/plugins/compound-engineering/skills/document-review/SKILL.md +++ b/plugins/compound-engineering/skills/document-review/SKILL.md @@ -36,7 +36,7 @@ Score the document against these criteria: | **Specificity** | Concrete enough for next step (brainstorm → can plan, plan → can implement) | | **YAGNI** | No hypothetical features, simplest approach chosen | -If invoked within a workflow (after `/workflows:brainstorm` or `/workflows:plan`), also check: +If invoked within a workflow (after `/ce:brainstorm` or `/ce:plan`), also check: - **User intent fidelity** — Document reflects what was discussed, assumptions validated ## Step 4: Identify the Critical Improvement diff --git a/plugins/compound-engineering/commands/feature-video.md b/plugins/compound-engineering/skills/feature-video/SKILL.md similarity index 91% rename from plugins/compound-engineering/commands/feature-video.md rename to plugins/compound-engineering/skills/feature-video/SKILL.md index 346f765..55658dd 100644 --- a/plugins/compound-engineering/commands/feature-video.md +++ b/plugins/compound-engineering/skills/feature-video/SKILL.md @@ -26,6 +26,7 @@ This command creates professional video walkthroughs of features for PR document - Git repository with a PR to document - `ffmpeg` installed (for video conversion) - `rclone` configured (optional, for cloud upload - see rclone skill) +- Public R2 base URL known (for example, `https://<public-domain>.r2.dev`) </requirements> ## Setup @@ -212,6 +213,9 @@ ffmpeg -y -framerate 0.5 -pattern_type glob -i 'tmp/screenshots/*.png' \ # Check rclone is configured rclone listremotes +# Set your public base URL (NO trailing slash) +PUBLIC_BASE_URL="https://<your-public-r2-domain>.r2.dev" + # Upload video, preview GIF, and screenshots to cloud storage # Use --s3-no-check-bucket to avoid permission errors rclone copy tmp/videos/ r2:kieran-claude/pr-videos/pr-[number]/ --s3-no-check-bucket --progress @@ -219,12 +223,17 @@ rclone copy tmp/screenshots/ 
r2:kieran-claude/pr-videos/pr-[number]/screenshots/ # List uploaded files rclone ls r2:kieran-claude/pr-videos/pr-[number]/ -``` -Public URLs (R2 with public access): -``` -Video: https://pub-4047722ebb1b4b09853f24d3b61467f1.r2.dev/pr-videos/pr-[number]/feature-demo.mp4 -Preview: https://pub-4047722ebb1b4b09853f24d3b61467f1.r2.dev/pr-videos/pr-[number]/feature-demo-preview.gif +# Build and validate public URLs BEFORE updating PR +VIDEO_URL="$PUBLIC_BASE_URL/pr-videos/pr-[number]/feature-demo.mp4" +PREVIEW_URL="$PUBLIC_BASE_URL/pr-videos/pr-[number]/feature-demo-preview.gif" + +curl -I "$VIDEO_URL" +curl -I "$PREVIEW_URL" + +# Require HTTP 200 for both URLs; stop if either fails (pattern matches HTTP/1.1 and HTTP/2 status lines) +curl -I "$VIDEO_URL" | head -n 1 | grep -qE 'HTTP/[0-9.]+ 200' || exit 1 +curl -I "$PREVIEW_URL" | head -n 1 | grep -qE 'HTTP/[0-9.]+ 200' || exit 1 ``` </upload_video> @@ -254,7 +263,7 @@ If the PR already has a video section, replace it. Otherwise, append: Example: ```markdown -[![Feature Demo](https://pub-4047722ebb1b4b09853f24d3b61467f1.r2.dev/pr-videos/pr-137/feature-demo-preview.gif)](https://pub-4047722ebb1b4b09853f24d3b61467f1.r2.dev/pr-videos/pr-137/feature-demo.mp4) +[![Feature Demo](https://<your-public-r2-domain>.r2.dev/pr-videos/pr-137/feature-demo-preview.gif)](https://<your-public-r2-domain>.r2.dev/pr-videos/pr-137/feature-demo.mp4) ``` **Update the PR:** diff --git a/plugins/compound-engineering/skills/file-todos/SKILL.md b/plugins/compound-engineering/skills/file-todos/SKILL.md index fd58d27..2757631 100644 --- a/plugins/compound-engineering/skills/file-todos/SKILL.md +++ b/plugins/compound-engineering/skills/file-todos/SKILL.md @@ -192,7 +192,7 @@ Work logs serve as: | Trigger | Flow | Tool | |---------|------|------| -| Code review | `/workflows:review` → Findings → `/triage` → Todos | Review agent + skill | +| Code review | `/ce:review` → Findings → `/triage` → Todos | Review agent + skill | | PR comments | `/resolve_pr_parallel` → Individual fixes → Todos | gh CLI + skill | | Code TODOs |
`/resolve_todo_parallel` → Fixes + Complex todos | Agent + skill | | Planning | Brainstorm → Create todo → Work → Complete | Skill | diff --git a/plugins/compound-engineering/commands/generate_command.md b/plugins/compound-engineering/skills/generate_command/SKILL.md similarity index 92% rename from plugins/compound-engineering/commands/generate_command.md rename to plugins/compound-engineering/skills/generate_command/SKILL.md index a9cbe7c..47e2cfc 100644 --- a/plugins/compound-engineering/commands/generate_command.md +++ b/plugins/compound-engineering/skills/generate_command/SKILL.md @@ -7,7 +7,7 @@ disable-model-invocation: true # Create a Custom Claude Code Command -Create a new slash command in `.claude/commands/` for the requested task. +Create a new skill in `.claude/skills/` for the requested task. ## Goal @@ -128,10 +128,10 @@ Implement #$ARGUMENTS following these steps: ## Creating the Command File -1. **Create the file** at `.claude/commands/[name].md` (subdirectories like `workflows/` supported) +1. **Create the directory** at `.claude/skills/[name]/SKILL.md` 2. **Start with YAML frontmatter** (see section above) -3. **Structure the command** using the template above -4. **Test the command** by using it with appropriate arguments +3. **Structure the skill** using the template above +4. **Test the skill** by using it with appropriate arguments ## Command File Template diff --git a/plugins/compound-engineering/skills/git-worktree/SKILL.md b/plugins/compound-engineering/skills/git-worktree/SKILL.md index 1ba22f4..19b8806 100644 --- a/plugins/compound-engineering/skills/git-worktree/SKILL.md +++ b/plugins/compound-engineering/skills/git-worktree/SKILL.md @@ -38,8 +38,8 @@ git worktree add .worktrees/feature-name -b feature-name main Use this skill in these scenarios: -1. **Code Review (`/workflows:review`)**: If NOT already on the target branch (PR branch or requested branch), offer worktree for isolated review -2. 
**Feature Work (`/workflows:work`)**: Always ask if user wants parallel worktree or live branch work +1. **Code Review (`/ce:review`)**: If NOT already on the target branch (PR branch or requested branch), offer worktree for isolated review +2. **Feature Work (`/ce:work`)**: Always ask if user wants parallel worktree or live branch work 3. **Parallel Development**: When working on multiple features simultaneously 4. **Cleanup**: After completing work in a worktree @@ -47,7 +47,7 @@ Use this skill in these scenarios: ### In Claude Code Workflows -The skill is automatically called from `/workflows:review` and `/workflows:work` commands: +The skill is automatically called from `/ce:review` and `/ce:work` commands: ``` # For review: offers worktree if not on PR branch @@ -204,7 +204,7 @@ bash ${CLAUDE_PLUGIN_ROOT}/skills/git-worktree/scripts/worktree-manager.sh clean ## Integration with Workflows -### `/workflows:review` +### `/ce:review` Instead of always creating a worktree: @@ -217,7 +217,7 @@ Instead of always creating a worktree: - no → proceed with PR diff on current branch ``` -### `/workflows:work` +### `/ce:work` Always offer choice: diff --git a/plugins/compound-engineering/commands/heal-skill.md b/plugins/compound-engineering/skills/heal-skill/SKILL.md similarity index 98% rename from plugins/compound-engineering/commands/heal-skill.md rename to plugins/compound-engineering/skills/heal-skill/SKILL.md index 02d48a4..a021f31 100644 --- a/plugins/compound-engineering/commands/heal-skill.md +++ b/plugins/compound-engineering/skills/heal-skill/SKILL.md @@ -1,7 +1,7 @@ --- name: heal-skill description: Fix incorrect SKILL.md files when a skill has wrong instructions or outdated API references -argument-hint: [optional: specific issue to fix] +argument-hint: "[optional: specific issue to fix]" allowed-tools: [Read, Edit, Bash(ls:*), Bash(git:*)] disable-model-invocation: true --- diff --git a/plugins/compound-engineering/skills/lfg/SKILL.md 
b/plugins/compound-engineering/skills/lfg/SKILL.md new file mode 100644 index 0000000..46e1485 --- /dev/null +++ b/plugins/compound-engineering/skills/lfg/SKILL.md @@ -0,0 +1,34 @@ +--- +name: lfg +description: Full autonomous engineering workflow +argument-hint: "[feature description]" +disable-model-invocation: true +--- + +CRITICAL: You MUST execute every step below IN ORDER. Do NOT skip any step. Do NOT jump ahead to coding or implementation. The plan phase (steps 2-3) MUST be completed and verified BEFORE any work begins. Violating this order produces bad output. + +1. **Optional:** If the `ralph-wiggum` skill is available, run `/ralph-wiggum:ralph-loop "finish all slash commands" --completion-promise "DONE"`. If not available or it fails, skip and continue to step 2 immediately. + +2. `/ce:plan $ARGUMENTS` + + GATE: STOP. Verify that `/ce:plan` produced a plan file in `docs/plans/`. If no plan file was created, run `/ce:plan $ARGUMENTS` again. Do NOT proceed to step 3 until a written plan exists. + +3. `/compound-engineering:deepen-plan` + + GATE: STOP. Confirm the plan has been deepened and updated. The plan file in `docs/plans/` should now contain additional detail. Do NOT proceed to step 4 without a deepened plan. + +4. `/ce:work` + + GATE: STOP. Verify that implementation work was performed - files were created or modified beyond the plan. Do NOT proceed to step 5 if no code changes were made. + +5. `/ce:review` + +6. `/compound-engineering:resolve_todo_parallel` + +7. `/compound-engineering:test-browser` + +8. `/compound-engineering:feature-video` + +9. Output `<promise>DONE</promise>` when video is in PR + +Start with step 2 now (or step 1 if ralph-wiggum is available). Remember: plan FIRST, then work. Never skip the plan. 
diff --git a/plugins/compound-engineering/skills/proof/SKILL.md b/plugins/compound-engineering/skills/proof/SKILL.md new file mode 100644 index 0000000..f4f5c4f --- /dev/null +++ b/plugins/compound-engineering/skills/proof/SKILL.md @@ -0,0 +1,185 @@ +--- +name: proof +description: Create, edit, comment on, and share markdown documents via Proof's web API and local bridge. Use when asked to "proof", "share a doc", "create a proof doc", "comment on a document", "suggest edits", "review in proof", or when given a proofeditor.ai URL. +allowed-tools: + - Bash + - Read + - Write + - WebFetch +--- + +# Proof - Collaborative Markdown Editor + +Proof is a collaborative document editor for humans and agents. It supports two modes: + +1. **Web API** - Create and edit shared documents via HTTP (no install needed) +2. **Local Bridge** - Drive the macOS Proof app via localhost:9847 + +## Web API (Primary for Sharing) + +### Create a Shared Document + +No authentication required. Returns a shareable URL with access token. + +```bash +curl -X POST https://www.proofeditor.ai/share/markdown \ + -H "Content-Type: application/json" \ + -d '{"title":"My Doc","markdown":"# Hello\n\nContent here."}' +``` + +**Response format:** +```json +{ + "slug": "abc123", + "tokenUrl": "https://www.proofeditor.ai/d/abc123?token=xxx", + "accessToken": "xxx", + "ownerSecret": "yyy", + "_links": { + "state": "https://www.proofeditor.ai/api/agent/abc123/state", + "ops": "https://www.proofeditor.ai/api/agent/abc123/ops" + } +} +``` + +Use the `tokenUrl` as the shareable link. The `_links` give you the exact API paths. + +### Read a Shared Document + +```bash +curl -s "https://www.proofeditor.ai/api/agent/{slug}/state" \ + -H "x-share-token: <token>" +``` + +### Edit a Shared Document + +All operations go to `POST https://www.proofeditor.ai/api/agent/{slug}/ops` + +**Note:** Use the `/api/agent/{slug}/ops` path (from `_links` in create response), NOT `/api/documents/{slug}/ops`. 
+ +**Authentication for protected docs:** +- Header: `x-share-token: <token>` or `Authorization: Bearer <token>` +- Token comes from the URL parameter: `?token=xxx` or the `accessToken` from create response + +**Comment on text:** +```json +{"op": "comment.add", "quote": "text to comment on", "by": "ai:<agent-name>", "text": "Your comment here"} +``` + +**Reply to a comment:** +```json +{"op": "comment.reply", "markId": "<id>", "by": "ai:<agent-name>", "text": "Reply text"} +``` + +**Resolve a comment:** +```json +{"op": "comment.resolve", "markId": "<id>", "by": "ai:<agent-name>"} +``` + +**Suggest a replacement:** +```json +{"op": "suggestion.add", "kind": "replace", "quote": "original text", "by": "ai:<agent-name>", "content": "replacement text"} +``` + +**Suggest a deletion:** +```json +{"op": "suggestion.add", "kind": "delete", "quote": "text to delete", "by": "ai:<agent-name>"} +``` + +**Bulk rewrite:** +```json +{"op": "rewrite.apply", "content": "full new markdown", "by": "ai:<agent-name>"} +``` + +### Known Limitations (Web API) + +- `suggestion.add` with `kind: "insert"` returns Bad Request on the web ops endpoint. Use `kind: "replace"` with a broader quote instead, or use `rewrite.apply` for insertions. +- Bridge-style endpoints (`/d/{slug}/bridge/*`) require client version headers (`x-proof-client-version`, `x-proof-client-build`, `x-proof-client-protocol`) and return 426 CLIENT_UPGRADE_REQUIRED without them. Use the `/api/agent/{slug}/ops` endpoint instead. + +## Local Bridge (macOS App) + +Requires Proof.app running. Bridge at `http://localhost:9847`. 
+ +**Required headers:** +- `X-Agent-Id: claude` (identity for presence) +- `Content-Type: application/json` +- `X-Window-Id: <uuid>` (when multiple docs open) + +### Key Endpoints + +| Method | Endpoint | Purpose | +|--------|----------|---------| +| GET | `/windows` | List open documents | +| GET | `/state` | Read markdown, cursor, word count | +| GET | `/marks` | List all suggestions and comments | +| POST | `/marks/suggest-replace` | `{"quote":"old","by":"ai:<agent-name>","content":"new"}` | +| POST | `/marks/suggest-insert` | `{"quote":"after this","by":"ai:<agent-name>","content":"insert"}` | +| POST | `/marks/suggest-delete` | `{"quote":"delete this","by":"ai:<agent-name>"}` | +| POST | `/marks/comment` | `{"quote":"text","by":"ai:<agent-name>","text":"comment"}` | +| POST | `/marks/reply` | `{"markId":"<id>","by":"ai:<agent-name>","text":"reply"}` | +| POST | `/marks/resolve` | `{"markId":"<id>","by":"ai:<agent-name>"}` | +| POST | `/marks/accept` | `{"markId":"<id>"}` | +| POST | `/marks/reject` | `{"markId":"<id>"}` | +| POST | `/rewrite` | `{"content":"full markdown","by":"ai:<agent-name>"}` | +| POST | `/presence` | `{"status":"reading","summary":"..."}` | +| GET | `/events/pending` | Poll for user actions | + +### Presence Statuses + +`thinking`, `reading`, `idle`, `acting`, `waiting`, `completed` + +## Workflow: Review a Shared Document + +When given a Proof URL like `https://www.proofeditor.ai/d/abc123?token=xxx`: + +1. Extract the slug (`abc123`) and token from the URL +2. Read the document state via the API +3. Add comments or suggest edits using the ops endpoint +4. 
The author sees changes in real-time + +```bash +# Read +curl -s "https://www.proofeditor.ai/api/agent/abc123/state" \ + -H "x-share-token: xxx" + +# Comment +curl -X POST "https://www.proofeditor.ai/api/agent/abc123/ops" \ + -H "Content-Type: application/json" \ + -H "x-share-token: xxx" \ + -d '{"op":"comment.add","quote":"text","by":"ai:compound","text":"comment"}' + +# Suggest edit +curl -X POST "https://www.proofeditor.ai/api/agent/abc123/ops" \ + -H "Content-Type: application/json" \ + -H "x-share-token: xxx" \ + -d '{"op":"suggestion.add","kind":"replace","quote":"old","by":"ai:compound","content":"new"}' +``` + +## Workflow: Create and Share a New Document + +```bash +# 1. Create +RESPONSE=$(curl -s -X POST https://www.proofeditor.ai/share/markdown \ + -H "Content-Type: application/json" \ + -d '{"title":"My Doc","markdown":"# Title\n\nContent here."}') + +# 2. Extract URL and token +URL=$(echo "$RESPONSE" | jq -r '.tokenUrl') +SLUG=$(echo "$RESPONSE" | jq -r '.slug') +TOKEN=$(echo "$RESPONSE" | jq -r '.accessToken') + +# 3. Share the URL +echo "$URL" + +# 4. 
Make edits using the ops endpoint +curl -X POST "https://www.proofeditor.ai/api/agent/$SLUG/ops" \ + -H "Content-Type: application/json" \ + -H "x-share-token: $TOKEN" \ + -d '{"op":"comment.add","quote":"Content here","by":"ai:compound","text":"Added a note"}' +``` + +## Safety + +- Use `/state` content as source of truth before editing +- Prefer suggest-replace over full rewrite for small changes +- Don't span table cells in a single replace +- Always include `by` field for attribution tracking diff --git a/plugins/compound-engineering/commands/report-bug.md b/plugins/compound-engineering/skills/report-bug/SKILL.md similarity index 100% rename from plugins/compound-engineering/commands/report-bug.md rename to plugins/compound-engineering/skills/report-bug/SKILL.md diff --git a/plugins/compound-engineering/commands/reproduce-bug.md b/plugins/compound-engineering/skills/reproduce-bug/SKILL.md similarity index 100% rename from plugins/compound-engineering/commands/reproduce-bug.md rename to plugins/compound-engineering/skills/reproduce-bug/SKILL.md diff --git a/plugins/compound-engineering/skills/resolve-pr-parallel/SKILL.md b/plugins/compound-engineering/skills/resolve-pr-parallel/SKILL.md index 46dc793..e040fba 100644 --- a/plugins/compound-engineering/skills/resolve-pr-parallel/SKILL.md +++ b/plugins/compound-engineering/skills/resolve-pr-parallel/SKILL.md @@ -1,5 +1,5 @@ --- -name: resolve_pr_parallel +name: resolve-pr-parallel description: Resolve all PR comments using parallel processing. Use when addressing PR review feedback, resolving review threads, or batch-fixing PR comments. 
argument-hint: "[optional: PR number or current PR]" disable-model-invocation: true diff --git a/plugins/compound-engineering/commands/resolve_parallel.md b/plugins/compound-engineering/skills/resolve_parallel/SKILL.md similarity index 100% rename from plugins/compound-engineering/commands/resolve_parallel.md rename to plugins/compound-engineering/skills/resolve_parallel/SKILL.md diff --git a/plugins/compound-engineering/skills/resolve_todo_parallel/SKILL.md b/plugins/compound-engineering/skills/resolve_todo_parallel/SKILL.md new file mode 100644 index 0000000..afd653d --- /dev/null +++ b/plugins/compound-engineering/skills/resolve_todo_parallel/SKILL.md @@ -0,0 +1,37 @@ +--- +name: resolve_todo_parallel +description: Resolve all pending CLI todos using parallel processing +argument-hint: "[optional: specific todo ID or pattern]" +--- + +Resolve all TODO comments using parallel processing. + +## Workflow + +### 1. Analyze + +Get all unresolved TODOs from the /todos/\*.md directory. + +If any todo recommends deleting, removing, or gitignoring files in `docs/plans/` or `docs/solutions/`, skip it and mark it as `wont_fix`. These are compound-engineering pipeline artifacts that are intentional and permanent. + +### 2. Plan + +Create a TodoWrite list of all unresolved items grouped by type. Make sure to look at dependencies that might occur and prioritize the ones needed by others. For example, if you need to change a name, complete that first before starting the todos that depend on it. Output a mermaid flow diagram showing how we can do this. Can we do everything in parallel? Do we need to do one first that leads to others in parallel? Put the to-dos in the mermaid diagram in flow order so the agent knows how to proceed. + +### 3. Implement (PARALLEL) + +Spawn a pr-comment-resolver agent for each unresolved item in parallel. + +So if there are 3 comments, it will spawn 3 pr-comment-resolver agents in parallel, like this: + +1. Task pr-comment-resolver(comment1) +2.
Task pr-comment-resolver(comment2) +3. Task pr-comment-resolver(comment3) + +Always run all in parallel subagents/Tasks for each Todo item. + +### 4. Commit & Resolve + +- Commit changes +- Remove the TODO from the file, and mark it as resolved. +- Push to remote diff --git a/plugins/compound-engineering/skills/setup/SKILL.md b/plugins/compound-engineering/skills/setup/SKILL.md index 239739a..73fc0fb 100644 --- a/plugins/compound-engineering/skills/setup/SKILL.md +++ b/plugins/compound-engineering/skills/setup/SKILL.md @@ -6,7 +6,13 @@ disable-model-invocation: true # Compound Engineering Setup -Interactive setup for `compound-engineering.local.md` — configures which agents run during `/workflows:review` and `/workflows:work`. +## Interaction Method + +If `AskUserQuestion` is available, use it for all prompts below. + +If not, present each question as a numbered list and wait for a reply before proceeding to the next step. For multiSelect questions, accept comma-separated numbers (e.g. `1, 3`). Never skip or auto-configure. + +Interactive setup for `compound-engineering.local.md` — configures which agents run during `/ce:review` and `/ce:work`. ## Step 1: Check Existing Config @@ -145,7 +151,7 @@ plan_review_agents: [{computed plan agent list}] # Review Context Add project-specific review instructions here. -These notes are passed to all review agents during /workflows:review and /workflows:work. +These notes are passed to all review agents during /ce:review and /ce:work. Examples: - "We use Turbo Frames heavily — check for frame-busting issues" diff --git a/plugins/compound-engineering/skills/skill-creator/SKILL.md b/plugins/compound-engineering/skills/skill-creator/SKILL.md deleted file mode 100644 index 4917689..0000000 --- a/plugins/compound-engineering/skills/skill-creator/SKILL.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -name: skill-creator -description: Guide for creating effective skills. 
This skill should be used when users want to create a new skill (or update an existing skill) that extends Claude's capabilities with specialized knowledge, workflows, or tool integrations. -license: Complete terms in LICENSE.txt -disable-model-invocation: true ---- - -# Skill Creator - -This skill provides guidance for creating effective skills. - -## About Skills - -Skills are modular, self-contained packages that extend Claude's capabilities by providing -specialized knowledge, workflows, and tools. Think of them as "onboarding guides" for specific -domains or tasks—they transform Claude from a general-purpose agent into a specialized agent -equipped with procedural knowledge that no model can fully possess. - -### What Skills Provide - -1. Specialized workflows - Multi-step procedures for specific domains -2. Tool integrations - Instructions for working with specific file formats or APIs -3. Domain expertise - Company-specific knowledge, schemas, business logic -4. Bundled resources - Scripts, references, and assets for complex and repetitive tasks - -### Anatomy of a Skill - -Every skill consists of a required SKILL.md file and optional bundled resources: - -``` -skill-name/ -├── SKILL.md (required) -│ ├── YAML frontmatter metadata (required) -│ │ ├── name: (required) -│ │ └── description: (required) -│ └── Markdown instructions (required) -└── Bundled Resources (optional) - ├── scripts/ - Executable code (Python/Bash/etc.) - ├── references/ - Documentation intended to be loaded into context as needed - └── assets/ - Files used in output (templates, icons, fonts, etc.) -``` - -#### SKILL.md (required) - -**Metadata Quality:** The `name` and `description` in YAML frontmatter determine when Claude will use the skill. Be specific about what the skill does and when to use it. Use the third-person (e.g. "This skill should be used when..." instead of "Use this skill when..."). 
- -#### Bundled Resources (optional) - -##### Scripts (`scripts/`) - -Executable code (Python/Bash/etc.) for tasks that require deterministic reliability or are repeatedly rewritten. - -- **When to include**: When the same code is being rewritten repeatedly or deterministic reliability is needed -- **Example**: `scripts/rotate_pdf.py` for PDF rotation tasks -- **Benefits**: Token efficient, deterministic, may be executed without loading into context -- **Note**: Scripts may still need to be read by Claude for patching or environment-specific adjustments - -##### References (`references/`) - -Documentation and reference material intended to be loaded as needed into context to inform Claude's process and thinking. - -- **When to include**: For documentation that Claude should reference while working -- **Examples**: `references/finance.md` for financial schemas, `references/mnda.md` for company NDA template, `references/policies.md` for company policies, `references/api_docs.md` for API specifications -- **Use cases**: Database schemas, API documentation, domain knowledge, company policies, detailed workflow guides -- **Benefits**: Keeps SKILL.md lean, loaded only when Claude determines it's needed -- **Best practice**: If files are large (>10k words), include grep search patterns in SKILL.md -- **Avoid duplication**: Information should live in either SKILL.md or references files, not both. Prefer references files for detailed information unless it's truly core to the skill—this keeps SKILL.md lean while making information discoverable without hogging the context window. Keep only essential procedural instructions and workflow guidance in SKILL.md; move detailed reference material, schemas, and examples to references files. - -##### Assets (`assets/`) - -Files not intended to be loaded into context, but rather used within the output Claude produces. 
- -- **When to include**: When the skill needs files that will be used in the final output -- **Examples**: `assets/logo.png` for brand assets, `assets/slides.pptx` for PowerPoint templates, `assets/frontend-template/` for HTML/React boilerplate, `assets/font.ttf` for typography -- **Use cases**: Templates, images, icons, boilerplate code, fonts, sample documents that get copied or modified -- **Benefits**: Separates output resources from documentation, enables Claude to use files without loading them into context - -### Progressive Disclosure Design Principle - -Skills use a three-level loading system to manage context efficiently: - -1. **Metadata (name + description)** - Always in context (~100 words) -2. **SKILL.md body** - When skill triggers (<5k words) -3. **Bundled resources** - As needed by Claude (Unlimited*) - -*Unlimited because scripts can be executed without reading into context window. - -## Skill Creation Process - -To create a skill, follow the "Skill Creation Process" in order, skipping steps only if there is a clear reason why they are not applicable. - -### Step 1: Understanding the Skill with Concrete Examples - -Skip this step only when the skill's usage patterns are already clearly understood. It remains valuable even when working with an existing skill. - -To create an effective skill, clearly understand concrete examples of how the skill will be used. This understanding can come from either direct user examples or generated examples that are validated with user feedback. - -For example, when building an image-editor skill, relevant questions include: - -- "What functionality should the image-editor skill support? Editing, rotating, anything else?" -- "Can you give some examples of how this skill would be used?" -- "I can imagine users asking for things like 'Remove the red-eye from this image' or 'Rotate this image'. Are there other ways you imagine this skill being used?" -- "What would a user say that should trigger this skill?" 
- -To avoid overwhelming users, avoid asking too many questions in a single message. Start with the most important questions and follow up as needed for better effectiveness. - -Conclude this step when there is a clear sense of the functionality the skill should support. - -### Step 2: Planning the Reusable Skill Contents - -To turn concrete examples into an effective skill, analyze each example by: - -1. Considering how to execute on the example from scratch -2. Identifying what scripts, references, and assets would be helpful when executing these workflows repeatedly - -Example: When building a `pdf-editor` skill to handle queries like "Help me rotate this PDF," the analysis shows: - -1. Rotating a PDF requires re-writing the same code each time -2. A `scripts/rotate_pdf.py` script would be helpful to store in the skill - -Example: When designing a `frontend-webapp-builder` skill for queries like "Build me a todo app" or "Build me a dashboard to track my steps," the analysis shows: - -1. Writing a frontend webapp requires the same boilerplate HTML/React each time -2. An `assets/hello-world/` template containing the boilerplate HTML/React project files would be helpful to store in the skill - -Example: When building a `big-query` skill to handle queries like "How many users have logged in today?" the analysis shows: - -1. Querying BigQuery requires re-discovering the table schemas and relationships each time -2. A `references/schema.md` file documenting the table schemas would be helpful to store in the skill - -To establish the skill's contents, analyze each concrete example to create a list of the reusable resources to include: scripts, references, and assets. - -### Step 3: Initializing the Skill - -At this point, it is time to actually create the skill. - -Skip this step only if the skill being developed already exists, and iteration or packaging is needed. In this case, continue to the next step. 
- -When creating a new skill from scratch, always run the `init_skill.py` script. The script conveniently generates a new template skill directory that automatically includes everything a skill requires, making the skill creation process much more efficient and reliable. - -Usage: - -```bash -scripts/init_skill.py <skill-name> --path <output-directory> -``` - -The script: - -- Creates the skill directory at the specified path -- Generates a SKILL.md template with proper frontmatter and TODO placeholders -- Creates example resource directories: `scripts/`, `references/`, and `assets/` -- Adds example files in each directory that can be customized or deleted - -After initialization, customize or remove the generated SKILL.md and example files as needed. - -### Step 4: Edit the Skill - -When editing the (newly-generated or existing) skill, remember that the skill is being created for another instance of Claude to use. Focus on including information that would be beneficial and non-obvious to Claude. Consider what procedural knowledge, domain-specific details, or reusable assets would help another Claude instance execute these tasks more effectively. - -#### Start with Reusable Skill Contents - -To begin implementation, start with the reusable resources identified above: `scripts/`, `references/`, and `assets/` files. Note that this step may require user input. For example, when implementing a `brand-guidelines` skill, the user may need to provide brand assets or templates to store in `assets/`, or documentation to store in `references/`. - -Also, delete any example files and directories not needed for the skill. The initialization script creates example files in `scripts/`, `references/`, and `assets/` to demonstrate structure, but most skills won't need all of them. - -#### Update SKILL.md - -**Writing Style:** Write the entire skill using **imperative/infinitive form** (verb-first instructions), not second person. 
Use objective, instructional language (e.g., "To accomplish X, do Y" rather than "You should do X" or "If you need to do X"). This maintains consistency and clarity for AI consumption. - -To complete SKILL.md, answer the following questions: - -1. What is the purpose of the skill, in a few sentences? -2. When should the skill be used? -3. In practice, how should Claude use the skill? All reusable skill contents developed above should be referenced so that Claude knows how to use them. - -### Step 5: Packaging a Skill - -Once the skill is ready, it should be packaged into a distributable zip file that gets shared with the user. The packaging process automatically validates the skill first to ensure it meets all requirements: - -```bash -scripts/package_skill.py <path/to/skill-folder> -``` - -Optional output directory specification: - -```bash -scripts/package_skill.py <path/to/skill-folder> ./dist -``` - -The packaging script will: - -1. **Validate** the skill automatically, checking: - - YAML frontmatter format and required fields - - Skill naming conventions and directory structure - - Description completeness and quality - - File organization and resource references - -2. **Package** the skill if validation passes, creating a zip file named after the skill (e.g., `my-skill.zip`) that includes all files and maintains the proper directory structure for distribution. - -If validation fails, the script will report the errors and exit without creating a package. Fix any validation errors and run the packaging command again. - -### Step 6: Iterate - -After testing the skill, users may request improvements. Often this happens right after using the skill, with fresh context of how the skill performed. - -**Iteration workflow:** -1. Use the skill on real tasks -2. Notice struggles or inefficiencies -3. Identify how SKILL.md or bundled resources should be updated -4. 
Implement changes and test again diff --git a/plugins/compound-engineering/skills/skill-creator/scripts/init_skill.py b/plugins/compound-engineering/skills/skill-creator/scripts/init_skill.py deleted file mode 100755 index 329ad4e..0000000 --- a/plugins/compound-engineering/skills/skill-creator/scripts/init_skill.py +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/env python3 -""" -Skill Initializer - Creates a new skill from template - -Usage: - init_skill.py <skill-name> --path <path> - -Examples: - init_skill.py my-new-skill --path skills/public - init_skill.py my-api-helper --path skills/private - init_skill.py custom-skill --path /custom/location -""" - -import sys -from pathlib import Path - - -SKILL_TEMPLATE = """--- -name: {skill_name} -description: [TODO: Complete and informative explanation of what the skill does and when to use it. Include WHEN to use this skill - specific scenarios, file types, or tasks that trigger it.] ---- - -# {skill_title} - -## Overview - -[TODO: 1-2 sentences explaining what this skill enables] - -## Structuring This Skill - -[TODO: Choose the structure that best fits this skill's purpose. Common patterns: - -**1. Workflow-Based** (best for sequential processes) -- Works well when there are clear step-by-step procedures -- Example: DOCX skill with "Workflow Decision Tree" → "Reading" → "Creating" → "Editing" -- Structure: ## Overview → ## Workflow Decision Tree → ## Step 1 → ## Step 2... - -**2. Task-Based** (best for tool collections) -- Works well when the skill offers different operations/capabilities -- Example: PDF skill with "Quick Start" → "Merge PDFs" → "Split PDFs" → "Extract Text" -- Structure: ## Overview → ## Quick Start → ## Task Category 1 → ## Task Category 2... - -**3. 
Reference/Guidelines** (best for standards or specifications) -- Works well for brand guidelines, coding standards, or requirements -- Example: Brand styling with "Brand Guidelines" → "Colors" → "Typography" → "Features" -- Structure: ## Overview → ## Guidelines → ## Specifications → ## Usage... - -**4. Capabilities-Based** (best for integrated systems) -- Works well when the skill provides multiple interrelated features -- Example: Product Management with "Core Capabilities" → numbered capability list -- Structure: ## Overview → ## Core Capabilities → ### 1. Feature → ### 2. Feature... - -Patterns can be mixed and matched as needed. Most skills combine patterns (e.g., start with task-based, add workflow for complex operations). - -Delete this entire "Structuring This Skill" section when done - it's just guidance.] - -## [TODO: Replace with the first main section based on chosen structure] - -[TODO: Add content here. See examples in existing skills: -- Code samples for technical skills -- Decision trees for complex workflows -- Concrete examples with realistic user requests -- References to scripts/templates/references as needed] - -## Resources - -This skill includes example resource directories that demonstrate how to organize different types of bundled resources: - -### scripts/ -Executable code (Python/Bash/etc.) that can be run directly to perform specific operations. - -**Examples from other skills:** -- PDF skill: `fill_fillable_fields.py`, `extract_form_field_info.py` - utilities for PDF manipulation -- DOCX skill: `document.py`, `utilities.py` - Python modules for document processing - -**Appropriate for:** Python scripts, shell scripts, or any executable code that performs automation, data processing, or specific operations. - -**Note:** Scripts may be executed without loading into context, but can still be read by Claude for patching or environment adjustments. 
- -### references/ -Documentation and reference material intended to be loaded into context to inform Claude's process and thinking. - -**Examples from other skills:** -- Product management: `communication.md`, `context_building.md` - detailed workflow guides -- BigQuery: API reference documentation and query examples -- Finance: Schema documentation, company policies - -**Appropriate for:** In-depth documentation, API references, database schemas, comprehensive guides, or any detailed information that Claude should reference while working. - -### assets/ -Files not intended to be loaded into context, but rather used within the output Claude produces. - -**Examples from other skills:** -- Brand styling: PowerPoint template files (.pptx), logo files -- Frontend builder: HTML/React boilerplate project directories -- Typography: Font files (.ttf, .woff2) - -**Appropriate for:** Templates, boilerplate code, document templates, images, icons, fonts, or any files meant to be copied or used in the final output. - ---- - -**Any unneeded directories can be deleted.** Not every skill requires all three types of resources. -""" - -EXAMPLE_SCRIPT = '''#!/usr/bin/env python3 -""" -Example helper script for {skill_name} - -This is a placeholder script that can be executed directly. -Replace with actual implementation or delete if not needed. - -Example real scripts from other skills: -- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields -- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images -""" - -def main(): - print("This is an example script for {skill_name}") - # TODO: Add actual script logic here - # This could be data processing, file conversion, API calls, etc. - -if __name__ == "__main__": - main() -''' - -EXAMPLE_REFERENCE = """# Reference Documentation for {skill_title} - -This is a placeholder for detailed reference documentation. -Replace with actual reference content or delete if not needed. 
- -Example real reference docs from other skills: -- product-management/references/communication.md - Comprehensive guide for status updates -- product-management/references/context_building.md - Deep-dive on gathering context -- bigquery/references/ - API references and query examples - -## When Reference Docs Are Useful - -Reference docs are ideal for: -- Comprehensive API documentation -- Detailed workflow guides -- Complex multi-step processes -- Information too lengthy for main SKILL.md -- Content that's only needed for specific use cases - -## Structure Suggestions - -### API Reference Example -- Overview -- Authentication -- Endpoints with examples -- Error codes -- Rate limits - -### Workflow Guide Example -- Prerequisites -- Step-by-step instructions -- Common patterns -- Troubleshooting -- Best practices -""" - -EXAMPLE_ASSET = """# Example Asset File - -This placeholder represents where asset files would be stored. -Replace with actual asset files (templates, images, fonts, etc.) or delete if not needed. - -Asset files are NOT intended to be loaded into context, but rather used within -the output Claude produces. - -Example asset files from other skills: -- Brand guidelines: logo.png, slides_template.pptx -- Frontend builder: hello-world/ directory with HTML/React boilerplate -- Typography: custom-font.ttf, font-family.woff2 -- Data: sample_data.csv, test_dataset.json - -## Common Asset Types - -- Templates: .pptx, .docx, boilerplate directories -- Images: .png, .jpg, .svg, .gif -- Fonts: .ttf, .otf, .woff, .woff2 -- Boilerplate code: Project directories, starter files -- Icons: .ico, .svg -- Data files: .csv, .json, .xml, .yaml - -Note: This is a text placeholder. Actual assets can be any file type. 
-""" - - -def title_case_skill_name(skill_name): - """Convert hyphenated skill name to Title Case for display.""" - return ' '.join(word.capitalize() for word in skill_name.split('-')) - - -def init_skill(skill_name, path): - """ - Initialize a new skill directory with template SKILL.md. - - Args: - skill_name: Name of the skill - path: Path where the skill directory should be created - - Returns: - Path to created skill directory, or None if error - """ - # Determine skill directory path - skill_dir = Path(path).resolve() / skill_name - - # Check if directory already exists - if skill_dir.exists(): - print(f"❌ Error: Skill directory already exists: {skill_dir}") - return None - - # Create skill directory - try: - skill_dir.mkdir(parents=True, exist_ok=False) - print(f"✅ Created skill directory: {skill_dir}") - except Exception as e: - print(f"❌ Error creating directory: {e}") - return None - - # Create SKILL.md from template - skill_title = title_case_skill_name(skill_name) - skill_content = SKILL_TEMPLATE.format( - skill_name=skill_name, - skill_title=skill_title - ) - - skill_md_path = skill_dir / 'SKILL.md' - try: - skill_md_path.write_text(skill_content) - print("✅ Created SKILL.md") - except Exception as e: - print(f"❌ Error creating SKILL.md: {e}") - return None - - # Create resource directories with example files - try: - # Create scripts/ directory with example script - scripts_dir = skill_dir / 'scripts' - scripts_dir.mkdir(exist_ok=True) - example_script = scripts_dir / 'example.py' - example_script.write_text(EXAMPLE_SCRIPT.format(skill_name=skill_name)) - example_script.chmod(0o755) - print("✅ Created scripts/example.py") - - # Create references/ directory with example reference doc - references_dir = skill_dir / 'references' - references_dir.mkdir(exist_ok=True) - example_reference = references_dir / 'api_reference.md' - example_reference.write_text(EXAMPLE_REFERENCE.format(skill_title=skill_title)) - print("✅ Created references/api_reference.md") - - 
# Create assets/ directory with example asset placeholder - assets_dir = skill_dir / 'assets' - assets_dir.mkdir(exist_ok=True) - example_asset = assets_dir / 'example_asset.txt' - example_asset.write_text(EXAMPLE_ASSET) - print("✅ Created assets/example_asset.txt") - except Exception as e: - print(f"❌ Error creating resource directories: {e}") - return None - - # Print next steps - print(f"\n✅ Skill '{skill_name}' initialized successfully at {skill_dir}") - print("\nNext steps:") - print("1. Edit SKILL.md to complete the TODO items and update the description") - print("2. Customize or delete the example files in scripts/, references/, and assets/") - print("3. Run the validator when ready to check the skill structure") - - return skill_dir - - -def main(): - if len(sys.argv) < 4 or sys.argv[2] != '--path': - print("Usage: init_skill.py <skill-name> --path <path>") - print("\nSkill name requirements:") - print(" - Hyphen-case identifier (e.g., 'data-analyzer')") - print(" - Lowercase letters, digits, and hyphens only") - print(" - Max 40 characters") - print(" - Must match directory name exactly") - print("\nExamples:") - print(" init_skill.py my-new-skill --path skills/public") - print(" init_skill.py my-api-helper --path skills/private") - print(" init_skill.py custom-skill --path /custom/location") - sys.exit(1) - - skill_name = sys.argv[1] - path = sys.argv[3] - - print(f"🚀 Initializing skill: {skill_name}") - print(f" Location: {path}") - print() - - result = init_skill(skill_name, path) - - if result: - sys.exit(0) - else: - sys.exit(1) - - -if __name__ == "__main__": - main() diff --git a/plugins/compound-engineering/skills/skill-creator/scripts/package_skill.py b/plugins/compound-engineering/skills/skill-creator/scripts/package_skill.py deleted file mode 100755 index 3ee8e8e..0000000 --- a/plugins/compound-engineering/skills/skill-creator/scripts/package_skill.py +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env python3 -""" -Skill Packager - Creates a 
distributable zip file of a skill folder - -Usage: - python utils/package_skill.py <path/to/skill-folder> [output-directory] - -Example: - python utils/package_skill.py skills/public/my-skill - python utils/package_skill.py skills/public/my-skill ./dist -""" - -import sys -import zipfile -from pathlib import Path -from quick_validate import validate_skill - - -def package_skill(skill_path, output_dir=None): - """ - Package a skill folder into a zip file. - - Args: - skill_path: Path to the skill folder - output_dir: Optional output directory for the zip file (defaults to current directory) - - Returns: - Path to the created zip file, or None if error - """ - skill_path = Path(skill_path).resolve() - - # Validate skill folder exists - if not skill_path.exists(): - print(f"❌ Error: Skill folder not found: {skill_path}") - return None - - if not skill_path.is_dir(): - print(f"❌ Error: Path is not a directory: {skill_path}") - return None - - # Validate SKILL.md exists - skill_md = skill_path / "SKILL.md" - if not skill_md.exists(): - print(f"❌ Error: SKILL.md not found in {skill_path}") - return None - - # Run validation before packaging - print("🔍 Validating skill...") - valid, message = validate_skill(skill_path) - if not valid: - print(f"❌ Validation failed: {message}") - print(" Please fix the validation errors before packaging.") - return None - print(f"✅ {message}\n") - - # Determine output location - skill_name = skill_path.name - if output_dir: - output_path = Path(output_dir).resolve() - output_path.mkdir(parents=True, exist_ok=True) - else: - output_path = Path.cwd() - - zip_filename = output_path / f"{skill_name}.zip" - - # Create the zip file - try: - with zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED) as zipf: - # Walk through the skill directory - for file_path in skill_path.rglob('*'): - if file_path.is_file(): - # Calculate the relative path within the zip - arcname = file_path.relative_to(skill_path.parent) - zipf.write(file_path, arcname) - 
print(f" Added: {arcname}") - - print(f"\n✅ Successfully packaged skill to: {zip_filename}") - return zip_filename - - except Exception as e: - print(f"❌ Error creating zip file: {e}") - return None - - -def main(): - if len(sys.argv) < 2: - print("Usage: python utils/package_skill.py <path/to/skill-folder> [output-directory]") - print("\nExample:") - print(" python utils/package_skill.py skills/public/my-skill") - print(" python utils/package_skill.py skills/public/my-skill ./dist") - sys.exit(1) - - skill_path = sys.argv[1] - output_dir = sys.argv[2] if len(sys.argv) > 2 else None - - print(f"📦 Packaging skill: {skill_path}") - if output_dir: - print(f" Output directory: {output_dir}") - print() - - result = package_skill(skill_path, output_dir) - - if result: - sys.exit(0) - else: - sys.exit(1) - - -if __name__ == "__main__": - main() diff --git a/plugins/compound-engineering/skills/skill-creator/scripts/quick_validate.py b/plugins/compound-engineering/skills/skill-creator/scripts/quick_validate.py deleted file mode 100755 index 6fa6c63..0000000 --- a/plugins/compound-engineering/skills/skill-creator/scripts/quick_validate.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python3 -""" -Quick validation script for skills - minimal version -""" - -import sys -import os -import re -from pathlib import Path - -def validate_skill(skill_path): - """Basic validation of a skill""" - skill_path = Path(skill_path) - - # Check SKILL.md exists - skill_md = skill_path / 'SKILL.md' - if not skill_md.exists(): - return False, "SKILL.md not found" - - # Read and validate frontmatter - content = skill_md.read_text() - if not content.startswith('---'): - return False, "No YAML frontmatter found" - - # Extract frontmatter - match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL) - if not match: - return False, "Invalid frontmatter format" - - frontmatter = match.group(1) - - # Check required fields - if 'name:' not in frontmatter: - return False, "Missing 'name' in frontmatter" 
- if 'description:' not in frontmatter: - return False, "Missing 'description' in frontmatter" - - # Extract name for validation - name_match = re.search(r'name:\s*(.+)', frontmatter) - if name_match: - name = name_match.group(1).strip() - # Check naming convention (hyphen-case: lowercase with hyphens) - if not re.match(r'^[a-z0-9-]+$', name): - return False, f"Name '{name}' should be hyphen-case (lowercase letters, digits, and hyphens only)" - if name.startswith('-') or name.endswith('-') or '--' in name: - return False, f"Name '{name}' cannot start/end with hyphen or contain consecutive hyphens" - - # Extract and validate description - desc_match = re.search(r'description:\s*(.+)', frontmatter) - if desc_match: - description = desc_match.group(1).strip() - # Check for angle brackets - if '<' in description or '>' in description: - return False, "Description cannot contain angle brackets (< or >)" - - return True, "Skill is valid!" - -if __name__ == "__main__": - if len(sys.argv) != 2: - print("Usage: python quick_validate.py <skill_directory>") - sys.exit(1) - - valid, message = validate_skill(sys.argv[1]) - print(message) - sys.exit(0 if valid else 1) \ No newline at end of file diff --git a/plugins/compound-engineering/commands/slfg.md b/plugins/compound-engineering/skills/slfg/SKILL.md similarity index 62% rename from plugins/compound-engineering/commands/slfg.md rename to plugins/compound-engineering/skills/slfg/SKILL.md index eef3445..32d2e76 100644 --- a/plugins/compound-engineering/commands/slfg.md +++ b/plugins/compound-engineering/skills/slfg/SKILL.md @@ -5,20 +5,20 @@ argument-hint: "[feature description]" disable-model-invocation: true --- -Swarm-enabled LFG. Run these steps in order, parallelizing where indicated. +Swarm-enabled LFG. Run these steps in order, parallelizing where indicated. Do not stop between steps — complete every step through to the end. ## Sequential Phase -1. 
`/ralph-wiggum:ralph-loop "finish all slash commands" --completion-promise "DONE"` -2. `/workflows:plan $ARGUMENTS` +1. **Optional:** If the `ralph-wiggum` skill is available, run `/ralph-wiggum:ralph-loop "finish all slash commands" --completion-promise "DONE"`. If not available or it fails, skip and continue to step 2 immediately. +2. `/ce:plan $ARGUMENTS` 3. `/compound-engineering:deepen-plan` -4. `/workflows:work` — **Use swarm mode**: Make a Task list and launch an army of agent swarm subagents to build the plan +4. `/ce:work` — **Use swarm mode**: Make a Task list and launch an army of agent swarm subagents to build the plan ## Parallel Phase After work completes, launch steps 5 and 6 as **parallel swarm agents** (both only need code to be written): -5. `/workflows:review` — spawn as background Task agent +5. `/ce:review` — spawn as background Task agent 6. `/compound-engineering:test-browser` — spawn as background Task agent Wait for both to complete before continuing. diff --git a/plugins/compound-engineering/commands/test-browser.md b/plugins/compound-engineering/skills/test-browser/SKILL.md similarity index 77% rename from plugins/compound-engineering/commands/test-browser.md rename to plugins/compound-engineering/skills/test-browser/SKILL.md index 150dce0..f9f46e3 100644 --- a/plugins/compound-engineering/commands/test-browser.md +++ b/plugins/compound-engineering/skills/test-browser/SKILL.md @@ -1,7 +1,7 @@ --- name: test-browser description: Run browser tests on pages affected by current PR or branch -argument-hint: "[PR number, branch name, or 'current' for current branch]" +argument-hint: "[PR number, branch name, 'current', or --port PORT]" --- # Browser Test Command @@ -122,31 +122,82 @@ Build a list of URLs to test based on the mapping. </file_to_route_mapping> -### 4. Verify Server is Running +### 4. 
Detect Dev Server Port + +<detect_port> + +Determine the dev server port using this priority order: + +**Priority 1: Explicit argument** +If the user passed a port number (e.g., `/test-browser 5000` or `/test-browser --port 5000`), use that port directly. + +**Priority 2: CLAUDE.md / project instructions** +```bash +# Check CLAUDE.md for port references +grep -Eio '(port\s*[:=]\s*|localhost:)([0-9]{4,5})' CLAUDE.md 2>/dev/null | grep -Eo '[0-9]{4,5}' | head -1 +``` + +**Priority 3: package.json scripts** +```bash +# Check dev/start scripts for --port flags +grep -Eo '\-\-port[= ]+[0-9]{4,5}' package.json 2>/dev/null | grep -Eo '[0-9]{4,5}' | head -1 +``` + +**Priority 4: Environment files** +```bash +# Check .env, .env.local, .env.development for PORT= +grep -h '^PORT=' .env .env.local .env.development 2>/dev/null | tail -1 | cut -d= -f2 +``` + +**Priority 5: Default fallback** +If none of the above yields a port, default to `3000`. + +Store the result in a `PORT` variable for use in all subsequent steps. + +```bash +# Combined detection (run this) +PORT="${EXPLICIT_PORT:-}" +if [ -z "$PORT" ]; then + PORT=$(grep -Eio '(port\s*[:=]\s*|localhost:)([0-9]{4,5})' CLAUDE.md 2>/dev/null | grep -Eo '[0-9]{4,5}' | head -1) +fi +if [ -z "$PORT" ]; then + PORT=$(grep -Eo '\-\-port[= ]+[0-9]{4,5}' package.json 2>/dev/null | grep -Eo '[0-9]{4,5}' | head -1) +fi +if [ -z "$PORT" ]; then + PORT=$(grep -h '^PORT=' .env .env.local .env.development 2>/dev/null | tail -1 | cut -d= -f2) +fi +PORT="${PORT:-3000}" +echo "Using dev server port: $PORT" +``` + +</detect_port> + +### 5. 
Verify Server is Running <check_server> -Before testing, verify the local server is accessible: +Before testing, verify the local server is accessible using the detected port: ```bash -agent-browser open http://localhost:3000 +agent-browser open http://localhost:${PORT} agent-browser snapshot -i ``` If server is not running, inform user: ```markdown -**Server not running** +**Server not running on port ${PORT}** Please start your development server: - Rails: `bin/dev` or `rails server` - Node/Next.js: `npm run dev` +- Custom port: `/test-browser --port <your-port>` Then run `/test-browser` again. ``` </check_server> -### 5. Test Each Affected Page +### 6. Test Each Affected Page <test_pages> @@ -154,13 +205,13 @@ For each affected route, use agent-browser CLI commands (NOT Chrome MCP): **Step 1: Navigate and capture snapshot** ```bash -agent-browser open "http://localhost:3000/[route]" +agent-browser open "http://localhost:${PORT}/[route]" agent-browser snapshot -i ``` **Step 2: For headed mode (visual debugging)** ```bash -agent-browser --headed open "http://localhost:3000/[route]" +agent-browser --headed open "http://localhost:${PORT}/[route]" agent-browser --headed snapshot -i ``` @@ -185,7 +236,7 @@ agent-browser screenshot --full page-name-full.png # Full page </test_pages> -### 6. Human Verification (When Required) +### 7. Human Verification (When Required) <human_verification> @@ -214,7 +265,7 @@ Did it work correctly? </human_verification> -### 7. Handle Failures +### 8. Handle Failures <failure_handling> @@ -253,7 +304,7 @@ When a test fails: </failure_handling> -### 8. Test Summary +### 9. 
Test Summary <test_summary> @@ -263,7 +314,7 @@ After all tests complete, present summary: ## Browser Test Results **Test Scope:** PR #[number] / [branch name] -**Server:** http://localhost:3000 +**Server:** http://localhost:${PORT} ### Pages Tested: [count] @@ -295,7 +346,7 @@ After all tests complete, present summary: ## Quick Usage Examples ```bash -# Test current branch changes +# Test current branch changes (auto-detects port) /test-browser # Test specific PR @@ -303,6 +354,9 @@ After all tests complete, present summary: # Test specific branch /test-browser feature/new-dashboard + +# Test on a specific port +/test-browser --port 5000 ``` ## agent-browser CLI Reference diff --git a/plugins/compound-engineering/commands/test-xcode.md b/plugins/compound-engineering/skills/test-xcode/SKILL.md similarity index 97% rename from plugins/compound-engineering/commands/test-xcode.md rename to plugins/compound-engineering/skills/test-xcode/SKILL.md index 82d5c8b..10cba1b 100644 --- a/plugins/compound-engineering/commands/test-xcode.md +++ b/plugins/compound-engineering/skills/test-xcode/SKILL.md @@ -323,9 +323,9 @@ mcp__xcodebuildmcp__shutdown_simulator({ simulator_id: "[uuid]" }) /xcode-test current ``` -## Integration with /workflows:review +## Integration with /ce:review -When reviewing PRs that touch iOS code, the `/workflows:review` command can spawn this as a subagent: +When reviewing PRs that touch iOS code, the `/ce:review` command can spawn this as a subagent: ``` Task general-purpose("Run /xcode-test for scheme [name]. 
Build, install on simulator, test key screens, check for crashes.") diff --git a/plugins/compound-engineering/commands/triage.md b/plugins/compound-engineering/skills/triage/SKILL.md similarity index 100% rename from plugins/compound-engineering/commands/triage.md rename to plugins/compound-engineering/skills/triage/SKILL.md diff --git a/plugins/compound-engineering/skills/workflows-brainstorm/SKILL.md b/plugins/compound-engineering/skills/workflows-brainstorm/SKILL.md new file mode 100644 index 0000000..d421810 --- /dev/null +++ b/plugins/compound-engineering/skills/workflows-brainstorm/SKILL.md @@ -0,0 +1,10 @@ +--- +name: workflows:brainstorm +description: "[DEPRECATED] Use /ce:brainstorm instead — renamed for clarity." +argument-hint: "[feature idea or problem to explore]" +disable-model-invocation: true +--- + +NOTE: /workflows:brainstorm is deprecated. Please use /ce:brainstorm instead. This alias will be removed in a future version. + +/ce:brainstorm $ARGUMENTS diff --git a/plugins/compound-engineering/skills/workflows-compound/SKILL.md b/plugins/compound-engineering/skills/workflows-compound/SKILL.md new file mode 100644 index 0000000..aedbc9f --- /dev/null +++ b/plugins/compound-engineering/skills/workflows-compound/SKILL.md @@ -0,0 +1,10 @@ +--- +name: workflows:compound +description: "[DEPRECATED] Use /ce:compound instead — renamed for clarity." +argument-hint: "[optional: brief context about the fix]" +disable-model-invocation: true +--- + +NOTE: /workflows:compound is deprecated. Please use /ce:compound instead. This alias will be removed in a future version. 
+ +/ce:compound $ARGUMENTS diff --git a/plugins/compound-engineering/skills/workflows-plan/SKILL.md b/plugins/compound-engineering/skills/workflows-plan/SKILL.md new file mode 100644 index 0000000..d2407ea --- /dev/null +++ b/plugins/compound-engineering/skills/workflows-plan/SKILL.md @@ -0,0 +1,10 @@ +--- +name: workflows:plan +description: "[DEPRECATED] Use /ce:plan instead — renamed for clarity." +argument-hint: "[feature description, bug report, or improvement idea]" +disable-model-invocation: true +--- + +NOTE: /workflows:plan is deprecated. Please use /ce:plan instead. This alias will be removed in a future version. + +/ce:plan $ARGUMENTS diff --git a/plugins/compound-engineering/skills/workflows-review/SKILL.md b/plugins/compound-engineering/skills/workflows-review/SKILL.md new file mode 100644 index 0000000..7897e85 --- /dev/null +++ b/plugins/compound-engineering/skills/workflows-review/SKILL.md @@ -0,0 +1,10 @@ +--- +name: workflows:review +description: "[DEPRECATED] Use /ce:review instead — renamed for clarity." +argument-hint: "[PR number, GitHub URL, branch name, or latest]" +disable-model-invocation: true +--- + +NOTE: /workflows:review is deprecated. Please use /ce:review instead. This alias will be removed in a future version. + +/ce:review $ARGUMENTS diff --git a/plugins/compound-engineering/skills/workflows-work/SKILL.md b/plugins/compound-engineering/skills/workflows-work/SKILL.md new file mode 100644 index 0000000..16b38d5 --- /dev/null +++ b/plugins/compound-engineering/skills/workflows-work/SKILL.md @@ -0,0 +1,10 @@ +--- +name: workflows:work +description: "[DEPRECATED] Use /ce:work instead — renamed for clarity." +argument-hint: "[plan file, specification, or todo file path]" +disable-model-invocation: true +--- + +NOTE: /workflows:work is deprecated. Please use /ce:work instead. This alias will be removed in a future version. 
+ +/ce:work $ARGUMENTS diff --git a/src/commands/convert.ts b/src/commands/convert.ts index 9f62511..a616c5f 100644 --- a/src/commands/convert.ts +++ b/src/commands/convert.ts @@ -2,10 +2,12 @@ import { defineCommand } from "citty" import os from "os" import path from "path" import { loadClaudePlugin } from "../parsers/claude" -import { targets } from "../targets" +import { targets, validateScope } from "../targets" import type { PermissionMode } from "../converters/claude-to-opencode" import { ensureCodexAgentsFile } from "../utils/codex-agents" import { expandHome, resolveTargetHome } from "../utils/resolve-home" +import { resolveTargetOutputRoot } from "../utils/resolve-output" +import { detectInstalledTools } from "../utils/detect-tools" const permissionModes: PermissionMode[] = ["none", "broad", "from-commands"] @@ -23,7 +25,7 @@ export default defineCommand({ to: { type: "string", default: "opencode", - description: "Target format (opencode | codex | droid | cursor | pi | gemini)", + description: "Target format (opencode | codex | droid | cursor | pi | copilot | gemini | kiro | windsurf | openclaw | qwen | all)", }, output: { type: "string", @@ -40,6 +42,20 @@ export default defineCommand({ alias: "pi-home", description: "Write Pi output to this Pi root (ex: ~/.pi/agent or ./.pi)", }, + openclawHome: { + type: "string", + alias: "openclaw-home", + description: "Write OpenClaw output to this extensions root (ex: ~/.openclaw/extensions)", + }, + qwenHome: { + type: "string", + alias: "qwen-home", + description: "Write Qwen output to this Qwen extensions root (ex: ~/.qwen/extensions)", + }, + scope: { + type: "string", + description: "Scope level: global | workspace (default varies by target)", + }, also: { type: "string", description: "Comma-separated extra targets to generate (ex: codex)", @@ -62,6 +78,71 @@ export default defineCommand({ }, async run({ args }) { const targetName = String(args.to) + + const permissions = String(args.permissions) + if 
(!permissionModes.includes(permissions as PermissionMode)) { + throw new Error(`Unknown permissions mode: ${permissions}`) + } + + const plugin = await loadClaudePlugin(String(args.source)) + const outputRoot = resolveOutputRoot(args.output) + const hasExplicitOutput = Boolean(args.output && String(args.output).trim()) + const codexHome = resolveTargetHome(args.codexHome, path.join(os.homedir(), ".codex")) + const piHome = resolveTargetHome(args.piHome, path.join(os.homedir(), ".pi", "agent")) + const openclawHome = resolveTargetHome(args.openclawHome, path.join(os.homedir(), ".openclaw", "extensions")) + const qwenHome = resolveTargetHome(args.qwenHome, path.join(os.homedir(), ".qwen", "extensions")) + + const options = { + agentMode: String(args.agentMode) === "primary" ? "primary" : "subagent", + inferTemperature: Boolean(args.inferTemperature), + permissions: permissions as PermissionMode, + } + + if (targetName === "all") { + const detected = await detectInstalledTools() + const activeTargets = detected.filter((t) => t.detected) + + if (activeTargets.length === 0) { + console.log("No AI coding tools detected. Install at least one tool first.") + return + } + + console.log(`Detected ${activeTargets.length} tool(s):`) + for (const tool of detected) { + console.log(` ${tool.detected ? 
"✓" : "✗"} ${tool.name} — ${tool.reason}`) + } + + for (const tool of activeTargets) { + const handler = targets[tool.name] + if (!handler || !handler.implemented) { + console.warn(`Skipping ${tool.name}: not implemented.`) + continue + } + const bundle = handler.convert(plugin, options) + if (!bundle) { + console.warn(`Skipping ${tool.name}: no output returned.`) + continue + } + const root = resolveTargetOutputRoot({ + targetName: tool.name, + outputRoot, + codexHome, + piHome, + openclawHome, + qwenHome, + pluginName: plugin.manifest.name, + hasExplicitOutput, + }) + await handler.write(root, bundle) + console.log(`Converted ${plugin.manifest.name} to ${tool.name} at ${root}`) + } + + if (activeTargets.some((t) => t.name === "codex")) { + await ensureCodexAgentsFile(codexHome) + } + return + } + const target = targets[targetName] if (!target) { throw new Error(`Unknown target: ${targetName}`) @@ -71,29 +152,25 @@ export default defineCommand({ throw new Error(`Target ${targetName} is registered but not implemented yet.`) } - const permissions = String(args.permissions) - if (!permissionModes.includes(permissions as PermissionMode)) { - throw new Error(`Unknown permissions mode: ${permissions}`) - } + const resolvedScope = validateScope(targetName, target, args.scope ? String(args.scope) : undefined) - const plugin = await loadClaudePlugin(String(args.source)) - const outputRoot = resolveOutputRoot(args.output) - const codexHome = resolveTargetHome(args.codexHome, path.join(os.homedir(), ".codex")) - const piHome = resolveTargetHome(args.piHome, path.join(os.homedir(), ".pi", "agent")) - - const options = { - agentMode: String(args.agentMode) === "primary" ? 
"primary" : "subagent", - inferTemperature: Boolean(args.inferTemperature), - permissions: permissions as PermissionMode, - } - - const primaryOutputRoot = resolveTargetOutputRoot(targetName, outputRoot, codexHome, piHome) + const primaryOutputRoot = resolveTargetOutputRoot({ + targetName, + outputRoot, + codexHome, + piHome, + openclawHome, + qwenHome, + pluginName: plugin.manifest.name, + hasExplicitOutput, + scope: resolvedScope, + }) const bundle = target.convert(plugin, options) if (!bundle) { throw new Error(`Target ${targetName} did not return a bundle.`) } - await target.write(primaryOutputRoot, bundle) + await target.write(primaryOutputRoot, bundle, resolvedScope) console.log(`Converted ${plugin.manifest.name} to ${targetName} at ${primaryOutputRoot}`) const extraTargets = parseExtraTargets(args.also) @@ -113,8 +190,18 @@ export default defineCommand({ console.warn(`Skipping ${extra}: no output returned.`) continue } - const extraRoot = resolveTargetOutputRoot(extra, path.join(outputRoot, extra), codexHome, piHome) - await handler.write(extraRoot, extraBundle) + const extraRoot = resolveTargetOutputRoot({ + targetName: extra, + outputRoot: path.join(outputRoot, extra), + codexHome, + piHome, + openclawHome, + qwenHome, + pluginName: plugin.manifest.name, + hasExplicitOutput, + scope: handler.defaultScope, + }) + await handler.write(extraRoot, extraBundle, handler.defaultScope) console.log(`Converted ${plugin.manifest.name} to ${extra} at ${extraRoot}`) } @@ -139,12 +226,3 @@ function resolveOutputRoot(value: unknown): string { } return process.cwd() } - -function resolveTargetOutputRoot(targetName: string, outputRoot: string, codexHome: string, piHome: string): string { - if (targetName === "codex") return codexHome - if (targetName === "pi") return piHome - if (targetName === "droid") return path.join(os.homedir(), ".factory") - if (targetName === "cursor") return path.join(outputRoot, ".cursor") - if (targetName === "gemini") return path.join(outputRoot, 
".gemini") - return outputRoot -} diff --git a/src/commands/install.ts b/src/commands/install.ts index 35506e8..a1f2f1c 100644 --- a/src/commands/install.ts +++ b/src/commands/install.ts @@ -3,11 +3,13 @@ import { promises as fs } from "fs" import os from "os" import path from "path" import { loadClaudePlugin } from "../parsers/claude" -import { targets } from "../targets" +import { targets, validateScope } from "../targets" import { pathExists } from "../utils/files" import type { PermissionMode } from "../converters/claude-to-opencode" import { ensureCodexAgentsFile } from "../utils/codex-agents" import { expandHome, resolveTargetHome } from "../utils/resolve-home" +import { resolveTargetOutputRoot } from "../utils/resolve-output" +import { detectInstalledTools } from "../utils/detect-tools" const permissionModes: PermissionMode[] = ["none", "broad", "from-commands"] @@ -25,7 +27,7 @@ export default defineCommand({ to: { type: "string", default: "opencode", - description: "Target format (opencode | codex | droid | cursor | pi | gemini)", + description: "Target format (opencode | codex | droid | cursor | pi | copilot | gemini | kiro | windsurf | openclaw | qwen | all)", }, output: { type: "string", @@ -42,14 +44,28 @@ export default defineCommand({ alias: "pi-home", description: "Write Pi output to this Pi root (ex: ~/.pi/agent or ./.pi)", }, + openclawHome: { + type: "string", + alias: "openclaw-home", + description: "Write OpenClaw output to this extensions root (ex: ~/.openclaw/extensions)", + }, + qwenHome: { + type: "string", + alias: "qwen-home", + description: "Write Qwen output to this Qwen extensions root (ex: ~/.qwen/extensions)", + }, + scope: { + type: "string", + description: "Scope level: global | workspace (default varies by target)", + }, also: { type: "string", description: "Comma-separated extra targets to generate (ex: codex)", }, permissions: { type: "string", - default: "broad", - description: "Permission mapping: none | broad | 
from-commands", + default: "none", // Default is "none" -- writing global permissions to opencode.json pollutes user config. See ADR-003. + description: "Permission mapping written to opencode.json: none (default) | broad | from-command", }, agentMode: { type: "string", @@ -64,13 +80,6 @@ export default defineCommand({ }, async run({ args }) { const targetName = String(args.to) - const target = targets[targetName] - if (!target) { - throw new Error(`Unknown target: ${targetName}`) - } - if (!target.implemented) { - throw new Error(`Target ${targetName} is registered but not implemented yet.`) - } const permissions = String(args.permissions) if (!permissionModes.includes(permissions as PermissionMode)) { @@ -84,6 +93,9 @@ export default defineCommand({ const outputRoot = resolveOutputRoot(args.output) const codexHome = resolveTargetHome(args.codexHome, path.join(os.homedir(), ".codex")) const piHome = resolveTargetHome(args.piHome, path.join(os.homedir(), ".pi", "agent")) + const hasExplicitOutput = Boolean(args.output && String(args.output).trim()) + const openclawHome = resolveTargetHome(args.openclawHome, path.join(os.homedir(), ".openclaw", "extensions")) + const qwenHome = resolveTargetHome(args.qwenHome, path.join(os.homedir(), ".qwen", "extensions")) const options = { agentMode: String(args.agentMode) === "primary" ? "primary" : "subagent", @@ -91,13 +103,77 @@ export default defineCommand({ permissions: permissions as PermissionMode, } + if (targetName === "all") { + const detected = await detectInstalledTools() + const activeTargets = detected.filter((t) => t.detected) + + if (activeTargets.length === 0) { + console.log("No AI coding tools detected. Install at least one tool first.") + return + } + + console.log(`Detected ${activeTargets.length} tool(s):`) + for (const tool of detected) { + console.log(` ${tool.detected ? 
"✓" : "✗"} ${tool.name} — ${tool.reason}`) + } + + for (const tool of activeTargets) { + const handler = targets[tool.name] + if (!handler || !handler.implemented) { + console.warn(`Skipping ${tool.name}: not implemented.`) + continue + } + const bundle = handler.convert(plugin, options) + if (!bundle) { + console.warn(`Skipping ${tool.name}: no output returned.`) + continue + } + const root = resolveTargetOutputRoot({ + targetName: tool.name, + outputRoot, + codexHome, + piHome, + openclawHome, + qwenHome, + pluginName: plugin.manifest.name, + hasExplicitOutput, + }) + await handler.write(root, bundle) + console.log(`Installed ${plugin.manifest.name} to ${tool.name} at ${root}`) + } + + if (activeTargets.some((t) => t.name === "codex")) { + await ensureCodexAgentsFile(codexHome) + } + return + } + + const target = targets[targetName] + if (!target) { + throw new Error(`Unknown target: ${targetName}`) + } + if (!target.implemented) { + throw new Error(`Target ${targetName} is registered but not implemented yet.`) + } + + const resolvedScope = validateScope(targetName, target, args.scope ? 
String(args.scope) : undefined) + const bundle = target.convert(plugin, options) if (!bundle) { throw new Error(`Target ${targetName} did not return a bundle.`) } - const hasExplicitOutput = Boolean(args.output && String(args.output).trim()) - const primaryOutputRoot = resolveTargetOutputRoot(targetName, outputRoot, codexHome, piHome, hasExplicitOutput) - await target.write(primaryOutputRoot, bundle) + const primaryOutputRoot = resolveTargetOutputRoot({ + targetName, + outputRoot, + codexHome, + piHome, + openclawHome, + qwenHome, + pluginName: plugin.manifest.name, + hasExplicitOutput, + scope: resolvedScope, + }) + await target.write(primaryOutputRoot, bundle, resolvedScope) console.log(`Installed ${plugin.manifest.name} to ${primaryOutputRoot}`) const extraTargets = parseExtraTargets(args.also) @@ -117,8 +193,18 @@ export default defineCommand({ console.warn(`Skipping ${extra}: no output returned.`) continue } - const extraRoot = resolveTargetOutputRoot(extra, path.join(outputRoot, extra), codexHome, piHome, hasExplicitOutput) - await handler.write(extraRoot, extraBundle) + const extraRoot = resolveTargetOutputRoot({ + targetName: extra, + outputRoot: path.join(outputRoot, extra), + codexHome, + piHome, + openclawHome, + qwenHome, + pluginName: plugin.manifest.name, + hasExplicitOutput, + scope: handler.defaultScope, + }) + await handler.write(extraRoot, extraBundle, handler.defaultScope) console.log(`Installed ${plugin.manifest.name} to ${extraRoot}`) } @@ -169,27 +255,6 @@ function resolveOutputRoot(value: unknown): string { return path.join(os.homedir(), ".config", "opencode") } -function resolveTargetOutputRoot( - targetName: string, - outputRoot: string, - codexHome: string, - piHome: string, - hasExplicitOutput: boolean, -): string { - if (targetName === "codex") return codexHome - if (targetName === "pi") return piHome - if (targetName === "droid") return path.join(os.homedir(), ".factory") - if (targetName === "cursor") { - const base = hasExplicitOutput 
? outputRoot : process.cwd() - return path.join(base, ".cursor") - } - if (targetName === "gemini") { - const base = hasExplicitOutput ? outputRoot : process.cwd() - return path.join(base, ".gemini") - } - return outputRoot -} - async function resolveGitHubPluginPath(pluginName: string): Promise<ResolvedPluginPath> { const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "compound-plugin-")) const source = resolveGitHubSource() diff --git a/src/commands/sync.ts b/src/commands/sync.ts index e5b576e..690de8c 100644 --- a/src/commands/sync.ts +++ b/src/commands/sync.ts @@ -1,60 +1,34 @@ import { defineCommand } from "citty" -import os from "os" import path from "path" import { loadClaudeHome } from "../parsers/claude-home" -import { syncToOpenCode } from "../sync/opencode" -import { syncToCodex } from "../sync/codex" -import { syncToPi } from "../sync/pi" -import { syncToDroid } from "../sync/droid" -import { syncToCursor } from "../sync/cursor" +import { + getDefaultSyncRegistryContext, + getSyncTarget, + isSyncTargetName, + syncTargetNames, + type SyncTargetName, +} from "../sync/registry" import { expandHome } from "../utils/resolve-home" +import { hasPotentialSecrets } from "../utils/secrets" +import { detectInstalledTools } from "../utils/detect-tools" -const validTargets = ["opencode", "codex", "pi", "droid", "cursor"] as const -type SyncTarget = (typeof validTargets)[number] +const validTargets = [...syncTargetNames, "all"] as const +type SyncTarget = SyncTargetName | "all" function isValidTarget(value: string): value is SyncTarget { - return (validTargets as readonly string[]).includes(value) -} - -/** Check if any MCP servers have env vars that might contain secrets */ -function hasPotentialSecrets(mcpServers: Record<string, unknown>): boolean { - const sensitivePatterns = /key|token|secret|password|credential|api_key/i - for (const server of Object.values(mcpServers)) { - const env = (server as { env?: Record<string, string> }).env - if (env) { - for 
(const key of Object.keys(env)) { - if (sensitivePatterns.test(key)) return true - } - } - } - return false -} - -function resolveOutputRoot(target: SyncTarget): string { - switch (target) { - case "opencode": - return path.join(os.homedir(), ".config", "opencode") - case "codex": - return path.join(os.homedir(), ".codex") - case "pi": - return path.join(os.homedir(), ".pi", "agent") - case "droid": - return path.join(os.homedir(), ".factory") - case "cursor": - return path.join(process.cwd(), ".cursor") - } + return value === "all" || isSyncTargetName(value) } export default defineCommand({ meta: { name: "sync", - description: "Sync Claude Code config (~/.claude/) to OpenCode, Codex, Pi, Droid, or Cursor", + description: "Sync Claude Code config (~/.claude/) to supported provider configs and skills", }, args: { target: { type: "string", - required: true, - description: "Target: opencode | codex | pi | droid | cursor", + default: "all", + description: `Target: ${syncTargetNames.join(" | ")} | all (default: all)`, }, claudeHome: { type: "string", @@ -67,7 +41,8 @@ export default defineCommand({ throw new Error(`Unknown target: ${args.target}. Use one of: ${validTargets.join(", ")}`) } - const claudeHome = expandHome(args.claudeHome ?? path.join(os.homedir(), ".claude")) + const { home, cwd } = getDefaultSyncRegistryContext() + const claudeHome = expandHome(args.claudeHome ?? 
path.join(home, ".claude")) const config = await loadClaudeHome(claudeHome) // Warn about potential secrets in MCP env vars @@ -78,30 +53,36 @@ export default defineCommand({ ) } - console.log( - `Syncing ${config.skills.length} skills, ${Object.keys(config.mcpServers).length} MCP servers...`, - ) + if (args.target === "all") { + const detected = await detectInstalledTools() + const activeTargets = detected.filter((t) => t.detected).map((t) => t.name) - const outputRoot = resolveOutputRoot(args.target) + if (activeTargets.length === 0) { + console.log("No AI coding tools detected.") + return + } - switch (args.target) { - case "opencode": - await syncToOpenCode(config, outputRoot) - break - case "codex": - await syncToCodex(config, outputRoot) - break - case "pi": - await syncToPi(config, outputRoot) - break - case "droid": - await syncToDroid(config, outputRoot) - break - case "cursor": - await syncToCursor(config, outputRoot) - break + console.log(`Syncing to ${activeTargets.length} detected tool(s)...`) + for (const tool of detected) { + console.log(` ${tool.detected ? "✓" : "✗"} ${tool.name} — ${tool.reason}`) + } + + for (const name of activeTargets) { + const target = getSyncTarget(name as SyncTargetName) + const outputRoot = target.resolveOutputRoot(home, cwd) + await target.sync(config, outputRoot) + console.log(`✓ Synced to ${name}: ${outputRoot}`) + } + return } + console.log( + `Syncing ${config.skills.length} skills, ${config.commands?.length ?? 
0} commands, ${Object.keys(config.mcpServers).length} MCP servers...`, + ) + + const target = getSyncTarget(args.target as SyncTargetName) + const outputRoot = target.resolveOutputRoot(home, cwd) + await target.sync(config, outputRoot) console.log(`✓ Synced to ${args.target}: ${outputRoot}`) }, }) diff --git a/src/converters/claude-to-cursor.ts b/src/converters/claude-to-copilot.ts similarity index 50% rename from src/converters/claude-to-cursor.ts rename to src/converters/claude-to-copilot.ts index d6100d8..6a7722c 100644 --- a/src/converters/claude-to-cursor.ts +++ b/src/converters/claude-to-copilot.ts @@ -1,43 +1,63 @@ import { formatFrontmatter } from "../utils/frontmatter" import type { ClaudeAgent, ClaudeCommand, ClaudeMcpServer, ClaudePlugin } from "../types/claude" -import type { CursorBundle, CursorCommand, CursorMcpServer, CursorRule } from "../types/cursor" +import type { + CopilotAgent, + CopilotBundle, + CopilotGeneratedSkill, + CopilotMcpServer, +} from "../types/copilot" import type { ClaudeToOpenCodeOptions } from "./claude-to-opencode" -export type ClaudeToCursorOptions = ClaudeToOpenCodeOptions +export type ClaudeToCopilotOptions = ClaudeToOpenCodeOptions -export function convertClaudeToCursor( +const COPILOT_BODY_CHAR_LIMIT = 30_000 + +export function convertClaudeToCopilot( plugin: ClaudePlugin, - _options: ClaudeToCursorOptions, -): CursorBundle { - const usedRuleNames = new Set<string>() - const usedCommandNames = new Set<string>() + _options: ClaudeToCopilotOptions, +): CopilotBundle { + const usedAgentNames = new Set<string>() + const usedSkillNames = new Set<string>() - const rules = plugin.agents.map((agent) => convertAgentToRule(agent, usedRuleNames)) - const commands = plugin.commands.map((command) => convertCommand(command, usedCommandNames)) - const skillDirs = plugin.skills.map((skill) => ({ - name: skill.name, - sourceDir: skill.sourceDir, - })) + const agents = plugin.agents.map((agent) => convertAgent(agent, usedAgentNames)) - 
const mcpServers = convertMcpServers(plugin.mcpServers) + // Reserve skill names first so generated skills (from commands) don't collide + const skillDirs = plugin.skills.map((skill) => { + usedSkillNames.add(skill.name) + return { + name: skill.name, + sourceDir: skill.sourceDir, + } + }) + + const generatedSkills = plugin.commands.map((command) => + convertCommandToSkill(command, usedSkillNames), + ) + + const mcpConfig = convertMcpServers(plugin.mcpServers) if (plugin.hooks && Object.keys(plugin.hooks.hooks).length > 0) { - console.warn("Warning: Cursor does not support hooks. Hooks were skipped during conversion.") + console.warn("Warning: Copilot does not support hooks. Hooks were skipped during conversion.") } - return { rules, commands, skillDirs, mcpServers } + return { agents, generatedSkills, skillDirs, mcpConfig } } -function convertAgentToRule(agent: ClaudeAgent, usedNames: Set<string>): CursorRule { +function convertAgent(agent: ClaudeAgent, usedNames: Set<string>): CopilotAgent { const name = uniqueName(normalizeName(agent.name), usedNames) const description = agent.description ?? `Converted from Claude agent ${agent.name}` const frontmatter: Record<string, unknown> = { description, - alwaysApply: false, + tools: ["*"], + infer: true, } - let body = transformContentForCursor(agent.body.trim()) + if (agent.model) { + frontmatter.model = agent.model + } + + let body = transformContentForCopilot(agent.body.trim()) if (agent.capabilities && agent.capabilities.length > 0) { const capabilities = agent.capabilities.map((c) => `- ${c}`).join("\n") body = `## Capabilities\n${capabilities}\n\n${body}`.trim() @@ -46,39 +66,44 @@ function convertAgentToRule(agent: ClaudeAgent, usedNames: Set<string>): CursorR body = `Instructions converted from the ${agent.name} agent.` } + if (body.length > COPILOT_BODY_CHAR_LIMIT) { + console.warn( + `Warning: Agent "${agent.name}" body exceeds ${COPILOT_BODY_CHAR_LIMIT} characters (${body.length}). 
Copilot may truncate it.`, + ) + } + const content = formatFrontmatter(frontmatter, body) return { name, content } } -function convertCommand(command: ClaudeCommand, usedNames: Set<string>): CursorCommand { +function convertCommandToSkill( + command: ClaudeCommand, + usedNames: Set<string>, +): CopilotGeneratedSkill { const name = uniqueName(flattenCommandName(command.name), usedNames) - const sections: string[] = [] - - if (command.description) { - sections.push(`<!-- ${command.description} -->`) + const frontmatter: Record<string, unknown> = { + name, } + if (command.description) { + frontmatter.description = command.description + } + + const sections: string[] = [] if (command.argumentHint) { sections.push(`## Arguments\n${command.argumentHint}`) } - const transformedBody = transformContentForCursor(command.body.trim()) + const transformedBody = transformContentForCopilot(command.body.trim()) sections.push(transformedBody) - const content = sections.filter(Boolean).join("\n\n").trim() + const body = sections.filter(Boolean).join("\n\n").trim() + const content = formatFrontmatter(frontmatter, body) return { name, content } } -/** - * Transform Claude Code content to Cursor-compatible content. - * - * 1. Task agent calls: Task agent-name(args) -> Use the agent-name skill to: args - * 2. Slash commands: /workflows:plan -> /plan (flatten namespace) - * 3. Path rewriting: .claude/ -> .cursor/ - * 4. Agent references: @agent-name -> the agent-name rule - */ -export function transformContentForCursor(body: string): string { +export function transformContentForCopilot(body: string): string { let result = body // 1. Transform Task agent calls @@ -88,24 +113,25 @@ export function transformContentForCursor(body: string): string { return `${prefix}Use the ${skillName} skill to: ${args.trim()}` }) - // 2. Transform slash command references (flatten namespaces) + // 2. 
Transform slash command references (replace colons with hyphens) const slashCommandPattern = /(?<![:\w])\/([a-z][a-z0-9_:-]*?)(?=[\s,."')\]}`]|$)/gi result = result.replace(slashCommandPattern, (match, commandName: string) => { if (commandName.includes("/")) return match if (["dev", "tmp", "etc", "usr", "var", "bin", "home"].includes(commandName)) return match - const flattened = flattenCommandName(commandName) - return `/${flattened}` + const normalized = flattenCommandName(commandName) + return `/${normalized}` }) - // 3. Rewrite .claude/ paths to .cursor/ + // 3. Rewrite .claude/ paths to .github/ and ~/.claude/ to ~/.copilot/ result = result - .replace(/~\/\.claude\//g, "~/.cursor/") - .replace(/\.claude\//g, ".cursor/") + .replace(/~\/\.claude\//g, "~/.copilot/") + .replace(/\.claude\//g, ".github/") // 4. Transform @agent-name references - const agentRefPattern = /@([a-z][a-z0-9-]*-(?:agent|reviewer|researcher|analyst|specialist|oracle|sentinel|guardian|strategist))/gi + const agentRefPattern = + /@([a-z][a-z0-9-]*-(?:agent|reviewer|researcher|analyst|specialist|oracle|sentinel|guardian|strategist))/gi result = result.replace(agentRefPattern, (_match, agentName: string) => { - return `the ${normalizeName(agentName)} rule` + return `the ${normalizeName(agentName)} agent` }) return result @@ -113,29 +139,47 @@ export function transformContentForCursor(body: string): string { function convertMcpServers( servers?: Record<string, ClaudeMcpServer>, -): Record<string, CursorMcpServer> | undefined { +): Record<string, CopilotMcpServer> | undefined { if (!servers || Object.keys(servers).length === 0) return undefined - const result: Record<string, CursorMcpServer> = {} + const result: Record<string, CopilotMcpServer> = {} for (const [name, server] of Object.entries(servers)) { - const entry: CursorMcpServer = {} + const entry: CopilotMcpServer = { + type: server.command ? 
"local" : "sse", + tools: ["*"], + } + if (server.command) { entry.command = server.command if (server.args && server.args.length > 0) entry.args = server.args - if (server.env && Object.keys(server.env).length > 0) entry.env = server.env } else if (server.url) { entry.url = server.url if (server.headers && Object.keys(server.headers).length > 0) entry.headers = server.headers } + + if (server.env && Object.keys(server.env).length > 0) { + entry.env = prefixEnvVars(server.env) + } + result[name] = entry } return result } +function prefixEnvVars(env: Record<string, string>): Record<string, string> { + const result: Record<string, string> = {} + for (const [key, value] of Object.entries(env)) { + if (key.startsWith("COPILOT_MCP_")) { + result[key] = value + } else { + result[`COPILOT_MCP_${key}`] = value + } + } + return result +} + function flattenCommandName(name: string): string { - const colonIndex = name.lastIndexOf(":") - const base = colonIndex >= 0 ? name.slice(colonIndex + 1) : name - return normalizeName(base) + return normalizeName(name) } function normalizeName(value: string): string { diff --git a/src/converters/claude-to-kiro.ts b/src/converters/claude-to-kiro.ts new file mode 100644 index 0000000..2711267 --- /dev/null +++ b/src/converters/claude-to-kiro.ts @@ -0,0 +1,262 @@ +import { readFileSync, existsSync } from "fs" +import path from "path" +import { formatFrontmatter } from "../utils/frontmatter" +import type { ClaudeAgent, ClaudeCommand, ClaudeMcpServer, ClaudePlugin } from "../types/claude" +import type { + KiroAgent, + KiroAgentConfig, + KiroBundle, + KiroMcpServer, + KiroSkill, + KiroSteeringFile, +} from "../types/kiro" +import type { ClaudeToOpenCodeOptions } from "./claude-to-opencode" + +export type ClaudeToKiroOptions = ClaudeToOpenCodeOptions + +const KIRO_SKILL_NAME_MAX_LENGTH = 64 +const KIRO_SKILL_NAME_PATTERN = /^[a-z][a-z0-9-]*$/ +const KIRO_DESCRIPTION_MAX_LENGTH = 1024 + +const CLAUDE_TO_KIRO_TOOLS: Record<string, string> = { + 
Bash: "shell", + Write: "write", + Read: "read", + Edit: "write", // NOTE: Kiro write is full-file, not surgical edit. Lossy mapping. + Glob: "glob", + Grep: "grep", + WebFetch: "web_fetch", + Task: "use_subagent", +} + +export function convertClaudeToKiro( + plugin: ClaudePlugin, + _options: ClaudeToKiroOptions, +): KiroBundle { + const usedSkillNames = new Set<string>() + + // Pass-through skills are processed first — they're the source of truth + const skillDirs = plugin.skills.map((skill) => ({ + name: skill.name, + sourceDir: skill.sourceDir, + })) + for (const skill of skillDirs) { + usedSkillNames.add(normalizeName(skill.name)) + } + + // Convert agents to Kiro custom agents + const agentNames = plugin.agents.map((a) => normalizeName(a.name)) + const agents = plugin.agents.map((agent) => convertAgentToKiroAgent(agent, agentNames)) + + // Convert commands to skills (generated) + const generatedSkills = plugin.commands.map((command) => + convertCommandToSkill(command, usedSkillNames, agentNames), + ) + + // Convert MCP servers (stdio only) + const mcpServers = convertMcpServers(plugin.mcpServers) + + // Build steering files from CLAUDE.md + const steeringFiles = buildSteeringFiles(plugin, agentNames) + + // Warn about hooks + if (plugin.hooks && Object.keys(plugin.hooks.hooks).length > 0) { + console.warn( + "Warning: Kiro CLI hooks use a different format (preToolUse/postToolUse inside agent configs). Hooks were skipped during conversion.", + ) + } + + return { agents, generatedSkills, skillDirs, steeringFiles, mcpServers } +} + +function convertAgentToKiroAgent(agent: ClaudeAgent, knownAgentNames: string[]): KiroAgent { + const name = normalizeName(agent.name) + const description = sanitizeDescription( + agent.description ?? 
`Use this agent for ${agent.name} tasks`, + ) + + const config: KiroAgentConfig = { + name, + description, + prompt: `file://./prompts/${name}.md`, + tools: ["*"], + resources: [ + "file://.kiro/steering/**/*.md", + "skill://.kiro/skills/**/SKILL.md", + ], + includeMcpJson: true, + welcomeMessage: `Switching to the ${name} agent. ${description}`, + } + + let body = transformContentForKiro(agent.body.trim(), knownAgentNames) + if (agent.capabilities && agent.capabilities.length > 0) { + const capabilities = agent.capabilities.map((c) => `- ${c}`).join("\n") + body = `## Capabilities\n${capabilities}\n\n${body}`.trim() + } + if (body.length === 0) { + body = `Instructions converted from the ${agent.name} agent.` + } + + return { name, config, promptContent: body } +} + +function convertCommandToSkill( + command: ClaudeCommand, + usedNames: Set<string>, + knownAgentNames: string[], +): KiroSkill { + const rawName = normalizeName(command.name) + const name = uniqueName(rawName, usedNames) + + const description = sanitizeDescription( + command.description ?? `Converted from Claude command ${command.name}`, + ) + + const frontmatter: Record<string, unknown> = { name, description } + + let body = transformContentForKiro(command.body.trim(), knownAgentNames) + if (body.length === 0) { + body = `Instructions converted from the ${command.name} command.` + } + + const content = formatFrontmatter(frontmatter, body) + return { name, content } +} + +/** + * Transform Claude Code content to Kiro-compatible content. + * + * 1. Task agent calls: Task agent-name(args) -> Use the use_subagent tool ... + * 2. Path rewriting: .claude/ -> .kiro/, ~/.claude/ -> ~/.kiro/ + * 3. Slash command refs: /workflows:plan -> use the workflows-plan skill + * 4. Claude tool names: Bash -> shell, Read -> read, etc. + * 5. 
Agent refs: @agent-name -> the agent-name agent (only for known agent names) + */ +export function transformContentForKiro(body: string, knownAgentNames: string[] = []): string { + let result = body + + // 1. Transform Task agent calls + const taskPattern = /^(\s*-?\s*)Task\s+([a-z][a-z0-9-]*)\(([^)]+)\)/gm + result = result.replace(taskPattern, (_match, prefix: string, agentName: string, args: string) => { + return `${prefix}Use the use_subagent tool to delegate to the ${normalizeName(agentName)} agent: ${args.trim()}` + }) + + // 2. Rewrite .claude/ paths to .kiro/ (with word-boundary-like lookbehind) + result = result.replace(/(?<=^|\s|["'`])~\/\.claude\//gm, "~/.kiro/") + result = result.replace(/(?<=^|\s|["'`])\.claude\//gm, ".kiro/") + + // 3. Slash command refs: /command-name -> skill activation language + result = result.replace(/(?<=^|\s)`?\/([a-zA-Z][a-zA-Z0-9_:-]*)`?/gm, (_match, cmdName: string) => { + const skillName = normalizeName(cmdName) + return `the ${skillName} skill` + }) + + // 4. Claude tool names -> Kiro tool names + for (const [claudeTool, kiroTool] of Object.entries(CLAUDE_TO_KIRO_TOOLS)) { + // Match tool name references: "the X tool", "using X", "use X to" + const toolPattern = new RegExp(`\\b${claudeTool}\\b(?=\\s+tool|\\s+to\\s)`, "g") + result = result.replace(toolPattern, kiroTool) + } + + // 5. 
Transform @agent-name references (only for known agent names) + if (knownAgentNames.length > 0) { + const escapedNames = knownAgentNames.map((n) => n.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")) + const agentRefPattern = new RegExp(`@(${escapedNames.join("|")})\\b`, "g") + result = result.replace(agentRefPattern, (_match, agentName: string) => { + return `the ${normalizeName(agentName)} agent` + }) + } + + return result +} + +function convertMcpServers( + servers?: Record<string, ClaudeMcpServer>, +): Record<string, KiroMcpServer> { + if (!servers || Object.keys(servers).length === 0) return {} + + const result: Record<string, KiroMcpServer> = {} + for (const [name, server] of Object.entries(servers)) { + if (!server.command) { + console.warn( + `Warning: MCP server "${name}" has no command (HTTP/SSE transport). Kiro only supports stdio. Skipping.`, + ) + continue + } + + const entry: KiroMcpServer = { command: server.command } + if (server.args && server.args.length > 0) entry.args = server.args + if (server.env && Object.keys(server.env).length > 0) entry.env = server.env + + console.log(`MCP server "${name}" will execute: ${server.command}${server.args ? 
" " + server.args.join(" ") : ""}`) + result[name] = entry + } + return result +} + +function buildSteeringFiles(plugin: ClaudePlugin, knownAgentNames: string[]): KiroSteeringFile[] { + const claudeMdPath = path.join(plugin.root, "CLAUDE.md") + if (!existsSync(claudeMdPath)) return [] + + let content: string + try { + content = readFileSync(claudeMdPath, "utf8") + } catch { + return [] + } + + if (!content || content.trim().length === 0) return [] + + const transformed = transformContentForKiro(content, knownAgentNames) + return [{ name: "compound-engineering", content: transformed }] +} + +function normalizeName(value: string): string { + const trimmed = value.trim() + if (!trimmed) return "item" + let normalized = trimmed + .toLowerCase() + .replace(/[\\/]+/g, "-") + .replace(/[:\s]+/g, "-") + .replace(/[^a-z0-9_-]+/g, "-") + .replace(/-+/g, "-") // Collapse consecutive hyphens (Agent Skills standard) + .replace(/^-+|-+$/g, "") + + // Enforce max length (truncate at last hyphen boundary) + if (normalized.length > KIRO_SKILL_NAME_MAX_LENGTH) { + normalized = normalized.slice(0, KIRO_SKILL_NAME_MAX_LENGTH) + const lastHyphen = normalized.lastIndexOf("-") + if (lastHyphen > 0) { + normalized = normalized.slice(0, lastHyphen) + } + normalized = normalized.replace(/-+$/g, "") + } + + // Ensure name starts with a letter + if (normalized.length === 0 || !/^[a-z]/.test(normalized)) { + return "item" + } + + return normalized +} + +function sanitizeDescription(value: string, maxLength = KIRO_DESCRIPTION_MAX_LENGTH): string { + const normalized = value.replace(/\s+/g, " ").trim() + if (normalized.length <= maxLength) return normalized + const ellipsis = "..." 
+ return normalized.slice(0, Math.max(0, maxLength - ellipsis.length)).trimEnd() + ellipsis +} + +function uniqueName(base: string, used: Set<string>): string { + if (!used.has(base)) { + used.add(base) + return base + } + let index = 2 + while (used.has(`${base}-${index}`)) { + index += 1 + } + const name = `${base}-${index}` + used.add(name) + return name +} diff --git a/src/converters/claude-to-openclaw.ts b/src/converters/claude-to-openclaw.ts new file mode 100644 index 0000000..ba2435c --- /dev/null +++ b/src/converters/claude-to-openclaw.ts @@ -0,0 +1,244 @@ +import { formatFrontmatter } from "../utils/frontmatter" +import type { + ClaudeAgent, + ClaudeCommand, + ClaudePlugin, + ClaudeMcpServer, +} from "../types/claude" +import type { + OpenClawBundle, + OpenClawCommandRegistration, + OpenClawPluginManifest, + OpenClawSkillFile, +} from "../types/openclaw" +import type { ClaudeToOpenCodeOptions } from "./claude-to-opencode" + +export type ClaudeToOpenClawOptions = ClaudeToOpenCodeOptions + +export function convertClaudeToOpenClaw( + plugin: ClaudePlugin, + _options: ClaudeToOpenClawOptions, +): OpenClawBundle { + const enabledCommands = plugin.commands.filter((cmd) => !cmd.disableModelInvocation) + + const agentSkills = plugin.agents.map(convertAgentToSkill) + const commandSkills = enabledCommands.map(convertCommandToSkill) + const commands = enabledCommands.map(convertCommand) + + const skills: OpenClawSkillFile[] = [...agentSkills, ...commandSkills] + + const skillDirCopies = plugin.skills.map((skill) => ({ + sourceDir: skill.sourceDir, + name: skill.name, + })) + + const allSkillDirs = [ + ...agentSkills.map((s) => s.dir), + ...commandSkills.map((s) => s.dir), + ...plugin.skills.map((s) => s.name), + ] + + const manifest = buildManifest(plugin, allSkillDirs) + + const packageJson = buildPackageJson(plugin) + + const openclawConfig = plugin.mcpServers + ? 
buildOpenClawConfig(plugin.mcpServers) + : undefined + + const entryPoint = generateEntryPoint(commands) + + return { + manifest, + packageJson, + entryPoint, + skills, + skillDirCopies, + commands, + openclawConfig, + } +} + +function buildManifest(plugin: ClaudePlugin, skillDirs: string[]): OpenClawPluginManifest { + return { + id: plugin.manifest.name, + name: formatDisplayName(plugin.manifest.name), + kind: "tool", + configSchema: { + type: "object", + properties: {}, + }, + skills: skillDirs.map((dir) => `skills/${dir}`), + } +} + +function buildPackageJson(plugin: ClaudePlugin): Record<string, unknown> { + return { + name: `openclaw-${plugin.manifest.name}`, + version: plugin.manifest.version, + type: "module", + private: true, + description: plugin.manifest.description, + main: "index.ts", + openclaw: { + extensions: [ + { + id: plugin.manifest.name, + entry: "./index.ts", + }, + ], + }, + keywords: [ + "openclaw", + "openclaw-plugin", + ...(plugin.manifest.keywords ?? []), + ], + } +} + +function convertAgentToSkill(agent: ClaudeAgent): OpenClawSkillFile { + const frontmatter: Record<string, unknown> = { + name: agent.name, + description: agent.description, + } + + if (agent.model && agent.model !== "inherit") { + frontmatter.model = agent.model + } + + const body = rewritePaths(agent.body) + const content = formatFrontmatter(frontmatter, body) + + return { + name: agent.name, + content, + dir: `agent-${agent.name}`, + } +} + +function convertCommandToSkill(command: ClaudeCommand): OpenClawSkillFile { + const frontmatter: Record<string, unknown> = { + name: `cmd-${command.name}`, + description: command.description, + } + + if (command.model && command.model !== "inherit") { + frontmatter.model = command.model + } + + const body = rewritePaths(command.body) + const content = formatFrontmatter(frontmatter, body) + + return { + name: command.name, + content, + dir: `cmd-${command.name}`, + } +} + +function convertCommand(command: ClaudeCommand): 
OpenClawCommandRegistration { + return { + name: command.name.replace(/:/g, "-"), + description: command.description ?? `Run ${command.name}`, + acceptsArgs: Boolean(command.argumentHint), + body: rewritePaths(command.body), + } +} + +function buildOpenClawConfig( + servers: Record<string, ClaudeMcpServer>, +): Record<string, unknown> { + const mcpServers: Record<string, unknown> = {} + + for (const [name, server] of Object.entries(servers)) { + if (server.command) { + mcpServers[name] = { + type: "stdio", + command: server.command, + args: server.args ?? [], + env: server.env, + } + } else if (server.url) { + mcpServers[name] = { + type: "http", + url: server.url, + headers: server.headers, + } + } + } + + return { mcpServers } +} + +function generateEntryPoint(commands: OpenClawCommandRegistration[]): string { + const commandRegistrations = commands + .map((cmd) => { + // JSON.stringify produces a fully-escaped string literal safe for JS/TS source embedding + const safeName = JSON.stringify(cmd.name) + const safeDesc = JSON.stringify(cmd.description ?? "") + const safeNotFound = JSON.stringify(`Command ${cmd.name} not found. Check skills directory.`) + return ` api.registerCommand({ + name: ${safeName}, + description: ${safeDesc}, + acceptsArgs: ${cmd.acceptsArgs}, + requireAuth: false, + handler: (ctx) => ({ + text: skills[${safeName}] ?? 
${safeNotFound}, + }), + });` + }) + .join("\n\n") + + return `// Auto-generated OpenClaw plugin entry point +// Converted from Claude Code plugin format by compound-plugin CLI +import { promises as fs } from "fs"; +import path from "path"; +import { fileURLToPath } from "url"; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); + +// Pre-load skill bodies for command responses +const skills: Record<string, string> = {}; + +async function loadSkills() { + const skillsDir = path.join(__dirname, "skills"); + try { + const entries = await fs.readdir(skillsDir, { withFileTypes: true }); + for (const entry of entries) { + if (!entry.isDirectory()) continue; + const skillPath = path.join(skillsDir, entry.name, "SKILL.md"); + try { + const content = await fs.readFile(skillPath, "utf8"); + // Strip frontmatter + const body = content.replace(/^---[\\s\\S]*?---\\n*/, ""); + skills[entry.name.replace(/^cmd-/, "")] = body.trim(); + } catch { + // Skill file not found, skip + } + } + } catch { + // Skills directory not found + } +} + +export default async function register(api) { + await loadSkills(); + +${commandRegistrations} +} +` +} + +function rewritePaths(body: string): string { + return body + .replace(/(?<=^|\s|["'`])~\/\.claude\//gm, "~/.openclaw/") + .replace(/(?<=^|\s|["'`])\.claude\//gm, ".openclaw/") + .replace(/\.claude-plugin\//g, "openclaw-plugin/") +} + +function formatDisplayName(name: string): string { + return name + .split("-") + .map((word) => word.charAt(0).toUpperCase() + word.slice(1)) + .join(" ") +} diff --git a/src/converters/claude-to-opencode.ts b/src/converters/claude-to-opencode.ts index 5bff059..feea6cb 100644 --- a/src/converters/claude-to-opencode.ts +++ b/src/converters/claude-to-opencode.ts @@ -8,7 +8,7 @@ import type { } from "../types/claude" import type { OpenCodeBundle, - OpenCodeCommandConfig, + OpenCodeCommandFile, OpenCodeConfig, OpenCodeMcpServer, } from "../types/opencode" @@ -66,13 +66,12 @@ export function 
convertClaudeToOpenCode( options: ClaudeToOpenCodeOptions, ): OpenCodeBundle { const agentFiles = plugin.agents.map((agent) => convertAgent(agent, options)) - const commandMap = convertCommands(plugin.commands) + const cmdFiles = convertCommands(plugin.commands) const mcp = plugin.mcpServers ? convertMcp(plugin.mcpServers) : undefined const plugins = plugin.hooks ? [convertHooks(plugin.hooks)] : [] const config: OpenCodeConfig = { $schema: "https://opencode.ai/config.json", - command: Object.keys(commandMap).length > 0 ? commandMap : undefined, mcp: mcp && Object.keys(mcp).length > 0 ? mcp : undefined, } @@ -81,6 +80,7 @@ export function convertClaudeToOpenCode( return { config, agents: agentFiles, + commandFiles: cmdFiles, plugins, skillDirs: plugin.skills.map((skill) => ({ sourceDir: skill.sourceDir, name: skill.name })), } @@ -111,20 +111,22 @@ function convertAgent(agent: ClaudeAgent, options: ClaudeToOpenCodeOptions) { } } -function convertCommands(commands: ClaudeCommand[]): Record<string, OpenCodeCommandConfig> { - const result: Record<string, OpenCodeCommandConfig> = {} +// Commands are written as individual .md files rather than entries in opencode.json. +// Chosen over JSON map because opencode resolves commands by filename at runtime (ADR-001). 
+function convertCommands(commands: ClaudeCommand[]): OpenCodeCommandFile[] { + const files: OpenCodeCommandFile[] = [] for (const command of commands) { if (command.disableModelInvocation) continue - const entry: OpenCodeCommandConfig = { + const frontmatter: Record<string, unknown> = { description: command.description, - template: rewriteClaudePaths(command.body), } if (command.model && command.model !== "inherit") { - entry.model = normalizeModel(command.model) + frontmatter.model = normalizeModel(command.model) } - result[command.name] = entry + const content = formatFrontmatter(frontmatter, rewriteClaudePaths(command.body)) + files.push({ name: command.name, content }) } - return result + return files } function convertMcp(servers: Record<string, ClaudeMcpServer>): Record<string, OpenCodeMcpServer> { @@ -200,7 +202,15 @@ function renderHookHandlers( const wrapped = options.requireError ? ` if (input?.error) {\n${statements.map((line) => ` ${line}`).join("\n")}\n }` : rendered + + // Wrap tool.execute.before handlers in try-catch to prevent a failing hook + // from crashing parallel tool call batches (causes API 400 errors). + // See: https://github.com/EveryInc/compound-engineering-plugin/issues/85 + const isPreToolUse = event === "tool.execute.before" const note = options.note ? 
` // ${options.note}\n` : "" + if (isPreToolUse) { + return ` "${event}": async (input) => {\n${note} try {\n ${wrapped}\n } catch (err) {\n console.error("[hook] ${event} error (non-fatal):", err)\n }\n }` + } return ` "${event}": async (input) => {\n${note}${wrapped}\n }` } diff --git a/src/converters/claude-to-qwen.ts b/src/converters/claude-to-qwen.ts new file mode 100644 index 0000000..c07b177 --- /dev/null +++ b/src/converters/claude-to-qwen.ts @@ -0,0 +1,238 @@ +import { formatFrontmatter } from "../utils/frontmatter" +import type { ClaudeAgent, ClaudeCommand, ClaudeMcpServer, ClaudePlugin } from "../types/claude" +import type { + QwenAgentFile, + QwenBundle, + QwenCommandFile, + QwenExtensionConfig, + QwenMcpServer, + QwenSetting, +} from "../types/qwen" + +export type ClaudeToQwenOptions = { + agentMode: "primary" | "subagent" + inferTemperature: boolean +} + +export function convertClaudeToQwen(plugin: ClaudePlugin, options: ClaudeToQwenOptions): QwenBundle { + const agentFiles = plugin.agents.map((agent) => convertAgent(agent, options)) + const cmdFiles = convertCommands(plugin.commands) + const mcp = plugin.mcpServers ? 
convertMcp(plugin.mcpServers) : undefined + const settings = extractSettings(plugin.mcpServers) + + const config: QwenExtensionConfig = { + name: plugin.manifest.name, + version: plugin.manifest.version || "1.0.0", + commands: "commands", + skills: "skills", + agents: "agents", + } + + if (mcp && Object.keys(mcp).length > 0) { + config.mcpServers = mcp + } + + if (settings && settings.length > 0) { + config.settings = settings + } + + const contextFile = generateContextFile(plugin) + + return { + config, + agents: agentFiles, + commandFiles: cmdFiles, + skillDirs: plugin.skills.map((skill) => ({ sourceDir: skill.sourceDir, name: skill.name })), + contextFile, + } +} + +function convertAgent(agent: ClaudeAgent, options: ClaudeToQwenOptions): QwenAgentFile { + const frontmatter: Record<string, unknown> = { + name: agent.name, + description: agent.description, + } + + if (agent.model && agent.model !== "inherit") { + frontmatter.model = normalizeModel(agent.model) + } + + if (options.inferTemperature) { + const temperature = inferTemperature(agent) + if (temperature !== undefined) { + frontmatter.temperature = temperature + } + } + + // Qwen supports both YAML and Markdown for agents + // Using YAML format for structured config + const content = formatFrontmatter(frontmatter, rewriteQwenPaths(agent.body)) + + return { + name: agent.name, + content, + format: "yaml", + } +} + +function convertCommands(commands: ClaudeCommand[]): QwenCommandFile[] { + const files: QwenCommandFile[] = [] + for (const command of commands) { + if (command.disableModelInvocation) continue + const frontmatter: Record<string, unknown> = { + description: command.description, + } + if (command.model && command.model !== "inherit") { + frontmatter.model = normalizeModel(command.model) + } + if (command.allowedTools && command.allowedTools.length > 0) { + frontmatter.allowedTools = command.allowedTools + } + const content = formatFrontmatter(frontmatter, rewriteQwenPaths(command.body)) + 
files.push({ name: command.name, content }) + } + return files +} + +function convertMcp(servers: Record<string, ClaudeMcpServer>): Record<string, QwenMcpServer> { + const result: Record<string, QwenMcpServer> = {} + for (const [name, server] of Object.entries(servers)) { + if (server.command) { + result[name] = { + command: server.command, + args: server.args, + env: server.env, + } + continue + } + + if (server.url) { + // Qwen only supports stdio (command-based) MCP servers — skip remote servers + console.warn( + `Warning: Remote MCP server '${name}' (URL: ${server.url}) is not supported in Qwen format. Qwen only supports stdio MCP servers. Skipping.`, + ) + } + } + return result +} + +function extractSettings(mcpServers?: Record<string, ClaudeMcpServer>): QwenSetting[] { + const settings: QwenSetting[] = [] + if (!mcpServers) return settings + + for (const [name, server] of Object.entries(mcpServers)) { + if (server.env) { + for (const [envVar, value] of Object.entries(server.env)) { + // Only add settings for environment variables that look like placeholders + if (value.startsWith("${") || value.includes("YOUR_") || value.includes("XXX")) { + settings.push({ + name: formatSettingName(envVar), + description: `Environment variable for ${name} MCP server`, + envVar, + sensitive: envVar.toLowerCase().includes("key") || envVar.toLowerCase().includes("token") || envVar.toLowerCase().includes("secret"), + }) + } + } + } + } + + return settings +} + +function formatSettingName(envVar: string): string { + return envVar + .replace(/_/g, " ") + .toLowerCase() + .replace(/\b\w/g, (c) => c.toUpperCase()) +} + +function generateContextFile(plugin: ClaudePlugin): string { + const sections: string[] = [] + + // Plugin description + sections.push(`# ${plugin.manifest.name}`) + sections.push("") + if (plugin.manifest.description) { + sections.push(plugin.manifest.description) + sections.push("") + } + + // Agents section + if (plugin.agents.length > 0) { + sections.push("## 
Agents") + sections.push("") + for (const agent of plugin.agents) { + sections.push(`- **${agent.name}**: ${agent.description || "No description"}`) + } + sections.push("") + } + + // Commands section + if (plugin.commands.length > 0) { + sections.push("## Commands") + sections.push("") + for (const command of plugin.commands) { + if (!command.disableModelInvocation) { + sections.push(`- **/${command.name}**: ${command.description || "No description"}`) + } + } + sections.push("") + } + + // Skills section + if (plugin.skills.length > 0) { + sections.push("## Skills") + sections.push("") + for (const skill of plugin.skills) { + sections.push(`- ${skill.name}`) + } + sections.push("") + } + + return sections.join("\n") +} + +function rewriteQwenPaths(body: string): string { + return body + .replace(/(?<=^|\s|["'`])~\/\.claude\//gm, "~/.qwen/") + .replace(/(?<=^|\s|["'`])\.claude\//gm, ".qwen/") +} + +const CLAUDE_FAMILY_ALIASES: Record<string, string> = { + haiku: "claude-haiku", + sonnet: "claude-sonnet", + opus: "claude-opus", +} + +function normalizeModel(model: string): string { + if (model.includes("/")) return model + if (CLAUDE_FAMILY_ALIASES[model]) { + const resolved = `anthropic/${CLAUDE_FAMILY_ALIASES[model]}` + console.warn( + `Warning: bare model alias "${model}" mapped to "${resolved}".`, + ) + return resolved + } + if (/^claude-/.test(model)) return `anthropic/${model}` + if (/^(gpt-|o1-|o3-)/.test(model)) return `openai/${model}` + if (/^gemini-/.test(model)) return `google/${model}` + if (/^qwen-/.test(model)) return `qwen/${model}` + return `anthropic/${model}` +} + +function inferTemperature(agent: ClaudeAgent): number | undefined { + const sample = `${agent.name} ${agent.description ?? 
""}`.toLowerCase() + if (/(review|audit|security|sentinel|oracle|lint|verification|guardian)/.test(sample)) { + return 0.1 + } + if (/(plan|planning|architecture|strategist|analysis|research)/.test(sample)) { + return 0.2 + } + if (/(doc|readme|changelog|editor|writer)/.test(sample)) { + return 0.3 + } + if (/(brainstorm|creative|ideate|design|concept)/.test(sample)) { + return 0.6 + } + return undefined +} diff --git a/src/converters/claude-to-windsurf.ts b/src/converters/claude-to-windsurf.ts new file mode 100644 index 0000000..975af99 --- /dev/null +++ b/src/converters/claude-to-windsurf.ts @@ -0,0 +1,205 @@ +import { formatFrontmatter } from "../utils/frontmatter" +import { findServersWithPotentialSecrets } from "../utils/secrets" +import type { ClaudeAgent, ClaudeCommand, ClaudeMcpServer, ClaudePlugin } from "../types/claude" +import type { WindsurfBundle, WindsurfGeneratedSkill, WindsurfMcpConfig, WindsurfMcpServerEntry, WindsurfWorkflow } from "../types/windsurf" +import type { ClaudeToOpenCodeOptions } from "./claude-to-opencode" + +export type ClaudeToWindsurfOptions = ClaudeToOpenCodeOptions + +const WINDSURF_WORKFLOW_CHAR_LIMIT = 12_000 + +export function convertClaudeToWindsurf( + plugin: ClaudePlugin, + _options: ClaudeToWindsurfOptions, +): WindsurfBundle { + const knownAgentNames = plugin.agents.map((a) => normalizeName(a.name)) + + // Pass-through skills (collected first so agent skill names can deduplicate against them) + const skillDirs = plugin.skills.map((skill) => ({ + name: skill.name, + sourceDir: skill.sourceDir, + })) + + // Convert agents to skills (seed usedNames with pass-through skill names) + const usedSkillNames = new Set<string>(skillDirs.map((s) => s.name)) + const agentSkills = plugin.agents.map((agent) => + convertAgentToSkill(agent, knownAgentNames, usedSkillNames), + ) + + // Convert commands to workflows + const usedCommandNames = new Set<string>() + const commandWorkflows = plugin.commands.map((command) => + 
convertCommandToWorkflow(command, knownAgentNames, usedCommandNames), + ) + + // Build MCP config + const mcpConfig = buildMcpConfig(plugin.mcpServers) + + // Warn about hooks + if (plugin.hooks && Object.keys(plugin.hooks.hooks).length > 0) { + console.warn( + "Warning: Windsurf has no hooks equivalent. Hooks were skipped during conversion.", + ) + } + + return { agentSkills, commandWorkflows, skillDirs, mcpConfig } +} + +function convertAgentToSkill( + agent: ClaudeAgent, + knownAgentNames: string[], + usedNames: Set<string>, +): WindsurfGeneratedSkill { + const name = uniqueName(normalizeName(agent.name), usedNames) + const description = sanitizeDescription( + agent.description ?? `Converted from Claude agent ${agent.name}`, + ) + + let body = transformContentForWindsurf(agent.body.trim(), knownAgentNames) + if (agent.capabilities && agent.capabilities.length > 0) { + const capabilities = agent.capabilities.map((c) => `- ${c}`).join("\n") + body = `## Capabilities\n${capabilities}\n\n${body}`.trim() + } + if (body.length === 0) { + body = `Instructions converted from the ${agent.name} agent.` + } + + const content = formatFrontmatter({ name, description }, `# ${name}\n\n${body}`) + "\n" + return { name, content } +} + +function convertCommandToWorkflow( + command: ClaudeCommand, + knownAgentNames: string[], + usedNames: Set<string>, +): WindsurfWorkflow { + const name = uniqueName(normalizeName(command.name), usedNames) + const description = sanitizeDescription( + command.description ?? 
`Converted from Claude command ${command.name}`, + ) + + let body = transformContentForWindsurf(command.body.trim(), knownAgentNames) + if (command.argumentHint) { + body = `> Arguments: ${command.argumentHint}\n\n${body}` + } + if (body.length === 0) { + body = `Instructions converted from the ${command.name} command.` + } + + const frontmatter: Record<string, unknown> = { description } + const fullContent = formatFrontmatter(frontmatter, `# ${name}\n\n${body}`) + if (fullContent.length > WINDSURF_WORKFLOW_CHAR_LIMIT) { + console.warn( + `Warning: Workflow "${name}" is ${fullContent.length} characters (limit: ${WINDSURF_WORKFLOW_CHAR_LIMIT}). It may be truncated by Windsurf.`, + ) + } + + return { name, description, body } +} + +/** + * Transform Claude Code content to Windsurf-compatible content. + * + * 1. Path rewriting: .claude/ -> .windsurf/, ~/.claude/ -> ~/.codeium/windsurf/ + * 2. Slash command refs: /workflows:plan -> /workflows-plan (Windsurf invokes workflows as /{name}) + * 3. @agent-name refs: kept as @agent-name (already Windsurf skill invocation syntax) + * 4. Task agent calls: Task agent-name(args) -> Use the @agent-name skill: args + */ +export function transformContentForWindsurf(body: string, knownAgentNames: string[] = []): string { + let result = body + + // 1. Rewrite paths + result = result.replace(/(?<=^|\s|["'`])~\/\.claude\//gm, "~/.codeium/windsurf/") + result = result.replace(/(?<=^|\s|["'`])\.claude\//gm, ".windsurf/") + + // 2. Slash command refs: /workflows:plan -> /workflows-plan (Windsurf invokes as /{name}) + result = result.replace(/(?<=^|\s)`?\/([a-zA-Z][a-zA-Z0-9_:-]*)`?/gm, (_match, cmdName: string) => { + const workflowName = normalizeName(cmdName) + return `/${workflowName}` + }) + + // 3. @agent-name references: no transformation needed. + // In Windsurf, @skill-name is the native invocation syntax for skills. + // Since agents are now mapped to skills, @agent-name already works correctly. + + // 4. 
Transform Task agent calls to skill references + const taskPattern = /^(\s*-?\s*)Task\s+([a-z][a-z0-9-]*)\(([^)]+)\)/gm + result = result.replace(taskPattern, (_match, prefix: string, agentName: string, args: string) => { + return `${prefix}Use the @${normalizeName(agentName)} skill: ${args.trim()}` + }) + + return result +} + +function buildMcpConfig(servers?: Record<string, ClaudeMcpServer>): WindsurfMcpConfig | null { + if (!servers || Object.keys(servers).length === 0) return null + + const result: Record<string, WindsurfMcpServerEntry> = {} + for (const [name, server] of Object.entries(servers)) { + if (server.command) { + // stdio transport + const entry: WindsurfMcpServerEntry = { command: server.command } + if (server.args?.length) entry.args = server.args + if (server.env && Object.keys(server.env).length > 0) entry.env = server.env + result[name] = entry + } else if (server.url) { + // HTTP/SSE transport + const entry: WindsurfMcpServerEntry = { serverUrl: server.url } + if (server.headers && Object.keys(server.headers).length > 0) entry.headers = server.headers + if (server.env && Object.keys(server.env).length > 0) entry.env = server.env + result[name] = entry + } else { + console.warn(`Warning: MCP server "${name}" has no command or URL. Skipping.`) + continue + } + } + + if (Object.keys(result).length === 0) return null + + // Warn about secrets (don't redact — they're needed for the config to work) + const flagged = findServersWithPotentialSecrets(result) + if (flagged.length > 0) { + console.warn( + `Warning: MCP servers contain env vars that may include secrets: ${flagged.join(", ")}.\n` + + " These will be written to mcp_config.json. 
Review before sharing the config file.", + ) + } + + return { mcpServers: result } +} + +export function normalizeName(value: string): string { + const trimmed = value.trim() + if (!trimmed) return "item" + let normalized = trimmed + .toLowerCase() + .replace(/[\\/]+/g, "-") + .replace(/[:\s]+/g, "-") + .replace(/[^a-z0-9_-]+/g, "-") + .replace(/-+/g, "-") + .replace(/^-+|-+$/g, "") + + if (normalized.length === 0 || !/^[a-z]/.test(normalized)) { + return "item" + } + + return normalized +} + +function sanitizeDescription(value: string): string { + return value.replace(/\s+/g, " ").trim() +} + +function uniqueName(base: string, used: Set<string>): string { + if (!used.has(base)) { + used.add(base) + return base + } + let index = 2 + while (used.has(`${base}-${index}`)) { + index += 1 + } + const name = `${base}-${index}` + used.add(name) + return name +} diff --git a/src/index.ts b/src/index.ts index bfd0b72..2e46e29 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,5 +1,6 @@ #!/usr/bin/env bun import { defineCommand, runMain } from "citty" +import packageJson from "../package.json" import convert from "./commands/convert" import install from "./commands/install" import listCommand from "./commands/list" @@ -8,7 +9,7 @@ import sync from "./commands/sync" const main = defineCommand({ meta: { name: "compound-plugin", - version: "0.1.0", + version: packageJson.version, description: "Convert Claude Code plugins into other agent formats", }, subCommands: { diff --git a/src/parsers/claude-home.ts b/src/parsers/claude-home.ts index c8f1818..efc1732 100644 --- a/src/parsers/claude-home.ts +++ b/src/parsers/claude-home.ts @@ -1,22 +1,26 @@ import path from "path" import os from "os" import fs from "fs/promises" -import type { ClaudeSkill, ClaudeMcpServer } from "../types/claude" +import { parseFrontmatter } from "../utils/frontmatter" +import { walkFiles } from "../utils/files" +import type { ClaudeCommand, ClaudeSkill, ClaudeMcpServer } from "../types/claude" export 
interface ClaudeHomeConfig { skills: ClaudeSkill[] + commands?: ClaudeCommand[] mcpServers: Record<string, ClaudeMcpServer> } export async function loadClaudeHome(claudeHome?: string): Promise<ClaudeHomeConfig> { const home = claudeHome ?? path.join(os.homedir(), ".claude") - const [skills, mcpServers] = await Promise.all([ + const [skills, commands, mcpServers] = await Promise.all([ loadPersonalSkills(path.join(home, "skills")), + loadPersonalCommands(path.join(home, "commands")), loadSettingsMcp(path.join(home, "settings.json")), ]) - return { skills, mcpServers } + return { skills, commands, mcpServers } } async function loadPersonalSkills(skillsDir: string): Promise<ClaudeSkill[]> { @@ -63,3 +67,51 @@ async function loadSettingsMcp( return {} // File doesn't exist or invalid JSON } } + +async function loadPersonalCommands(commandsDir: string): Promise<ClaudeCommand[]> { + try { + const files = (await walkFiles(commandsDir)) + .filter((file) => file.endsWith(".md")) + .sort() + + const commands: ClaudeCommand[] = [] + for (const file of files) { + const raw = await fs.readFile(file, "utf8") + const { data, body } = parseFrontmatter(raw) + commands.push({ + name: typeof data.name === "string" ? data.name : deriveCommandName(commandsDir, file), + description: data.description as string | undefined, + argumentHint: data["argument-hint"] as string | undefined, + model: data.model as string | undefined, + allowedTools: parseAllowedTools(data["allowed-tools"]), + disableModelInvocation: data["disable-model-invocation"] === true ? 
true : undefined, + body: body.trim(), + sourcePath: file, + }) + } + + return commands + } catch { + return [] + } +} + +function deriveCommandName(commandsDir: string, filePath: string): string { + const relative = path.relative(commandsDir, filePath) + const withoutExt = relative.replace(/\.md$/i, "") + return withoutExt.split(path.sep).join(":") +} + +function parseAllowedTools(value: unknown): string[] | undefined { + if (!value) return undefined + if (Array.isArray(value)) { + return value.map((item) => String(item)) + } + if (typeof value === "string") { + return value + .split(/,/) + .map((item) => item.trim()) + .filter(Boolean) + } + return undefined +} diff --git a/src/sync/codex.ts b/src/sync/codex.ts index c0414bd..b7b894e 100644 --- a/src/sync/codex.ts +++ b/src/sync/codex.ts @@ -1,31 +1,29 @@ import fs from "fs/promises" import path from "path" import type { ClaudeHomeConfig } from "../parsers/claude-home" -import type { ClaudeMcpServer } from "../types/claude" -import { forceSymlink, isValidSkillName } from "../utils/symlink" +import { renderCodexConfig } from "../targets/codex" +import { writeTextSecure } from "../utils/files" +import { syncCodexCommands } from "./commands" +import { syncSkills } from "./skills" + +const CURRENT_START_MARKER = "# BEGIN compound-plugin Claude Code MCP" +const CURRENT_END_MARKER = "# END compound-plugin Claude Code MCP" +const LEGACY_MARKER = "# MCP servers synced from Claude Code" export async function syncToCodex( config: ClaudeHomeConfig, outputRoot: string, ): Promise<void> { - // Ensure output directories exist - const skillsDir = path.join(outputRoot, "skills") - await fs.mkdir(skillsDir, { recursive: true }) - - // Symlink skills (with validation) - for (const skill of config.skills) { - if (!isValidSkillName(skill.name)) { - console.warn(`Skipping skill with invalid name: ${skill.name}`) - continue - } - const target = path.join(skillsDir, skill.name) - await forceSymlink(skill.sourceDir, target) - } + await 
syncSkills(config.skills, path.join(outputRoot, "skills")) + await syncCodexCommands(config, outputRoot) // Write MCP servers to config.toml (TOML format) if (Object.keys(config.mcpServers).length > 0) { const configPath = path.join(outputRoot, "config.toml") - const mcpToml = convertMcpForCodex(config.mcpServers) + const mcpToml = renderCodexConfig(config.mcpServers) + if (!mcpToml) { + return + } // Read existing config and merge idempotently let existingContent = "" @@ -37,56 +35,34 @@ export async function syncToCodex( } } - // Remove any existing Claude Code MCP section to make idempotent - const marker = "# MCP servers synced from Claude Code" - const markerIndex = existingContent.indexOf(marker) - if (markerIndex !== -1) { - existingContent = existingContent.slice(0, markerIndex).trimEnd() - } + const managedBlock = [ + CURRENT_START_MARKER, + mcpToml.trim(), + CURRENT_END_MARKER, + "", + ].join("\n") - const newContent = existingContent - ? existingContent + "\n\n" + marker + "\n" + mcpToml - : "# Codex config - synced from Claude Code\n\n" + mcpToml + const withoutCurrentBlock = existingContent.replace( + new RegExp( + `${escapeForRegex(CURRENT_START_MARKER)}[\\s\\S]*?${escapeForRegex(CURRENT_END_MARKER)}\\n?`, + "g", + ), + "", + ).trimEnd() - await fs.writeFile(configPath, newContent, { mode: 0o600 }) + const legacyMarkerIndex = withoutCurrentBlock.indexOf(LEGACY_MARKER) + const cleaned = legacyMarkerIndex === -1 + ? withoutCurrentBlock + : withoutCurrentBlock.slice(0, legacyMarkerIndex).trimEnd() + + const newContent = cleaned + ? 
`${cleaned}\n\n${managedBlock}` + : `${managedBlock}` + + await writeTextSecure(configPath, newContent) } } -/** Escape a string for TOML double-quoted strings */ -function escapeTomlString(str: string): string { - return str - .replace(/\\/g, "\\\\") - .replace(/"/g, '\\"') - .replace(/\n/g, "\\n") - .replace(/\r/g, "\\r") - .replace(/\t/g, "\\t") -} - -function convertMcpForCodex(servers: Record<string, ClaudeMcpServer>): string { - const sections: string[] = [] - - for (const [name, server] of Object.entries(servers)) { - if (!server.command) continue - - const lines: string[] = [] - lines.push(`[mcp_servers.${name}]`) - lines.push(`command = "${escapeTomlString(server.command)}"`) - - if (server.args && server.args.length > 0) { - const argsStr = server.args.map((arg) => `"${escapeTomlString(arg)}"`).join(", ") - lines.push(`args = [${argsStr}]`) - } - - if (server.env && Object.keys(server.env).length > 0) { - lines.push("") - lines.push(`[mcp_servers.${name}.env]`) - for (const [key, value] of Object.entries(server.env)) { - lines.push(`${key} = "${escapeTomlString(value)}"`) - } - } - - sections.push(lines.join("\n")) - } - - return sections.join("\n\n") + "\n" +function escapeForRegex(value: string): string { + return value.replace(/[.*+?^${}()|[\]\\]/g, "\\$&") } diff --git a/src/sync/commands.ts b/src/sync/commands.ts new file mode 100644 index 0000000..03ca3fb --- /dev/null +++ b/src/sync/commands.ts @@ -0,0 +1,198 @@ +import path from "path" +import type { ClaudeHomeConfig } from "../parsers/claude-home" +import type { ClaudePlugin } from "../types/claude" +import { backupFile, writeText } from "../utils/files" +import { convertClaudeToCodex } from "../converters/claude-to-codex" +import { convertClaudeToCopilot } from "../converters/claude-to-copilot" +import { convertClaudeToDroid } from "../converters/claude-to-droid" +import { convertClaudeToGemini } from "../converters/claude-to-gemini" +import { convertClaudeToKiro } from 
"../converters/claude-to-kiro" +import { convertClaudeToOpenCode, type ClaudeToOpenCodeOptions } from "../converters/claude-to-opencode" +import { convertClaudeToPi } from "../converters/claude-to-pi" +import { convertClaudeToQwen, type ClaudeToQwenOptions } from "../converters/claude-to-qwen" +import { convertClaudeToWindsurf } from "../converters/claude-to-windsurf" +import { writeWindsurfBundle } from "../targets/windsurf" + +type WindsurfSyncScope = "global" | "workspace" + +const HOME_SYNC_PLUGIN_ROOT = path.join(process.cwd(), ".compound-sync-home") + +const DEFAULT_SYNC_OPTIONS: ClaudeToOpenCodeOptions = { + agentMode: "subagent", + inferTemperature: false, + permissions: "none", +} + +const DEFAULT_QWEN_SYNC_OPTIONS: ClaudeToQwenOptions = { + agentMode: "subagent", + inferTemperature: false, +} + +function hasCommands(config: ClaudeHomeConfig): boolean { + return (config.commands?.length ?? 0) > 0 +} + +function buildClaudeHomePlugin(config: ClaudeHomeConfig): ClaudePlugin { + return { + root: HOME_SYNC_PLUGIN_ROOT, + manifest: { + name: "claude-home", + version: "1.0.0", + description: "Personal Claude Code home config", + }, + agents: [], + commands: config.commands ?? 
[], + skills: config.skills, + mcpServers: undefined, + } +} + +export async function syncOpenCodeCommands( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + if (!hasCommands(config)) return + + const plugin = buildClaudeHomePlugin(config) + const bundle = convertClaudeToOpenCode(plugin, DEFAULT_SYNC_OPTIONS) + + for (const commandFile of bundle.commandFiles) { + const commandPath = path.join(outputRoot, "commands", `${commandFile.name}.md`) + const backupPath = await backupFile(commandPath) + if (backupPath) { + console.log(`Backed up existing command file to ${backupPath}`) + } + await writeText(commandPath, commandFile.content + "\n") + } +} + +export async function syncCodexCommands( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + if (!hasCommands(config)) return + + const plugin = buildClaudeHomePlugin(config) + const bundle = convertClaudeToCodex(plugin, DEFAULT_SYNC_OPTIONS) + for (const prompt of bundle.prompts) { + await writeText(path.join(outputRoot, "prompts", `${prompt.name}.md`), prompt.content + "\n") + } + for (const skill of bundle.generatedSkills) { + await writeText(path.join(outputRoot, "skills", skill.name, "SKILL.md"), skill.content + "\n") + } +} + +export async function syncPiCommands( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + if (!hasCommands(config)) return + + const plugin = buildClaudeHomePlugin(config) + const bundle = convertClaudeToPi(plugin, DEFAULT_SYNC_OPTIONS) + for (const prompt of bundle.prompts) { + await writeText(path.join(outputRoot, "prompts", `${prompt.name}.md`), prompt.content + "\n") + } + for (const extension of bundle.extensions) { + await writeText(path.join(outputRoot, "extensions", extension.name), extension.content + "\n") + } +} + +export async function syncDroidCommands( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + if (!hasCommands(config)) return + + const plugin = buildClaudeHomePlugin(config) + const bundle 
= convertClaudeToDroid(plugin, DEFAULT_SYNC_OPTIONS) + for (const command of bundle.commands) { + await writeText(path.join(outputRoot, "commands", `${command.name}.md`), command.content + "\n") + } +} + +export async function syncCopilotCommands( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + if (!hasCommands(config)) return + + const plugin = buildClaudeHomePlugin(config) + const bundle = convertClaudeToCopilot(plugin, DEFAULT_SYNC_OPTIONS) + + for (const skill of bundle.generatedSkills) { + await writeText(path.join(outputRoot, "skills", skill.name, "SKILL.md"), skill.content + "\n") + } +} + +export async function syncGeminiCommands( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + if (!hasCommands(config)) return + + const plugin = buildClaudeHomePlugin(config) + const bundle = convertClaudeToGemini(plugin, DEFAULT_SYNC_OPTIONS) + for (const command of bundle.commands) { + await writeText(path.join(outputRoot, "commands", `${command.name}.toml`), command.content + "\n") + } +} + +export async function syncKiroCommands( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + if (!hasCommands(config)) return + + const plugin = buildClaudeHomePlugin(config) + const bundle = convertClaudeToKiro(plugin, DEFAULT_SYNC_OPTIONS) + for (const skill of bundle.generatedSkills) { + await writeText(path.join(outputRoot, "skills", skill.name, "SKILL.md"), skill.content + "\n") + } +} + +export async function syncWindsurfCommands( + config: ClaudeHomeConfig, + outputRoot: string, + scope: WindsurfSyncScope = "global", +): Promise<void> { + if (!hasCommands(config)) return + + const plugin = buildClaudeHomePlugin(config) + const bundle = convertClaudeToWindsurf(plugin, DEFAULT_SYNC_OPTIONS) + await writeWindsurfBundle(outputRoot, { + agentSkills: [], + commandWorkflows: bundle.commandWorkflows, + skillDirs: [], + mcpConfig: null, + }, scope) +} + +export async function syncQwenCommands( + config: ClaudeHomeConfig, 
+ outputRoot: string, +): Promise<void> { + if (!hasCommands(config)) return + + const plugin = buildClaudeHomePlugin(config) + const bundle = convertClaudeToQwen(plugin, DEFAULT_QWEN_SYNC_OPTIONS) + + for (const commandFile of bundle.commandFiles) { + const parts = commandFile.name.split(":") + if (parts.length > 1) { + const nestedDir = path.join(outputRoot, "commands", ...parts.slice(0, -1)) + await writeText(path.join(nestedDir, `${parts[parts.length - 1]}.md`), commandFile.content + "\n") + continue + } + + await writeText(path.join(outputRoot, "commands", `${commandFile.name}.md`), commandFile.content + "\n") + } +} + +export function warnUnsupportedOpenClawCommands(config: ClaudeHomeConfig): void { + if (!hasCommands(config)) return + + console.warn( + "Warning: OpenClaw personal command sync is skipped because this sync target currently has no documented user-level command surface.", + ) +} diff --git a/src/sync/copilot.ts b/src/sync/copilot.ts new file mode 100644 index 0000000..51a9b06 --- /dev/null +++ b/src/sync/copilot.ts @@ -0,0 +1,78 @@ +import path from "path" +import type { ClaudeHomeConfig } from "../parsers/claude-home" +import type { ClaudeMcpServer } from "../types/claude" +import { syncCopilotCommands } from "./commands" +import { mergeJsonConfigAtKey } from "./json-config" +import { hasExplicitSseTransport } from "./mcp-transports" +import { syncSkills } from "./skills" + +type CopilotMcpServer = { + type: "local" | "http" | "sse" + command?: string + args?: string[] + url?: string + tools: string[] + env?: Record<string, string> + headers?: Record<string, string> +} + +type CopilotMcpConfig = { + mcpServers: Record<string, CopilotMcpServer> +} + +export async function syncToCopilot( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + await syncSkills(config.skills, path.join(outputRoot, "skills")) + await syncCopilotCommands(config, outputRoot) + + if (Object.keys(config.mcpServers).length > 0) { + const mcpPath = 
path.join(outputRoot, "mcp-config.json") + const converted = convertMcpForCopilot(config.mcpServers) + await mergeJsonConfigAtKey({ + configPath: mcpPath, + key: "mcpServers", + incoming: converted, + }) + } +} + +function convertMcpForCopilot( + servers: Record<string, ClaudeMcpServer>, +): Record<string, CopilotMcpServer> { + const result: Record<string, CopilotMcpServer> = {} + for (const [name, server] of Object.entries(servers)) { + const entry: CopilotMcpServer = { + type: server.command ? "local" : hasExplicitSseTransport(server) ? "sse" : "http", + tools: ["*"], + } + + if (server.command) { + entry.command = server.command + if (server.args && server.args.length > 0) entry.args = server.args + } else if (server.url) { + entry.url = server.url + if (server.headers && Object.keys(server.headers).length > 0) entry.headers = server.headers + } + + if (server.env && Object.keys(server.env).length > 0) { + entry.env = prefixEnvVars(server.env) + } + + result[name] = entry + } + return result +} + +function prefixEnvVars(env: Record<string, string>): Record<string, string> { + const result: Record<string, string> = {} + for (const [key, value] of Object.entries(env)) { + if (key.startsWith("COPILOT_MCP_")) { + result[key] = value + } else { + result[`COPILOT_MCP_${key}`] = value + } + } + return result +} diff --git a/src/sync/cursor.ts b/src/sync/cursor.ts deleted file mode 100644 index 32f3aa4..0000000 --- a/src/sync/cursor.ts +++ /dev/null @@ -1,78 +0,0 @@ -import fs from "fs/promises" -import path from "path" -import type { ClaudeHomeConfig } from "../parsers/claude-home" -import type { ClaudeMcpServer } from "../types/claude" -import { forceSymlink, isValidSkillName } from "../utils/symlink" - -type CursorMcpServer = { - command?: string - args?: string[] - url?: string - env?: Record<string, string> - headers?: Record<string, string> -} - -type CursorMcpConfig = { - mcpServers: Record<string, CursorMcpServer> -} - -export async function syncToCursor( - 
config: ClaudeHomeConfig, - outputRoot: string, -): Promise<void> { - const skillsDir = path.join(outputRoot, "skills") - await fs.mkdir(skillsDir, { recursive: true }) - - for (const skill of config.skills) { - if (!isValidSkillName(skill.name)) { - console.warn(`Skipping skill with invalid name: ${skill.name}`) - continue - } - const target = path.join(skillsDir, skill.name) - await forceSymlink(skill.sourceDir, target) - } - - if (Object.keys(config.mcpServers).length > 0) { - const mcpPath = path.join(outputRoot, "mcp.json") - const existing = await readJsonSafe(mcpPath) - const converted = convertMcpForCursor(config.mcpServers) - const merged: CursorMcpConfig = { - mcpServers: { - ...(existing.mcpServers ?? {}), - ...converted, - }, - } - await fs.writeFile(mcpPath, JSON.stringify(merged, null, 2), { mode: 0o600 }) - } -} - -async function readJsonSafe(filePath: string): Promise<Partial<CursorMcpConfig>> { - try { - const content = await fs.readFile(filePath, "utf-8") - return JSON.parse(content) as Partial<CursorMcpConfig> - } catch (err) { - if ((err as NodeJS.ErrnoException).code === "ENOENT") { - return {} - } - throw err - } -} - -function convertMcpForCursor( - servers: Record<string, ClaudeMcpServer>, -): Record<string, CursorMcpServer> { - const result: Record<string, CursorMcpServer> = {} - for (const [name, server] of Object.entries(servers)) { - const entry: CursorMcpServer = {} - if (server.command) { - entry.command = server.command - if (server.args && server.args.length > 0) entry.args = server.args - if (server.env && Object.keys(server.env).length > 0) entry.env = server.env - } else if (server.url) { - entry.url = server.url - if (server.headers && Object.keys(server.headers).length > 0) entry.headers = server.headers - } - result[name] = entry - } - return result -} diff --git a/src/sync/droid.ts b/src/sync/droid.ts index 1f55968..aa041c2 100644 --- a/src/sync/droid.ts +++ b/src/sync/droid.ts @@ -1,21 +1,62 @@ -import fs from "fs/promises" 
import path from "path" import type { ClaudeHomeConfig } from "../parsers/claude-home" -import { forceSymlink, isValidSkillName } from "../utils/symlink" +import type { ClaudeMcpServer } from "../types/claude" +import { syncDroidCommands } from "./commands" +import { mergeJsonConfigAtKey } from "./json-config" +import { syncSkills } from "./skills" + +type DroidMcpServer = { + type: "stdio" | "http" + command?: string + args?: string[] + env?: Record<string, string> + url?: string + headers?: Record<string, string> + disabled: boolean +} export async function syncToDroid( config: ClaudeHomeConfig, outputRoot: string, ): Promise<void> { - const skillsDir = path.join(outputRoot, "skills") - await fs.mkdir(skillsDir, { recursive: true }) + await syncSkills(config.skills, path.join(outputRoot, "skills")) + await syncDroidCommands(config, outputRoot) - for (const skill of config.skills) { - if (!isValidSkillName(skill.name)) { - console.warn(`Skipping skill with invalid name: ${skill.name}`) - continue - } - const target = path.join(skillsDir, skill.name) - await forceSymlink(skill.sourceDir, target) + if (Object.keys(config.mcpServers).length > 0) { + await mergeJsonConfigAtKey({ + configPath: path.join(outputRoot, "mcp.json"), + key: "mcpServers", + incoming: convertMcpForDroid(config.mcpServers), + }) } } + +function convertMcpForDroid( + servers: Record<string, ClaudeMcpServer>, +): Record<string, DroidMcpServer> { + const result: Record<string, DroidMcpServer> = {} + + for (const [name, server] of Object.entries(servers)) { + if (server.command) { + result[name] = { + type: "stdio", + command: server.command, + args: server.args, + env: server.env, + disabled: false, + } + continue + } + + if (server.url) { + result[name] = { + type: "http", + url: server.url, + headers: server.headers, + disabled: false, + } + } + } + + return result +} diff --git a/src/sync/gemini.ts b/src/sync/gemini.ts new file mode 100644 index 0000000..c1c5546 --- /dev/null +++ 
b/src/sync/gemini.ts @@ -0,0 +1,135 @@ +import fs from "fs/promises" +import path from "path" +import type { ClaudeHomeConfig } from "../parsers/claude-home" +import type { ClaudeMcpServer } from "../types/claude" +import { syncGeminiCommands } from "./commands" +import { mergeJsonConfigAtKey } from "./json-config" +import { syncSkills } from "./skills" + +type GeminiMcpServer = { + command?: string + args?: string[] + url?: string + env?: Record<string, string> + headers?: Record<string, string> +} + +export async function syncToGemini( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + await syncGeminiSkills(config.skills, outputRoot) + await syncGeminiCommands(config, outputRoot) + + if (Object.keys(config.mcpServers).length > 0) { + const settingsPath = path.join(outputRoot, "settings.json") + const converted = convertMcpForGemini(config.mcpServers) + await mergeJsonConfigAtKey({ + configPath: settingsPath, + key: "mcpServers", + incoming: converted, + }) + } +} + +async function syncGeminiSkills( + skills: ClaudeHomeConfig["skills"], + outputRoot: string, +): Promise<void> { + const skillsDir = path.join(outputRoot, "skills") + const sharedSkillsDir = getGeminiSharedSkillsDir(outputRoot) + + if (!sharedSkillsDir) { + await syncSkills(skills, skillsDir) + return + } + + const canonicalSharedSkillsDir = await canonicalizePath(sharedSkillsDir) + const mirroredSkills: ClaudeHomeConfig["skills"] = [] + const directSkills: ClaudeHomeConfig["skills"] = [] + + for (const skill of skills) { + if (await isWithinDir(skill.sourceDir, canonicalSharedSkillsDir)) { + mirroredSkills.push(skill) + } else { + directSkills.push(skill) + } + } + + await removeGeminiMirrorConflicts(mirroredSkills, skillsDir, canonicalSharedSkillsDir) + await syncSkills(directSkills, skillsDir) +} + +function getGeminiSharedSkillsDir(outputRoot: string): string | null { + if (path.basename(outputRoot) !== ".gemini") return null + return path.join(path.dirname(outputRoot), 
".agents", "skills") +} + +async function canonicalizePath(targetPath: string): Promise<string> { + try { + return await fs.realpath(targetPath) + } catch { + return path.resolve(targetPath) + } +} + +async function isWithinDir(candidate: string, canonicalParentDir: string): Promise<boolean> { + const resolvedCandidate = await canonicalizePath(candidate) + return resolvedCandidate === canonicalParentDir + || resolvedCandidate.startsWith(`${canonicalParentDir}${path.sep}`) +} + +async function removeGeminiMirrorConflicts( + skills: ClaudeHomeConfig["skills"], + skillsDir: string, + sharedSkillsDir: string, +): Promise<void> { + for (const skill of skills) { + const duplicatePath = path.join(skillsDir, skill.name) + + let stat + try { + stat = await fs.lstat(duplicatePath) + } catch (error) { + if ((error as NodeJS.ErrnoException).code === "ENOENT") { + continue + } + throw error + } + + if (!stat.isSymbolicLink()) { + continue + } + + let resolvedTarget: string + try { + resolvedTarget = await canonicalizePath(duplicatePath) + } catch { + continue + } + + if (resolvedTarget === await canonicalizePath(skill.sourceDir) + || await isWithinDir(resolvedTarget, sharedSkillsDir)) { + await fs.unlink(duplicatePath) + } + } +} + +function convertMcpForGemini( + servers: Record<string, ClaudeMcpServer>, +): Record<string, GeminiMcpServer> { + const result: Record<string, GeminiMcpServer> = {} + for (const [name, server] of Object.entries(servers)) { + const entry: GeminiMcpServer = {} + if (server.command) { + entry.command = server.command + if (server.args && server.args.length > 0) entry.args = server.args + if (server.env && Object.keys(server.env).length > 0) entry.env = server.env + } else if (server.url) { + entry.url = server.url + if (server.headers && Object.keys(server.headers).length > 0) entry.headers = server.headers + } + result[name] = entry + } + return result +} diff --git a/src/sync/json-config.ts b/src/sync/json-config.ts new file mode 100644 index 
0000000..c12780f --- /dev/null +++ b/src/sync/json-config.ts @@ -0,0 +1,47 @@ +import path from "path" +import { pathExists, readJson, writeJsonSecure } from "../utils/files" + +type JsonObject = Record<string, unknown> + +function isJsonObject(value: unknown): value is JsonObject { + return typeof value === "object" && value !== null && !Array.isArray(value) +} + +export async function mergeJsonConfigAtKey(options: { + configPath: string + key: string + incoming: Record<string, unknown> +}): Promise<void> { + const { configPath, key, incoming } = options + const existing = await readJsonObjectSafe(configPath) + const existingEntries = isJsonObject(existing[key]) ? existing[key] : {} + const merged = { + ...existing, + [key]: { + ...existingEntries, + ...incoming, // incoming plugin entries overwrite same-named servers + }, + } + + await writeJsonSecure(configPath, merged) +} + +async function readJsonObjectSafe(configPath: string): Promise<JsonObject> { + if (!(await pathExists(configPath))) { + return {} + } + + try { + const parsed = await readJson<unknown>(configPath) + if (isJsonObject(parsed)) { + return parsed + } + } catch { + // Fall through to warning and replacement. 
+ } + + console.warn( + `Warning: existing ${path.basename(configPath)} could not be parsed and will be replaced.`, + ) + return {} +} diff --git a/src/sync/kiro.ts b/src/sync/kiro.ts new file mode 100644 index 0000000..d95807f --- /dev/null +++ b/src/sync/kiro.ts @@ -0,0 +1,49 @@ +import path from "path" +import type { ClaudeHomeConfig } from "../parsers/claude-home" +import type { ClaudeMcpServer } from "../types/claude" +import type { KiroMcpServer } from "../types/kiro" +import { syncKiroCommands } from "./commands" +import { mergeJsonConfigAtKey } from "./json-config" +import { syncSkills } from "./skills" + +export async function syncToKiro( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + await syncSkills(config.skills, path.join(outputRoot, "skills")) + await syncKiroCommands(config, outputRoot) + + if (Object.keys(config.mcpServers).length > 0) { + await mergeJsonConfigAtKey({ + configPath: path.join(outputRoot, "settings", "mcp.json"), + key: "mcpServers", + incoming: convertMcpForKiro(config.mcpServers), + }) + } +} + +function convertMcpForKiro( + servers: Record<string, ClaudeMcpServer>, +): Record<string, KiroMcpServer> { + const result: Record<string, KiroMcpServer> = {} + + for (const [name, server] of Object.entries(servers)) { + if (server.command) { + result[name] = { + command: server.command, + args: server.args, + env: server.env, + } + continue + } + + if (server.url) { + result[name] = { + url: server.url, + headers: server.headers, + } + } + } + + return result +} diff --git a/src/sync/mcp-transports.ts b/src/sync/mcp-transports.ts new file mode 100644 index 0000000..6a172e8 --- /dev/null +++ b/src/sync/mcp-transports.ts @@ -0,0 +1,19 @@ +import type { ClaudeMcpServer } from "../types/claude" + +function getTransportType(server: ClaudeMcpServer): string { + return server.type?.toLowerCase().trim() ?? 
"" +} + +export function hasExplicitSseTransport(server: ClaudeMcpServer): boolean { + const type = getTransportType(server) + return type.includes("sse") +} + +export function hasExplicitHttpTransport(server: ClaudeMcpServer): boolean { + const type = getTransportType(server) + return type.includes("http") || type.includes("streamable") +} + +export function hasExplicitRemoteTransport(server: ClaudeMcpServer): boolean { + return hasExplicitSseTransport(server) || hasExplicitHttpTransport(server) +} diff --git a/src/sync/openclaw.ts b/src/sync/openclaw.ts new file mode 100644 index 0000000..450b3ec --- /dev/null +++ b/src/sync/openclaw.ts @@ -0,0 +1,18 @@ +import path from "path" +import type { ClaudeHomeConfig } from "../parsers/claude-home" +import { warnUnsupportedOpenClawCommands } from "./commands" +import { syncSkills } from "./skills" + +export async function syncToOpenClaw( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + await syncSkills(config.skills, path.join(outputRoot, "skills")) + warnUnsupportedOpenClawCommands(config) + + if (Object.keys(config.mcpServers).length > 0) { + console.warn( + "Warning: OpenClaw MCP sync is skipped because the current official OpenClaw docs do not clearly document an MCP server config contract.", + ) + } +} diff --git a/src/sync/opencode.ts b/src/sync/opencode.ts index e61e638..c5781e5 100644 --- a/src/sync/opencode.ts +++ b/src/sync/opencode.ts @@ -1,47 +1,27 @@ -import fs from "fs/promises" import path from "path" import type { ClaudeHomeConfig } from "../parsers/claude-home" import type { ClaudeMcpServer } from "../types/claude" import type { OpenCodeMcpServer } from "../types/opencode" -import { forceSymlink, isValidSkillName } from "../utils/symlink" +import { syncOpenCodeCommands } from "./commands" +import { mergeJsonConfigAtKey } from "./json-config" +import { syncSkills } from "./skills" export async function syncToOpenCode( config: ClaudeHomeConfig, outputRoot: string, ): Promise<void> { 
- // Ensure output directories exist - const skillsDir = path.join(outputRoot, "skills") - await fs.mkdir(skillsDir, { recursive: true }) - - // Symlink skills (with validation) - for (const skill of config.skills) { - if (!isValidSkillName(skill.name)) { - console.warn(`Skipping skill with invalid name: ${skill.name}`) - continue - } - const target = path.join(skillsDir, skill.name) - await forceSymlink(skill.sourceDir, target) - } + await syncSkills(config.skills, path.join(outputRoot, "skills")) + await syncOpenCodeCommands(config, outputRoot) // Merge MCP servers into opencode.json if (Object.keys(config.mcpServers).length > 0) { const configPath = path.join(outputRoot, "opencode.json") - const existing = await readJsonSafe(configPath) const mcpConfig = convertMcpForOpenCode(config.mcpServers) - existing.mcp = { ...(existing.mcp ?? {}), ...mcpConfig } - await fs.writeFile(configPath, JSON.stringify(existing, null, 2), { mode: 0o600 }) - } -} - -async function readJsonSafe(filePath: string): Promise<Record<string, unknown>> { - try { - const content = await fs.readFile(filePath, "utf-8") - return JSON.parse(content) as Record<string, unknown> - } catch (err) { - if ((err as NodeJS.ErrnoException).code === "ENOENT") { - return {} - } - throw err + await mergeJsonConfigAtKey({ + configPath, + key: "mcp", + incoming: mcpConfig, + }) } } diff --git a/src/sync/pi.ts b/src/sync/pi.ts index 3f6d0f6..9bd0076 100644 --- a/src/sync/pi.ts +++ b/src/sync/pi.ts @@ -1,8 +1,10 @@ -import fs from "fs/promises" import path from "path" import type { ClaudeHomeConfig } from "../parsers/claude-home" import type { ClaudeMcpServer } from "../types/claude" -import { forceSymlink, isValidSkillName } from "../utils/symlink" +import { ensureDir } from "../utils/files" +import { syncPiCommands } from "./commands" +import { mergeJsonConfigAtKey } from "./json-config" +import { syncSkills } from "./skills" type McporterServer = { baseUrl?: string @@ -20,45 +22,19 @@ export async function 
syncToPi( config: ClaudeHomeConfig, outputRoot: string, ): Promise<void> { - const skillsDir = path.join(outputRoot, "skills") const mcporterPath = path.join(outputRoot, "compound-engineering", "mcporter.json") - await fs.mkdir(skillsDir, { recursive: true }) - - for (const skill of config.skills) { - if (!isValidSkillName(skill.name)) { - console.warn(`Skipping skill with invalid name: ${skill.name}`) - continue - } - const target = path.join(skillsDir, skill.name) - await forceSymlink(skill.sourceDir, target) - } + await syncSkills(config.skills, path.join(outputRoot, "skills")) + await syncPiCommands(config, outputRoot) if (Object.keys(config.mcpServers).length > 0) { - await fs.mkdir(path.dirname(mcporterPath), { recursive: true }) - - const existing = await readJsonSafe(mcporterPath) + await ensureDir(path.dirname(mcporterPath)) const converted = convertMcpToMcporter(config.mcpServers) - const merged: McporterConfig = { - mcpServers: { - ...(existing.mcpServers ?? {}), - ...converted.mcpServers, - }, - } - - await fs.writeFile(mcporterPath, JSON.stringify(merged, null, 2), { mode: 0o600 }) - } -} - -async function readJsonSafe(filePath: string): Promise<Partial<McporterConfig>> { - try { - const content = await fs.readFile(filePath, "utf-8") - return JSON.parse(content) as Partial<McporterConfig> - } catch (err) { - if ((err as NodeJS.ErrnoException).code === "ENOENT") { - return {} - } - throw err + await mergeJsonConfigAtKey({ + configPath: mcporterPath, + key: "mcpServers", + incoming: converted.mcpServers, + }) } } diff --git a/src/sync/qwen.ts b/src/sync/qwen.ts new file mode 100644 index 0000000..99bc3a8 --- /dev/null +++ b/src/sync/qwen.ts @@ -0,0 +1,66 @@ +import path from "path" +import type { ClaudeHomeConfig } from "../parsers/claude-home" +import type { ClaudeMcpServer } from "../types/claude" +import type { QwenMcpServer } from "../types/qwen" +import { syncQwenCommands } from "./commands" +import { mergeJsonConfigAtKey } from "./json-config" 
+import { hasExplicitRemoteTransport, hasExplicitSseTransport } from "./mcp-transports" +import { syncSkills } from "./skills" + +export async function syncToQwen( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + await syncSkills(config.skills, path.join(outputRoot, "skills")) + await syncQwenCommands(config, outputRoot) + + if (Object.keys(config.mcpServers).length > 0) { + await mergeJsonConfigAtKey({ + configPath: path.join(outputRoot, "settings.json"), + key: "mcpServers", + incoming: convertMcpForQwen(config.mcpServers), + }) + } +} + +function convertMcpForQwen( + servers: Record<string, ClaudeMcpServer>, +): Record<string, QwenMcpServer> { + const result: Record<string, QwenMcpServer> = {} + + for (const [name, server] of Object.entries(servers)) { + if (server.command) { + result[name] = { + command: server.command, + args: server.args, + env: server.env, + } + continue + } + + if (!server.url) { + continue + } + + if (hasExplicitSseTransport(server)) { + result[name] = { + url: server.url, + headers: server.headers, + } + continue + } + + if (!hasExplicitRemoteTransport(server)) { + console.warn( + `Warning: Qwen MCP server "${name}" has an ambiguous remote transport; defaulting to Streamable HTTP.`, + ) + } + + result[name] = { + httpUrl: server.url, + headers: server.headers, + } + } + + return result +} diff --git a/src/sync/registry.ts b/src/sync/registry.ts new file mode 100644 index 0000000..e3f58e6 --- /dev/null +++ b/src/sync/registry.ts @@ -0,0 +1,141 @@ +import os from "os" +import path from "path" +import type { ClaudeHomeConfig } from "../parsers/claude-home" +import { syncToCodex } from "./codex" +import { syncToCopilot } from "./copilot" +import { syncToDroid } from "./droid" +import { syncToGemini } from "./gemini" +import { syncToKiro } from "./kiro" +import { syncToOpenClaw } from "./openclaw" +import { syncToOpenCode } from "./opencode" +import { syncToPi } from "./pi" +import { syncToQwen } from "./qwen" +import { 
syncToWindsurf } from "./windsurf" + +function getCopilotHomeRoot(home: string): string { + return path.join(home, ".copilot") +} + +function getGeminiHomeRoot(home: string): string { + return path.join(home, ".gemini") +} + +export type SyncTargetName = + | "opencode" + | "codex" + | "pi" + | "droid" + | "copilot" + | "gemini" + | "windsurf" + | "kiro" + | "qwen" + | "openclaw" + +export type SyncTargetDefinition = { + name: SyncTargetName + detectPaths: (home: string, cwd: string) => string[] + resolveOutputRoot: (home: string, cwd: string) => string + sync: (config: ClaudeHomeConfig, outputRoot: string) => Promise<void> +} + +export const syncTargets: SyncTargetDefinition[] = [ + { + name: "opencode", + detectPaths: (home, cwd) => [ + path.join(home, ".config", "opencode"), + path.join(cwd, ".opencode"), + ], + resolveOutputRoot: (home) => path.join(home, ".config", "opencode"), + sync: syncToOpenCode, + }, + { + name: "codex", + detectPaths: (home) => [path.join(home, ".codex")], + resolveOutputRoot: (home) => path.join(home, ".codex"), + sync: syncToCodex, + }, + { + name: "pi", + detectPaths: (home) => [path.join(home, ".pi")], + resolveOutputRoot: (home) => path.join(home, ".pi", "agent"), + sync: syncToPi, + }, + { + name: "droid", + detectPaths: (home) => [path.join(home, ".factory")], + resolveOutputRoot: (home) => path.join(home, ".factory"), + sync: syncToDroid, + }, + { + name: "copilot", + detectPaths: (home, cwd) => [ + getCopilotHomeRoot(home), + path.join(cwd, ".github", "skills"), + path.join(cwd, ".github", "agents"), + path.join(cwd, ".github", "copilot-instructions.md"), + ], + resolveOutputRoot: (home) => getCopilotHomeRoot(home), + sync: syncToCopilot, + }, + { + name: "gemini", + detectPaths: (home, cwd) => [ + path.join(cwd, ".gemini"), + getGeminiHomeRoot(home), + ], + resolveOutputRoot: (home) => getGeminiHomeRoot(home), + sync: syncToGemini, + }, + { + name: "windsurf", + detectPaths: (home, cwd) => [ + path.join(home, ".codeium", 
"windsurf"), + path.join(cwd, ".windsurf"), + ], + resolveOutputRoot: (home) => path.join(home, ".codeium", "windsurf"), + sync: syncToWindsurf, + }, + { + name: "kiro", + detectPaths: (home, cwd) => [ + path.join(home, ".kiro"), + path.join(cwd, ".kiro"), + ], + resolveOutputRoot: (home) => path.join(home, ".kiro"), + sync: syncToKiro, + }, + { + name: "qwen", + detectPaths: (home, cwd) => [ + path.join(home, ".qwen"), + path.join(cwd, ".qwen"), + ], + resolveOutputRoot: (home) => path.join(home, ".qwen"), + sync: syncToQwen, + }, + { + name: "openclaw", + detectPaths: (home) => [path.join(home, ".openclaw")], + resolveOutputRoot: (home) => path.join(home, ".openclaw"), + sync: syncToOpenClaw, + }, +] + +export const syncTargetNames = syncTargets.map((target) => target.name) + +export function isSyncTargetName(value: string): value is SyncTargetName { + return syncTargetNames.includes(value as SyncTargetName) +} + +export function getSyncTarget(name: SyncTargetName): SyncTargetDefinition { + const target = syncTargets.find((entry) => entry.name === name) + if (!target) { + throw new Error(`Unknown sync target: ${name}`) + } + return target +} + +export function getDefaultSyncRegistryContext(): { home: string; cwd: string } { + return { home: os.homedir(), cwd: process.cwd() } +} diff --git a/src/sync/skills.ts b/src/sync/skills.ts new file mode 100644 index 0000000..1fde9f0 --- /dev/null +++ b/src/sync/skills.ts @@ -0,0 +1,21 @@ +import path from "path" +import type { ClaudeSkill } from "../types/claude" +import { ensureDir } from "../utils/files" +import { forceSymlink, isValidSkillName } from "../utils/symlink" + +export async function syncSkills( + skills: ClaudeSkill[], + skillsDir: string, +): Promise<void> { + await ensureDir(skillsDir) + + for (const skill of skills) { + if (!isValidSkillName(skill.name)) { + console.warn(`Skipping skill with invalid name: ${skill.name}`) + continue + } + + const target = path.join(skillsDir, skill.name) + await 
forceSymlink(skill.sourceDir, target) + } +} diff --git a/src/sync/windsurf.ts b/src/sync/windsurf.ts new file mode 100644 index 0000000..59bea89 --- /dev/null +++ b/src/sync/windsurf.ts @@ -0,0 +1,59 @@ +import path from "path" +import type { ClaudeHomeConfig } from "../parsers/claude-home" +import type { ClaudeMcpServer } from "../types/claude" +import type { WindsurfMcpServerEntry } from "../types/windsurf" +import { syncWindsurfCommands } from "./commands" +import { mergeJsonConfigAtKey } from "./json-config" +import { hasExplicitSseTransport } from "./mcp-transports" +import { syncSkills } from "./skills" + +export async function syncToWindsurf( + config: ClaudeHomeConfig, + outputRoot: string, +): Promise<void> { + await syncSkills(config.skills, path.join(outputRoot, "skills")) + await syncWindsurfCommands(config, outputRoot, "global") + + if (Object.keys(config.mcpServers).length > 0) { + await mergeJsonConfigAtKey({ + configPath: path.join(outputRoot, "mcp_config.json"), + key: "mcpServers", + incoming: convertMcpForWindsurf(config.mcpServers), + }) + } +} + +function convertMcpForWindsurf( + servers: Record<string, ClaudeMcpServer>, +): Record<string, WindsurfMcpServerEntry> { + const result: Record<string, WindsurfMcpServerEntry> = {} + + for (const [name, server] of Object.entries(servers)) { + if (server.command) { + result[name] = { + command: server.command, + args: server.args, + env: server.env, + } + continue + } + + if (!server.url) { + continue + } + + const entry: WindsurfMcpServerEntry = { + headers: server.headers, + } + + if (hasExplicitSseTransport(server)) { + entry.url = server.url + } else { + entry.serverUrl = server.url + } + + result[name] = entry + } + + return result +} diff --git a/src/targets/copilot.ts b/src/targets/copilot.ts new file mode 100644 index 0000000..d0d1b1c --- /dev/null +++ b/src/targets/copilot.ts @@ -0,0 +1,48 @@ +import path from "path" +import { backupFile, copyDir, ensureDir, writeJson, writeText } from 
"../utils/files" +import type { CopilotBundle } from "../types/copilot" + +export async function writeCopilotBundle(outputRoot: string, bundle: CopilotBundle): Promise<void> { + const paths = resolveCopilotPaths(outputRoot) + await ensureDir(paths.githubDir) + + if (bundle.agents.length > 0) { + const agentsDir = path.join(paths.githubDir, "agents") + for (const agent of bundle.agents) { + await writeText(path.join(agentsDir, `${agent.name}.agent.md`), agent.content + "\n") + } + } + + if (bundle.generatedSkills.length > 0) { + const skillsDir = path.join(paths.githubDir, "skills") + for (const skill of bundle.generatedSkills) { + await writeText(path.join(skillsDir, skill.name, "SKILL.md"), skill.content + "\n") + } + } + + if (bundle.skillDirs.length > 0) { + const skillsDir = path.join(paths.githubDir, "skills") + for (const skill of bundle.skillDirs) { + await copyDir(skill.sourceDir, path.join(skillsDir, skill.name)) + } + } + + if (bundle.mcpConfig && Object.keys(bundle.mcpConfig).length > 0) { + const mcpPath = path.join(paths.githubDir, "copilot-mcp-config.json") + const backupPath = await backupFile(mcpPath) + if (backupPath) { + console.log(`Backed up existing copilot-mcp-config.json to ${backupPath}`) + } + await writeJson(mcpPath, { mcpServers: bundle.mcpConfig }) + } +} + +function resolveCopilotPaths(outputRoot: string) { + const base = path.basename(outputRoot) + // If already pointing at .github, write directly into it + if (base === ".github") { + return { githubDir: outputRoot } + } + // Otherwise nest under .github + return { githubDir: path.join(outputRoot, ".github") } +} diff --git a/src/targets/cursor.ts b/src/targets/cursor.ts deleted file mode 100644 index dd9c123..0000000 --- a/src/targets/cursor.ts +++ /dev/null @@ -1,48 +0,0 @@ -import path from "path" -import { backupFile, copyDir, ensureDir, writeJson, writeText } from "../utils/files" -import type { CursorBundle } from "../types/cursor" - -export async function 
writeCursorBundle(outputRoot: string, bundle: CursorBundle): Promise<void> { - const paths = resolveCursorPaths(outputRoot) - await ensureDir(paths.cursorDir) - - if (bundle.rules.length > 0) { - const rulesDir = path.join(paths.cursorDir, "rules") - for (const rule of bundle.rules) { - await writeText(path.join(rulesDir, `${rule.name}.mdc`), rule.content + "\n") - } - } - - if (bundle.commands.length > 0) { - const commandsDir = path.join(paths.cursorDir, "commands") - for (const command of bundle.commands) { - await writeText(path.join(commandsDir, `${command.name}.md`), command.content + "\n") - } - } - - if (bundle.skillDirs.length > 0) { - const skillsDir = path.join(paths.cursorDir, "skills") - for (const skill of bundle.skillDirs) { - await copyDir(skill.sourceDir, path.join(skillsDir, skill.name)) - } - } - - if (bundle.mcpServers && Object.keys(bundle.mcpServers).length > 0) { - const mcpPath = path.join(paths.cursorDir, "mcp.json") - const backupPath = await backupFile(mcpPath) - if (backupPath) { - console.log(`Backed up existing mcp.json to ${backupPath}`) - } - await writeJson(mcpPath, { mcpServers: bundle.mcpServers }) - } -} - -function resolveCursorPaths(outputRoot: string) { - const base = path.basename(outputRoot) - // If already pointing at .cursor, write directly into it - if (base === ".cursor") { - return { cursorDir: outputRoot } - } - // Otherwise nest under .cursor - return { cursorDir: path.join(outputRoot, ".cursor") } -} diff --git a/src/targets/droid.ts b/src/targets/droid.ts index 8560076..23bd46e 100644 --- a/src/targets/droid.ts +++ b/src/targets/droid.ts @@ -1,5 +1,5 @@ import path from "path" -import { copyDir, ensureDir, writeText } from "../utils/files" +import { copyDir, ensureDir, resolveCommandPath, writeText } from "../utils/files" import type { DroidBundle } from "../types/droid" export async function writeDroidBundle(outputRoot: string, bundle: DroidBundle): Promise<void> { @@ -9,7 +9,8 @@ export async function 
writeDroidBundle(outputRoot: string, bundle: DroidBundle): if (bundle.commands.length > 0) { await ensureDir(paths.commandsDir) for (const command of bundle.commands) { - await writeText(path.join(paths.commandsDir, `${command.name}.md`), command.content + "\n") + const dest = await resolveCommandPath(paths.commandsDir, command.name, ".md") + await writeText(dest, command.content + "\n") } } diff --git a/src/targets/gemini.ts b/src/targets/gemini.ts index 0bc8c66..0df7d51 100644 --- a/src/targets/gemini.ts +++ b/src/targets/gemini.ts @@ -1,5 +1,5 @@ import path from "path" -import { backupFile, copyDir, ensureDir, pathExists, readJson, writeJson, writeText } from "../utils/files" +import { backupFile, copyDir, ensureDir, pathExists, readJson, resolveCommandPath, writeJson, writeText } from "../utils/files" import type { GeminiBundle } from "../types/gemini" export async function writeGeminiBundle(outputRoot: string, bundle: GeminiBundle): Promise<void> { @@ -20,7 +20,8 @@ export async function writeGeminiBundle(outputRoot: string, bundle: GeminiBundle if (bundle.commands.length > 0) { for (const command of bundle.commands) { - await writeText(path.join(paths.commandsDir, `${command.name}.toml`), command.content + "\n") + const dest = await resolveCommandPath(paths.commandsDir, command.name, ".toml") + await writeText(dest, command.content + "\n") } } diff --git a/src/targets/index.ts b/src/targets/index.ts index b76dfc1..b1214d0 100644 --- a/src/targets/index.ts +++ b/src/targets/index.ts @@ -2,27 +2,69 @@ import type { ClaudePlugin } from "../types/claude" import type { OpenCodeBundle } from "../types/opencode" import type { CodexBundle } from "../types/codex" import type { DroidBundle } from "../types/droid" -import type { CursorBundle } from "../types/cursor" import type { PiBundle } from "../types/pi" +import type { CopilotBundle } from "../types/copilot" import type { GeminiBundle } from "../types/gemini" +import type { KiroBundle } from "../types/kiro" 
+import type { WindsurfBundle } from "../types/windsurf" +import type { OpenClawBundle } from "../types/openclaw" +import type { QwenBundle } from "../types/qwen" import { convertClaudeToOpenCode, type ClaudeToOpenCodeOptions } from "../converters/claude-to-opencode" import { convertClaudeToCodex } from "../converters/claude-to-codex" import { convertClaudeToDroid } from "../converters/claude-to-droid" -import { convertClaudeToCursor } from "../converters/claude-to-cursor" import { convertClaudeToPi } from "../converters/claude-to-pi" +import { convertClaudeToCopilot } from "../converters/claude-to-copilot" import { convertClaudeToGemini } from "../converters/claude-to-gemini" +import { convertClaudeToKiro } from "../converters/claude-to-kiro" +import { convertClaudeToWindsurf } from "../converters/claude-to-windsurf" +import { convertClaudeToOpenClaw } from "../converters/claude-to-openclaw" +import { convertClaudeToQwen } from "../converters/claude-to-qwen" import { writeOpenCodeBundle } from "./opencode" import { writeCodexBundle } from "./codex" import { writeDroidBundle } from "./droid" -import { writeCursorBundle } from "./cursor" import { writePiBundle } from "./pi" +import { writeCopilotBundle } from "./copilot" import { writeGeminiBundle } from "./gemini" +import { writeKiroBundle } from "./kiro" +import { writeWindsurfBundle } from "./windsurf" +import { writeOpenClawBundle } from "./openclaw" +import { writeQwenBundle } from "./qwen" + +export type TargetScope = "global" | "workspace" + +export function isTargetScope(value: string): value is TargetScope { + return value === "global" || value === "workspace" +} + +/** + * Validate a --scope flag against a target's supported scopes. + * Returns the resolved scope (explicit or default) or throws on invalid input. 
+ */ +export function validateScope( + targetName: string, + target: TargetHandler, + scopeArg: string | undefined, +): TargetScope | undefined { + if (scopeArg === undefined) return target.defaultScope + + if (!target.supportedScopes) { + throw new Error(`Target "${targetName}" does not support the --scope flag.`) + } + if (!isTargetScope(scopeArg) || !target.supportedScopes.includes(scopeArg)) { + throw new Error(`Target "${targetName}" does not support --scope ${scopeArg}. Supported: ${target.supportedScopes.join(", ")}`) + } + return scopeArg +} export type TargetHandler<TBundle = unknown> = { name: string implemented: boolean + /** Default scope when --scope is not provided. Only meaningful when supportedScopes is defined. */ + defaultScope?: TargetScope + /** Valid scope values. If absent, the --scope flag is rejected for this target. */ + supportedScopes?: TargetScope[] convert: (plugin: ClaudePlugin, options: ClaudeToOpenCodeOptions) => TBundle | null - write: (outputRoot: string, bundle: TBundle) => Promise<void> + write: (outputRoot: string, bundle: TBundle, scope?: TargetScope) => Promise<void> } export const targets: Record<string, TargetHandler> = { @@ -44,22 +86,48 @@ export const targets: Record<string, TargetHandler> = { convert: convertClaudeToDroid as TargetHandler<DroidBundle>["convert"], write: writeDroidBundle as TargetHandler<DroidBundle>["write"], }, - cursor: { - name: "cursor", - implemented: true, - convert: convertClaudeToCursor as TargetHandler<CursorBundle>["convert"], - write: writeCursorBundle as TargetHandler<CursorBundle>["write"], - }, pi: { name: "pi", implemented: true, convert: convertClaudeToPi as TargetHandler<PiBundle>["convert"], write: writePiBundle as TargetHandler<PiBundle>["write"], }, + copilot: { + name: "copilot", + implemented: true, + convert: convertClaudeToCopilot as TargetHandler<CopilotBundle>["convert"], + write: writeCopilotBundle as TargetHandler<CopilotBundle>["write"], + }, gemini: { name: "gemini", 
implemented: true, convert: convertClaudeToGemini as TargetHandler<GeminiBundle>["convert"], write: writeGeminiBundle as TargetHandler<GeminiBundle>["write"], }, + kiro: { + name: "kiro", + implemented: true, + convert: convertClaudeToKiro as TargetHandler<KiroBundle>["convert"], + write: writeKiroBundle as TargetHandler<KiroBundle>["write"], + }, + windsurf: { + name: "windsurf", + implemented: true, + defaultScope: "global", + supportedScopes: ["global", "workspace"], + convert: convertClaudeToWindsurf as TargetHandler<WindsurfBundle>["convert"], + write: writeWindsurfBundle as TargetHandler<WindsurfBundle>["write"], + }, + openclaw: { + name: "openclaw", + implemented: true, + convert: convertClaudeToOpenClaw as TargetHandler<OpenClawBundle>["convert"], + write: writeOpenClawBundle as TargetHandler<OpenClawBundle>["write"], + }, + qwen: { + name: "qwen", + implemented: true, + convert: convertClaudeToQwen as TargetHandler<QwenBundle>["convert"], + write: writeQwenBundle as TargetHandler<QwenBundle>["write"], + }, } diff --git a/src/targets/kiro.ts b/src/targets/kiro.ts new file mode 100644 index 0000000..3597951 --- /dev/null +++ b/src/targets/kiro.ts @@ -0,0 +1,122 @@ +import path from "path" +import { backupFile, copyDir, ensureDir, pathExists, readJson, writeJson, writeText } from "../utils/files" +import type { KiroBundle } from "../types/kiro" + +export async function writeKiroBundle(outputRoot: string, bundle: KiroBundle): Promise<void> { + const paths = resolveKiroPaths(outputRoot) + await ensureDir(paths.kiroDir) + + // Write agents + if (bundle.agents.length > 0) { + for (const agent of bundle.agents) { + // Validate name doesn't escape agents directory + validatePathSafe(agent.name, "agent") + + // Write agent JSON config + await writeJson( + path.join(paths.agentsDir, `${agent.name}.json`), + agent.config, + ) + + // Write agent prompt file + await writeText( + path.join(paths.agentsDir, "prompts", `${agent.name}.md`), + agent.promptContent + "\n", + 
) + } + } + + // Write generated skills (from commands) + if (bundle.generatedSkills.length > 0) { + for (const skill of bundle.generatedSkills) { + validatePathSafe(skill.name, "skill") + await writeText( + path.join(paths.skillsDir, skill.name, "SKILL.md"), + skill.content + "\n", + ) + } + } + + // Copy skill directories (pass-through) + if (bundle.skillDirs.length > 0) { + for (const skill of bundle.skillDirs) { + validatePathSafe(skill.name, "skill directory") + const destDir = path.join(paths.skillsDir, skill.name) + + // Validate destination doesn't escape skills directory + const resolvedDest = path.resolve(destDir) + if (!resolvedDest.startsWith(path.resolve(paths.skillsDir))) { + console.warn(`Warning: Skill name "${skill.name}" escapes .kiro/skills/. Skipping.`) + continue + } + + await copyDir(skill.sourceDir, destDir) + } + } + + // Write steering files + if (bundle.steeringFiles.length > 0) { + for (const file of bundle.steeringFiles) { + validatePathSafe(file.name, "steering file") + await writeText( + path.join(paths.steeringDir, `${file.name}.md`), + file.content + "\n", + ) + } + } + + // Write MCP servers to mcp.json + if (Object.keys(bundle.mcpServers).length > 0) { + const mcpPath = path.join(paths.settingsDir, "mcp.json") + const backupPath = await backupFile(mcpPath) + if (backupPath) { + console.log(`Backed up existing mcp.json to ${backupPath}`) + } + + // Merge with existing mcp.json if present + let existingConfig: Record<string, unknown> = {} + if (await pathExists(mcpPath)) { + try { + existingConfig = await readJson<Record<string, unknown>>(mcpPath) + } catch { + console.warn("Warning: existing mcp.json could not be parsed and will be replaced.") + } + } + + const existingServers = + existingConfig.mcpServers && typeof existingConfig.mcpServers === "object" + ? 
(existingConfig.mcpServers as Record<string, unknown>) + : {} + const merged = { ...existingConfig, mcpServers: { ...existingServers, ...bundle.mcpServers } } + await writeJson(mcpPath, merged) + } +} + +function resolveKiroPaths(outputRoot: string) { + const base = path.basename(outputRoot) + // If already pointing at .kiro, write directly into it + if (base === ".kiro") { + return { + kiroDir: outputRoot, + agentsDir: path.join(outputRoot, "agents"), + skillsDir: path.join(outputRoot, "skills"), + steeringDir: path.join(outputRoot, "steering"), + settingsDir: path.join(outputRoot, "settings"), + } + } + // Otherwise nest under .kiro + const kiroDir = path.join(outputRoot, ".kiro") + return { + kiroDir, + agentsDir: path.join(kiroDir, "agents"), + skillsDir: path.join(kiroDir, "skills"), + steeringDir: path.join(kiroDir, "steering"), + settingsDir: path.join(kiroDir, "settings"), + } +} + +function validatePathSafe(name: string, label: string): void { + if (name.includes("..") || name.includes("/") || name.includes("\\")) { + throw new Error(`${label} name contains unsafe path characters: ${name}`) + } +} diff --git a/src/targets/openclaw.ts b/src/targets/openclaw.ts new file mode 100644 index 0000000..d2ec688 --- /dev/null +++ b/src/targets/openclaw.ts @@ -0,0 +1,96 @@ +import path from "path" +import { promises as fs } from "fs" +import { backupFile, copyDir, ensureDir, pathExists, readJson, walkFiles, writeJson, writeText } from "../utils/files" +import type { OpenClawBundle } from "../types/openclaw" + +export async function writeOpenClawBundle(outputRoot: string, bundle: OpenClawBundle): Promise<void> { + const paths = resolveOpenClawPaths(outputRoot) + await ensureDir(paths.root) + + // Write openclaw.plugin.json + await writeJson(paths.manifestPath, bundle.manifest) + + // Write package.json + await writeJson(paths.packageJsonPath, bundle.packageJson) + + // Write index.ts entry point + await writeText(paths.entryPointPath, bundle.entryPoint) + + // Write 
generated skills (agents + commands converted to SKILL.md) + for (const skill of bundle.skills) { + const skillDir = path.join(paths.skillsDir, skill.dir) + await ensureDir(skillDir) + await writeText(path.join(skillDir, "SKILL.md"), skill.content + "\n") + } + + // Copy original skill directories (preserving references/, assets/, scripts/) + // and rewrite .claude/ paths to .openclaw/ in markdown files + for (const skill of bundle.skillDirCopies) { + const destDir = path.join(paths.skillsDir, skill.name) + await copyDir(skill.sourceDir, destDir) + await rewritePathsInDir(destDir) + } + + // Write openclaw.json config fragment if MCP servers exist + if (bundle.openclawConfig) { + const configPath = path.join(paths.root, "openclaw.json") + const backupPath = await backupFile(configPath) + if (backupPath) { + console.log(`Backed up existing config to ${backupPath}`) + } + const merged = await mergeOpenClawConfig(configPath, bundle.openclawConfig) + await writeJson(configPath, merged) + } +} + +function resolveOpenClawPaths(outputRoot: string) { + return { + root: outputRoot, + manifestPath: path.join(outputRoot, "openclaw.plugin.json"), + packageJsonPath: path.join(outputRoot, "package.json"), + entryPointPath: path.join(outputRoot, "index.ts"), + skillsDir: path.join(outputRoot, "skills"), + } +} + +async function rewritePathsInDir(dir: string): Promise<void> { + const files = await walkFiles(dir) + for (const file of files) { + if (!file.endsWith(".md")) continue + const content = await fs.readFile(file, "utf8") + const rewritten = content + .replace(/~\/\.claude\//g, "~/.openclaw/") + .replace(/\.claude\//g, ".openclaw/") + .replace(/\.claude-plugin\//g, "openclaw-plugin/") + if (rewritten !== content) { + await fs.writeFile(file, rewritten, "utf8") + } + } +} + +async function mergeOpenClawConfig( + configPath: string, + incoming: Record<string, unknown>, +): Promise<Record<string, unknown>> { + if (!(await pathExists(configPath))) return incoming + + let 
existing: Record<string, unknown> + try { + existing = await readJson<Record<string, unknown>>(configPath) + } catch { + console.warn( + `Warning: existing ${configPath} is not valid JSON. Writing plugin config without merging.`, + ) + return incoming + } + + // Merge MCP servers: existing takes precedence on conflict + const incomingMcp = (incoming.mcpServers ?? {}) as Record<string, unknown> + const existingMcp = (existing.mcpServers ?? {}) as Record<string, unknown> + const mergedMcp = { ...incomingMcp, ...existingMcp } + + return { + ...existing, + mcpServers: Object.keys(mergedMcp).length > 0 ? mergedMcp : undefined, + } +} diff --git a/src/targets/opencode.ts b/src/targets/opencode.ts index 24e8faf..cff2931 100644 --- a/src/targets/opencode.ts +++ b/src/targets/opencode.ts @@ -1,31 +1,97 @@ import path from "path" -import { backupFile, copyDir, ensureDir, writeJson, writeText } from "../utils/files" -import type { OpenCodeBundle } from "../types/opencode" +import { backupFile, copyDir, ensureDir, pathExists, readJson, resolveCommandPath, writeJson, writeText } from "../utils/files" +import type { OpenCodeBundle, OpenCodeConfig } from "../types/opencode" + +// Merges plugin config into existing opencode.json. User keys win on conflict. See ADR-002. +async function mergeOpenCodeConfig( + configPath: string, + incoming: OpenCodeConfig, +): Promise<OpenCodeConfig> { + // If no existing config, write plugin config as-is + if (!(await pathExists(configPath))) return incoming + + let existing: OpenCodeConfig + try { + existing = await readJson<OpenCodeConfig>(configPath) + } catch { + // Safety first per AGENTS.md -- do not destroy user data even if their config is malformed. + // Warn and fall back to plugin-only config rather than crashing. + console.warn( + `Warning: existing ${configPath} is not valid JSON. 
Writing plugin config without merging.` + ) + return incoming + } + + // User config wins on conflict -- see ADR-002 + // MCP servers: add plugin entry, skip keys already in user config. + const mergedMcp = { + ...(incoming.mcp ?? {}), + ...(existing.mcp ?? {}), // existing takes precedence (overwrites same-named plugin entry) + } + + // Permission: add plugin entry, skip keys already in user config. + const mergedPermission = incoming.permission + ? { + ...(incoming.permission), + ...(existing.permission ?? {}), // existing takes precedence + } + : existing.permission + + // Tools: same pattern + const mergedTools = incoming.tools + ? { + ...(incoming.tools), + ...(existing.tools ?? {}), + } + : existing.tools + + return { + ...existing, // all user keys preserved + $schema: incoming.$schema ?? existing.$schema, + mcp: Object.keys(mergedMcp).length > 0 ? mergedMcp : undefined, + permission: mergedPermission, + tools: mergedTools, + } +} export async function writeOpenCodeBundle(outputRoot: string, bundle: OpenCodeBundle): Promise<void> { - const paths = resolveOpenCodePaths(outputRoot) - await ensureDir(paths.root) + const openCodePaths = resolveOpenCodePaths(outputRoot) + await ensureDir(openCodePaths.root) - const backupPath = await backupFile(paths.configPath) + const hadExistingConfig = await pathExists(openCodePaths.configPath) + const backupPath = await backupFile(openCodePaths.configPath) if (backupPath) { console.log(`Backed up existing config to ${backupPath}`) } - await writeJson(paths.configPath, bundle.config) + const merged = await mergeOpenCodeConfig(openCodePaths.configPath, bundle.config) + await writeJson(openCodePaths.configPath, merged) + if (hadExistingConfig) { + console.log("Merged plugin config into existing opencode.json (user settings preserved)") + } - const agentsDir = paths.agentsDir + const agentsDir = openCodePaths.agentsDir for (const agent of bundle.agents) { await writeText(path.join(agentsDir, `${agent.name}.md`), agent.content + 
"\n") } + for (const commandFile of bundle.commandFiles) { + const dest = await resolveCommandPath(openCodePaths.commandDir, commandFile.name, ".md") + const cmdBackupPath = await backupFile(dest) + if (cmdBackupPath) { + console.log(`Backed up existing command file to ${cmdBackupPath}`) + } + await writeText(dest, commandFile.content + "\n") + } + if (bundle.plugins.length > 0) { - const pluginsDir = paths.pluginsDir + const pluginsDir = openCodePaths.pluginsDir for (const plugin of bundle.plugins) { await writeText(path.join(pluginsDir, plugin.name), plugin.content + "\n") } } if (bundle.skillDirs.length > 0) { - const skillsRoot = paths.skillsDir + const skillsRoot = openCodePaths.skillsDir for (const skill of bundle.skillDirs) { await copyDir(skill.sourceDir, path.join(skillsRoot, skill.name)) } @@ -43,6 +109,8 @@ function resolveOpenCodePaths(outputRoot: string) { agentsDir: path.join(outputRoot, "agents"), pluginsDir: path.join(outputRoot, "plugins"), skillsDir: path.join(outputRoot, "skills"), + // .md command files; alternative to the command key in opencode.json + commandDir: path.join(outputRoot, "commands"), } } @@ -53,5 +121,7 @@ function resolveOpenCodePaths(outputRoot: string) { agentsDir: path.join(outputRoot, ".opencode", "agents"), pluginsDir: path.join(outputRoot, ".opencode", "plugins"), skillsDir: path.join(outputRoot, ".opencode", "skills"), + // .md command files; alternative to the command key in opencode.json + commandDir: path.join(outputRoot, ".opencode", "commands"), } -} +} \ No newline at end of file diff --git a/src/targets/qwen.ts b/src/targets/qwen.ts new file mode 100644 index 0000000..22fe296 --- /dev/null +++ b/src/targets/qwen.ts @@ -0,0 +1,57 @@ +import path from "path" +import { backupFile, copyDir, ensureDir, resolveCommandPath, writeJson, writeText } from "../utils/files" +import type { QwenBundle, QwenExtensionConfig } from "../types/qwen" + +export async function writeQwenBundle(outputRoot: string, bundle: QwenBundle): 
Promise<void> { + const qwenPaths = resolveQwenPaths(outputRoot) + await ensureDir(qwenPaths.root) + + // Write qwen-extension.json config + const configPath = qwenPaths.configPath + const backupPath = await backupFile(configPath) + if (backupPath) { + console.log(`Backed up existing config to ${backupPath}`) + } + await writeJson(configPath, bundle.config) + + // Write context file (QWEN.md) + if (bundle.contextFile) { + await writeText(qwenPaths.contextPath, bundle.contextFile + "\n") + } + + // Write agents + const agentsDir = qwenPaths.agentsDir + await ensureDir(agentsDir) + for (const agent of bundle.agents) { + const ext = agent.format === "yaml" ? "yaml" : "md" + await writeText(path.join(agentsDir, `${agent.name}.${ext}`), agent.content + "\n") + } + + // Write commands + const commandsDir = qwenPaths.commandsDir + await ensureDir(commandsDir) + for (const commandFile of bundle.commandFiles) { + const dest = await resolveCommandPath(commandsDir, commandFile.name, ".md") + await writeText(dest, commandFile.content + "\n") + } + + // Copy skills + if (bundle.skillDirs.length > 0) { + const skillsRoot = qwenPaths.skillsDir + await ensureDir(skillsRoot) + for (const skill of bundle.skillDirs) { + await copyDir(skill.sourceDir, path.join(skillsRoot, skill.name)) + } + } +} + +function resolveQwenPaths(outputRoot: string) { + return { + root: outputRoot, + configPath: path.join(outputRoot, "qwen-extension.json"), + contextPath: path.join(outputRoot, "QWEN.md"), + agentsDir: path.join(outputRoot, "agents"), + commandsDir: path.join(outputRoot, "commands"), + skillsDir: path.join(outputRoot, "skills"), + } +} diff --git a/src/targets/windsurf.ts b/src/targets/windsurf.ts new file mode 100644 index 0000000..ee96045 --- /dev/null +++ b/src/targets/windsurf.ts @@ -0,0 +1,104 @@ +import path from "path" +import { backupFile, copyDir, ensureDir, pathExists, readJson, writeJsonSecure, writeText } from "../utils/files" +import { formatFrontmatter } from 
"../utils/frontmatter" +import type { WindsurfBundle } from "../types/windsurf" +import type { TargetScope } from "./index" + +/** + * Write a WindsurfBundle directly into outputRoot. + * + * Unlike other target writers, this writer expects outputRoot to be the final + * resolved directory — the CLI handles scope-based nesting (global vs workspace). + */ +export async function writeWindsurfBundle(outputRoot: string, bundle: WindsurfBundle, scope?: TargetScope): Promise<void> { + await ensureDir(outputRoot) + + // Write agent skills (before pass-through copies so pass-through takes precedence on collision) + if (bundle.agentSkills.length > 0) { + const skillsDir = path.join(outputRoot, "skills") + await ensureDir(skillsDir) + for (const skill of bundle.agentSkills) { + validatePathSafe(skill.name, "agent skill") + const destDir = path.join(skillsDir, skill.name) + + const resolvedDest = path.resolve(destDir) + if (!resolvedDest.startsWith(path.resolve(skillsDir))) { + console.warn(`Warning: Agent skill name "${skill.name}" escapes skills/. Skipping.`) + continue + } + + await ensureDir(destDir) + await writeText(path.join(destDir, "SKILL.md"), skill.content) + } + } + + // Write command workflows (flat in global_workflows/ for global scope, workflows/ for workspace) + if (bundle.commandWorkflows.length > 0) { + const workflowsDirName = scope === "global" ? 
"global_workflows" : "workflows" + const workflowsDir = path.join(outputRoot, workflowsDirName) + await ensureDir(workflowsDir) + for (const workflow of bundle.commandWorkflows) { + validatePathSafe(workflow.name, "command workflow") + const content = formatWorkflowContent(workflow.name, workflow.description, workflow.body) + await writeText(path.join(workflowsDir, `${workflow.name}.md`), content) + } + } + + // Copy pass-through skill directories (after generated skills so copies overwrite on collision) + if (bundle.skillDirs.length > 0) { + const skillsDir = path.join(outputRoot, "skills") + await ensureDir(skillsDir) + for (const skill of bundle.skillDirs) { + validatePathSafe(skill.name, "skill directory") + const destDir = path.join(skillsDir, skill.name) + + const resolvedDest = path.resolve(destDir) + if (!resolvedDest.startsWith(path.resolve(skillsDir))) { + console.warn(`Warning: Skill name "${skill.name}" escapes skills/. Skipping.`) + continue + } + + await copyDir(skill.sourceDir, destDir) + } + } + + // Merge MCP config + if (bundle.mcpConfig) { + const mcpPath = path.join(outputRoot, "mcp_config.json") + const backupPath = await backupFile(mcpPath) + if (backupPath) { + console.log(`Backed up existing mcp_config.json to ${backupPath}`) + } + + let existingConfig: Record<string, unknown> = {} + if (await pathExists(mcpPath)) { + try { + const parsed = await readJson<unknown>(mcpPath) + if (parsed && typeof parsed === "object" && !Array.isArray(parsed)) { + existingConfig = parsed as Record<string, unknown> + } + } catch { + console.warn("Warning: existing mcp_config.json could not be parsed and will be replaced.") + } + } + + const existingServers = + existingConfig.mcpServers && + typeof existingConfig.mcpServers === "object" && + !Array.isArray(existingConfig.mcpServers) + ? 
(existingConfig.mcpServers as Record<string, unknown>) + : {} + const merged = { ...existingConfig, mcpServers: { ...existingServers, ...bundle.mcpConfig.mcpServers } } + await writeJsonSecure(mcpPath, merged) + } +} + +function validatePathSafe(name: string, label: string): void { + if (name.includes("..") || name.includes("/") || name.includes("\\")) { + throw new Error(`${label} name contains unsafe path characters: ${name}`) + } +} + +function formatWorkflowContent(name: string, description: string, body: string): string { + return formatFrontmatter({ description }, `# ${name}\n\n${body}`) + "\n" +} diff --git a/src/types/copilot.ts b/src/types/copilot.ts new file mode 100644 index 0000000..8d1ae12 --- /dev/null +++ b/src/types/copilot.ts @@ -0,0 +1,31 @@ +export type CopilotAgent = { + name: string + content: string +} + +export type CopilotGeneratedSkill = { + name: string + content: string +} + +export type CopilotSkillDir = { + name: string + sourceDir: string +} + +export type CopilotMcpServer = { + type: string + command?: string + args?: string[] + url?: string + tools: string[] + env?: Record<string, string> + headers?: Record<string, string> +} + +export type CopilotBundle = { + agents: CopilotAgent[] + generatedSkills: CopilotGeneratedSkill[] + skillDirs: CopilotSkillDir[] + mcpConfig?: Record<string, CopilotMcpServer> +} diff --git a/src/types/cursor.ts b/src/types/cursor.ts deleted file mode 100644 index fc88828..0000000 --- a/src/types/cursor.ts +++ /dev/null @@ -1,29 +0,0 @@ -export type CursorRule = { - name: string - content: string -} - -export type CursorCommand = { - name: string - content: string -} - -export type CursorSkillDir = { - name: string - sourceDir: string -} - -export type CursorMcpServer = { - command?: string - args?: string[] - env?: Record<string, string> - url?: string - headers?: Record<string, string> -} - -export type CursorBundle = { - rules: CursorRule[] - commands: CursorCommand[] - skillDirs: CursorSkillDir[] - 
mcpServers?: Record<string, CursorMcpServer> -} diff --git a/src/types/kiro.ts b/src/types/kiro.ts new file mode 100644 index 0000000..00491c8 --- /dev/null +++ b/src/types/kiro.ts @@ -0,0 +1,46 @@ +export type KiroAgent = { + name: string + config: KiroAgentConfig + promptContent: string +} + +export type KiroAgentConfig = { + name: string + description: string + prompt: `file://${string}` + tools: ["*"] + resources: string[] + includeMcpJson: true + welcomeMessage?: string +} + +export type KiroSkill = { + name: string + content: string // Full SKILL.md with YAML frontmatter +} + +export type KiroSkillDir = { + name: string + sourceDir: string +} + +export type KiroSteeringFile = { + name: string + content: string +} + +export type KiroMcpServer = { + command?: string + args?: string[] + env?: Record<string, string> + url?: string + headers?: Record<string, string> +} + +export type KiroBundle = { + agents: KiroAgent[] + generatedSkills: KiroSkill[] + skillDirs: KiroSkillDir[] + steeringFiles: KiroSteeringFile[] + mcpServers: Record<string, KiroMcpServer> +} diff --git a/src/types/openclaw.ts b/src/types/openclaw.ts new file mode 100644 index 0000000..378336f --- /dev/null +++ b/src/types/openclaw.ts @@ -0,0 +1,54 @@ +export type OpenClawPluginManifest = { + id: string + name: string + kind: "tool" + configSchema: OpenClawConfigSchema + uiHints?: Record<string, OpenClawUiHint> + skills?: string[] +} + +export type OpenClawConfigSchema = { + type: "object" + properties: Record<string, OpenClawConfigProperty> + additionalProperties?: boolean + required?: string[] +} + +export type OpenClawConfigProperty = { + type: string + description?: string + default?: unknown +} + +export type OpenClawUiHint = { + label: string + sensitive?: boolean + placeholder?: string +} + +export type OpenClawSkillFile = { + name: string + content: string + /** Subdirectory path inside skills/ (e.g. 
"agent-native-reviewer") */ + dir: string +} + +export type OpenClawCommandRegistration = { + name: string + description: string + acceptsArgs: boolean + /** The prompt body that becomes the command handler response */ + body: string +} + +export type OpenClawBundle = { + manifest: OpenClawPluginManifest + packageJson: Record<string, unknown> + entryPoint: string + skills: OpenClawSkillFile[] + /** Skill directories to copy verbatim (original Claude skills with references/) */ + skillDirCopies: { sourceDir: string; name: string }[] + commands: OpenClawCommandRegistration[] + /** openclaw.json fragment for MCP servers */ + openclawConfig?: Record<string, unknown> +} diff --git a/src/types/opencode.ts b/src/types/opencode.ts index 0338892..a66546e 100644 --- a/src/types/opencode.ts +++ b/src/types/opencode.ts @@ -7,7 +7,6 @@ export type OpenCodeConfig = { tools?: Record<string, boolean> permission?: Record<string, OpenCodePermission | Record<string, OpenCodePermission>> agent?: Record<string, OpenCodeAgentConfig> - command?: Record<string, OpenCodeCommandConfig> mcp?: Record<string, OpenCodeMcpServer> } @@ -20,13 +19,6 @@ export type OpenCodeAgentConfig = { permission?: Record<string, OpenCodePermission> } -export type OpenCodeCommandConfig = { - description?: string - model?: string - agent?: string - template: string -} - export type OpenCodeMcpServer = { type: "local" | "remote" command?: string[] @@ -46,9 +38,16 @@ export type OpenCodePluginFile = { content: string } +export type OpenCodeCommandFile = { + name: string + content: string +} + export type OpenCodeBundle = { config: OpenCodeConfig agents: OpenCodeAgentFile[] + // Commands are written as individual .md files, not in opencode.json. See ADR-001. 
+ commandFiles: OpenCodeCommandFile[] plugins: OpenCodePluginFile[] skillDirs: { sourceDir: string; name: string }[] } diff --git a/src/types/qwen.ts b/src/types/qwen.ts new file mode 100644 index 0000000..c6bb106 --- /dev/null +++ b/src/types/qwen.ts @@ -0,0 +1,51 @@ +export type QwenExtensionConfig = { + name: string + version: string + mcpServers?: Record<string, QwenMcpServer> + contextFileName?: string + commands?: string + skills?: string + agents?: string + settings?: QwenSetting[] +} + +export type QwenMcpServer = { + command?: string + args?: string[] + env?: Record<string, string> + cwd?: string + httpUrl?: string + url?: string + headers?: Record<string, string> +} + +export type QwenSetting = { + name: string + description: string + envVar: string + sensitive?: boolean +} + +export type QwenAgentFile = { + name: string + content: string + format: "yaml" | "markdown" +} + +export type QwenSkillDir = { + sourceDir: string + name: string +} + +export type QwenCommandFile = { + name: string + content: string +} + +export type QwenBundle = { + config: QwenExtensionConfig + agents: QwenAgentFile[] + commandFiles: QwenCommandFile[] + skillDirs: QwenSkillDir[] + contextFile?: string +} diff --git a/src/types/windsurf.ts b/src/types/windsurf.ts new file mode 100644 index 0000000..14e9ff9 --- /dev/null +++ b/src/types/windsurf.ts @@ -0,0 +1,35 @@ +export type WindsurfWorkflow = { + name: string + description: string + body: string +} + +export type WindsurfGeneratedSkill = { + name: string + content: string +} + +export type WindsurfSkillDir = { + name: string + sourceDir: string +} + +export type WindsurfMcpServerEntry = { + command?: string + args?: string[] + env?: Record<string, string> + serverUrl?: string + url?: string + headers?: Record<string, string> +} + +export type WindsurfMcpConfig = { + mcpServers: Record<string, WindsurfMcpServerEntry> +} + +export type WindsurfBundle = { + agentSkills: WindsurfGeneratedSkill[] + commandWorkflows: 
WindsurfWorkflow[] + skillDirs: WindsurfSkillDir[] + mcpConfig: WindsurfMcpConfig | null +} diff --git a/src/utils/codex-agents.ts b/src/utils/codex-agents.ts index 620e1ce..23cc05a 100644 --- a/src/utils/codex-agents.ts +++ b/src/utils/codex-agents.ts @@ -18,7 +18,7 @@ Tool mapping: - Glob: use rg --files or find - LS: use ls via shell_command - WebFetch/WebSearch: use curl or Context7 for library docs -- AskUserQuestion/Question: ask the user in chat +- AskUserQuestion/Question: present choices as a numbered list in chat and wait for a reply number. For multi-select (multiSelect: true), accept comma-separated numbers. Never skip or auto-configure — always wait for the user's response before proceeding. - Task/Subagent/Parallel: run sequentially in main thread; use multi_tool_use.parallel for tool calls - TodoWrite/TodoRead: use file-based todos in todos/ with file-todos skill - Skill: open the referenced SKILL.md and follow it diff --git a/src/utils/detect-tools.ts b/src/utils/detect-tools.ts new file mode 100644 index 0000000..f27bed5 --- /dev/null +++ b/src/utils/detect-tools.ts @@ -0,0 +1,37 @@ +import os from "os" +import { pathExists } from "./files" +import { syncTargets } from "../sync/registry" + +export type DetectedTool = { + name: string + detected: boolean + reason: string +} + +export async function detectInstalledTools( + home: string = os.homedir(), + cwd: string = process.cwd(), +): Promise<DetectedTool[]> { + const results: DetectedTool[] = [] + for (const target of syncTargets) { + let detected = false + let reason = "not found" + for (const p of target.detectPaths(home, cwd)) { + if (await pathExists(p)) { + detected = true + reason = `found ${p}` + break + } + } + results.push({ name: target.name, detected, reason }) + } + return results +} + +export async function getDetectedTargetNames( + home: string = os.homedir(), + cwd: string = process.cwd(), +): Promise<string[]> { + const tools = await detectInstalledTools(home, cwd) + return 
tools.filter((t) => t.detected).map((t) => t.name) +} diff --git a/src/utils/files.ts b/src/utils/files.ts index 9994d0c..8ca608a 100644 --- a/src/utils/files.ts +++ b/src/utils/files.ts @@ -41,11 +41,25 @@ export async function writeText(filePath: string, content: string): Promise<void await fs.writeFile(filePath, content, "utf8") } +export async function writeTextSecure(filePath: string, content: string): Promise<void> { + await ensureDir(path.dirname(filePath)) + await fs.writeFile(filePath, content, { encoding: "utf8", mode: 0o600 }) + await fs.chmod(filePath, 0o600) +} + export async function writeJson(filePath: string, data: unknown): Promise<void> { const content = JSON.stringify(data, null, 2) await writeText(filePath, content + "\n") } +/** Write JSON with restrictive permissions (0o600) for files containing secrets */ +export async function writeJsonSecure(filePath: string, data: unknown): Promise<void> { + const content = JSON.stringify(data, null, 2) + await ensureDir(path.dirname(filePath)) + await fs.writeFile(filePath, content + "\n", { encoding: "utf8", mode: 0o600 }) + await fs.chmod(filePath, 0o600) +} + export async function walkFiles(root: string): Promise<string[]> { const entries = await fs.readdir(root, { withFileTypes: true }) const results: string[] = [] @@ -61,6 +75,21 @@ export async function walkFiles(root: string): Promise<string[]> { return results } +/** + * Resolve a colon-separated command name into a filesystem path. + * e.g. resolveCommandPath("/commands", "ce:plan", ".md") -> "/commands/ce/plan.md" + * Creates intermediate directories as needed. 
+ */ +export async function resolveCommandPath(dir: string, name: string, ext: string): Promise<string> { + const parts = name.split(":") + if (parts.length > 1) { + const nestedDir = path.join(dir, ...parts.slice(0, -1)) + await ensureDir(nestedDir) + return path.join(nestedDir, `${parts[parts.length - 1]}${ext}`) + } + return path.join(dir, `${name}${ext}`) +} + export async function copyDir(sourceDir: string, targetDir: string): Promise<void> { await ensureDir(targetDir) const entries = await fs.readdir(sourceDir, { withFileTypes: true }) diff --git a/src/utils/frontmatter.ts b/src/utils/frontmatter.ts index a799c94..dfe85bf 100644 --- a/src/utils/frontmatter.ts +++ b/src/utils/frontmatter.ts @@ -58,7 +58,7 @@ function formatYamlValue(value: unknown): string { if (raw.includes("\n")) { return `|\n${raw.split("\n").map((line) => ` ${line}`).join("\n")}` } - if (raw.includes(":") || raw.startsWith("[") || raw.startsWith("{")) { + if (raw.includes(":") || raw.startsWith("[") || raw.startsWith("{") || raw === "*") { return JSON.stringify(raw) } return raw diff --git a/src/utils/resolve-output.ts b/src/utils/resolve-output.ts new file mode 100644 index 0000000..724f142 --- /dev/null +++ b/src/utils/resolve-output.ts @@ -0,0 +1,50 @@ +import os from "os" +import path from "path" +import type { TargetScope } from "../targets" + +export function resolveTargetOutputRoot(options: { + targetName: string + outputRoot: string + codexHome: string + piHome: string + openclawHome?: string + qwenHome?: string + pluginName?: string + hasExplicitOutput: boolean + scope?: TargetScope +}): string { + const { targetName, outputRoot, codexHome, piHome, openclawHome, qwenHome, pluginName, hasExplicitOutput, scope } = options + if (targetName === "codex") return codexHome + if (targetName === "pi") return piHome + if (targetName === "droid") return path.join(os.homedir(), ".factory") + if (targetName === "cursor") { + const base = hasExplicitOutput ? 
outputRoot : process.cwd() + return path.join(base, ".cursor") + } + if (targetName === "gemini") { + const base = hasExplicitOutput ? outputRoot : process.cwd() + return path.join(base, ".gemini") + } + if (targetName === "copilot") { + const base = hasExplicitOutput ? outputRoot : process.cwd() + return path.join(base, ".github") + } + if (targetName === "kiro") { + const base = hasExplicitOutput ? outputRoot : process.cwd() + return path.join(base, ".kiro") + } + if (targetName === "windsurf") { + if (hasExplicitOutput) return outputRoot + if (scope === "global") return path.join(os.homedir(), ".codeium", "windsurf") + return path.join(process.cwd(), ".windsurf") + } + if (targetName === "openclaw") { + const home = openclawHome ?? path.join(os.homedir(), ".openclaw", "extensions") + return path.join(home, pluginName ?? "plugin") + } + if (targetName === "qwen") { + const home = qwenHome ?? path.join(os.homedir(), ".qwen", "extensions") + return path.join(home, pluginName ?? "plugin") + } + return outputRoot +} diff --git a/src/utils/secrets.ts b/src/utils/secrets.ts new file mode 100644 index 0000000..45f196d --- /dev/null +++ b/src/utils/secrets.ts @@ -0,0 +1,24 @@ +export const SENSITIVE_PATTERN = /key|token|secret|password|credential|api_key/i + +/** Check if any MCP servers have env vars that might contain secrets */ +export function hasPotentialSecrets( + servers: Record<string, { env?: Record<string, string> }>, +): boolean { + for (const server of Object.values(servers)) { + if (server.env) { + for (const key of Object.keys(server.env)) { + if (SENSITIVE_PATTERN.test(key)) return true + } + } + } + return false +} + +/** Return names of MCP servers whose env vars may contain secrets */ +export function findServersWithPotentialSecrets( + servers: Record<string, { env?: Record<string, string> }>, +): string[] { + return Object.entries(servers) + .filter(([, s]) => s.env && Object.keys(s.env).some((k) => SENSITIVE_PATTERN.test(k))) + .map(([name]) => name) 
+} diff --git a/src/utils/symlink.ts b/src/utils/symlink.ts index 8855adb..9268cad 100644 --- a/src/utils/symlink.ts +++ b/src/utils/symlink.ts @@ -2,7 +2,7 @@ import fs from "fs/promises" /** * Create a symlink, safely replacing any existing symlink at target. - * Only removes existing symlinks - refuses to delete real directories. + * Only removes existing symlinks - skips real directories with a warning. */ export async function forceSymlink(source: string, target: string): Promise<void> { try { @@ -11,11 +11,9 @@ export async function forceSymlink(source: string, target: string): Promise<void // Safe to remove existing symlink await fs.unlink(target) } else if (stat.isDirectory()) { - // Refuse to delete real directories - throw new Error( - `Cannot create symlink at ${target}: a real directory exists there. ` + - `Remove it manually if you want to replace it with a symlink.` - ) + // Skip real directories rather than deleting them + console.warn(`Skipping ${target}: a real directory exists there (remove it manually to replace with a symlink).`) + return } else { // Regular file - remove it await fs.unlink(target) diff --git a/tests/claude-home.test.ts b/tests/claude-home.test.ts new file mode 100644 index 0000000..499160d --- /dev/null +++ b/tests/claude-home.test.ts @@ -0,0 +1,46 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import os from "os" +import path from "path" +import { loadClaudeHome } from "../src/parsers/claude-home" + +describe("loadClaudeHome", () => { + test("loads personal skills, commands, and MCP servers", async () => { + const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "claude-home-")) + const skillDir = path.join(tempHome, "skills", "reviewer") + const commandsDir = path.join(tempHome, "commands") + + await fs.mkdir(skillDir, { recursive: true }) + await fs.writeFile(path.join(skillDir, "SKILL.md"), "---\nname: reviewer\n---\nReview things.\n") + + await fs.mkdir(path.join(commandsDir, 
"workflows"), { recursive: true }) + await fs.writeFile( + path.join(commandsDir, "workflows", "plan.md"), + "---\ndescription: Planning command\nargument-hint: \"[feature]\"\n---\nPlan the work.\n", + ) + await fs.writeFile( + path.join(commandsDir, "custom.md"), + "---\nname: custom-command\ndescription: Custom command\nallowed-tools: Bash, Read\n---\nDo custom work.\n", + ) + + await fs.writeFile( + path.join(tempHome, "settings.json"), + JSON.stringify({ + mcpServers: { + context7: { url: "https://mcp.context7.com/mcp" }, + }, + }), + ) + + const config = await loadClaudeHome(tempHome) + + expect(config.skills.map((skill) => skill.name)).toEqual(["reviewer"]) + expect(config.commands?.map((command) => command.name)).toEqual([ + "custom-command", + "workflows:plan", + ]) + expect(config.commands?.find((command) => command.name === "workflows:plan")?.argumentHint).toBe("[feature]") + expect(config.commands?.find((command) => command.name === "custom-command")?.allowedTools).toEqual(["Bash", "Read"]) + expect(config.mcpServers.context7?.url).toBe("https://mcp.context7.com/mcp") + }) +}) diff --git a/tests/cli.test.ts b/tests/cli.test.ts index 49c20a6..390d06c 100644 --- a/tests/cli.test.ts +++ b/tests/cli.test.ts @@ -426,4 +426,184 @@ describe("CLI", () => { expect(await exists(path.join(piRoot, "prompts", "workflows-review.md"))).toBe(true) expect(await exists(path.join(piRoot, "extensions", "compound-engineering-compat.ts"))).toBe(true) }) + + test("install --to opencode uses permissions:none by default", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "cli-perms-none-")) + const fixtureRoot = path.join(import.meta.dir, "fixtures", "sample-plugin") + + const proc = Bun.spawn([ + "bun", + "run", + "src/index.ts", + "install", + fixtureRoot, + "--to", + "opencode", + "--output", + tempRoot, + ], { + cwd: path.join(import.meta.dir, ".."), + stdout: "pipe", + stderr: "pipe", + }) + + const exitCode = await proc.exited + const stdout = await 
new Response(proc.stdout).text() + const stderr = await new Response(proc.stderr).text() + + if (exitCode !== 0) { + throw new Error(`CLI failed (exit ${exitCode}).\nstdout: ${stdout}\nstderr: ${stderr}`) + } + + expect(stdout).toContain("Installed compound-engineering") + + const opencodeJsonPath = path.join(tempRoot, "opencode.json") + const content = await fs.readFile(opencodeJsonPath, "utf-8") + const json = JSON.parse(content) + + expect(json).not.toHaveProperty("permission") + expect(json).not.toHaveProperty("tools") + }) + + test("install --to opencode --permissions broad writes permission block", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "cli-perms-broad-")) + const fixtureRoot = path.join(import.meta.dir, "fixtures", "sample-plugin") + + const proc = Bun.spawn([ + "bun", + "run", + "src/index.ts", + "install", + fixtureRoot, + "--to", + "opencode", + "--permissions", + "broad", + "--output", + tempRoot, + ], { + cwd: path.join(import.meta.dir, ".."), + stdout: "pipe", + stderr: "pipe", + }) + + const exitCode = await proc.exited + const stdout = await new Response(proc.stdout).text() + const stderr = await new Response(proc.stderr).text() + + if (exitCode !== 0) { + throw new Error(`CLI failed (exit ${exitCode}).\nstdout: ${stdout}\nstderr: ${stderr}`) + } + + expect(stdout).toContain("Installed compound-engineering") + + const opencodeJsonPath = path.join(tempRoot, "opencode.json") + const content = await fs.readFile(opencodeJsonPath, "utf-8") + const json = JSON.parse(content) + + expect(json).toHaveProperty("permission") + expect(json.permission).not.toBeNull() + }) + + test("sync --target all detects new sync targets and ignores stale cursor directories", async () => { + const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "cli-sync-home-")) + const tempCwd = await fs.mkdtemp(path.join(os.tmpdir(), "cli-sync-cwd-")) + const repoRoot = path.join(import.meta.dir, "..") + const fixtureSkillDir = 
path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one") + const claudeSkillsDir = path.join(tempHome, ".claude", "skills", "skill-one") + const claudeCommandsDir = path.join(tempHome, ".claude", "commands", "workflows") + + await fs.mkdir(path.dirname(claudeSkillsDir), { recursive: true }) + await fs.cp(fixtureSkillDir, claudeSkillsDir, { recursive: true }) + await fs.mkdir(claudeCommandsDir, { recursive: true }) + await fs.writeFile( + path.join(claudeCommandsDir, "plan.md"), + [ + "---", + "name: workflows:plan", + "description: Plan work", + "argument-hint: \"[goal]\"", + "---", + "", + "Plan the work.", + ].join("\n"), + ) + await fs.writeFile( + path.join(tempHome, ".claude", "settings.json"), + JSON.stringify({ + mcpServers: { + local: { command: "echo", args: ["hello"] }, + remote: { url: "https://example.com/mcp" }, + legacy: { type: "sse", url: "https://example.com/sse" }, + }, + }, null, 2), + ) + + await fs.mkdir(path.join(tempHome, ".config", "opencode"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".codex"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".pi"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".factory"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".copilot"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".gemini"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".codeium", "windsurf"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".kiro"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".qwen"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".openclaw"), { recursive: true }) + await fs.mkdir(path.join(tempCwd, ".cursor"), { recursive: true }) + + const proc = Bun.spawn([ + "bun", + "run", + path.join(repoRoot, "src", "index.ts"), + "sync", + "--target", + "all", + ], { + cwd: tempCwd, + stdout: "pipe", + stderr: "pipe", + env: { + ...process.env, + HOME: tempHome, + }, + }) + + const exitCode = await 
proc.exited + const stdout = await new Response(proc.stdout).text() + const stderr = await new Response(proc.stderr).text() + + if (exitCode !== 0) { + throw new Error(`CLI failed (exit ${exitCode}).\nstdout: ${stdout}\nstderr: ${stderr}`) + } + + expect(stdout).toContain("Synced to codex") + expect(stdout).toContain("Synced to opencode") + expect(stdout).toContain("Synced to pi") + expect(stdout).toContain("Synced to droid") + expect(stdout).toContain("Synced to windsurf") + expect(stdout).toContain("Synced to kiro") + expect(stdout).toContain("Synced to qwen") + expect(stdout).toContain("Synced to openclaw") + expect(stdout).toContain("Synced to copilot") + expect(stdout).toContain("Synced to gemini") + expect(stdout).not.toContain("cursor") + + expect(await exists(path.join(tempHome, ".config", "opencode", "commands", "workflows:plan.md"))).toBe(true) + expect(await exists(path.join(tempHome, ".codex", "config.toml"))).toBe(true) + expect(await exists(path.join(tempHome, ".codex", "prompts", "workflows-plan.md"))).toBe(true) + expect(await exists(path.join(tempHome, ".codex", "skills", "workflows-plan", "SKILL.md"))).toBe(true) + expect(await exists(path.join(tempHome, ".pi", "agent", "prompts", "workflows-plan.md"))).toBe(true) + expect(await exists(path.join(tempHome, ".factory", "commands", "plan.md"))).toBe(true) + expect(await exists(path.join(tempHome, ".codeium", "windsurf", "mcp_config.json"))).toBe(true) + expect(await exists(path.join(tempHome, ".codeium", "windsurf", "global_workflows", "workflows-plan.md"))).toBe(true) + expect(await exists(path.join(tempHome, ".kiro", "settings", "mcp.json"))).toBe(true) + expect(await exists(path.join(tempHome, ".kiro", "skills", "workflows-plan", "SKILL.md"))).toBe(true) + expect(await exists(path.join(tempHome, ".qwen", "settings.json"))).toBe(true) + expect(await exists(path.join(tempHome, ".qwen", "commands", "workflows", "plan.md"))).toBe(true) + expect(await exists(path.join(tempHome, ".copilot", 
"mcp-config.json"))).toBe(true) + expect(await exists(path.join(tempHome, ".copilot", "skills", "workflows-plan", "SKILL.md"))).toBe(true) + expect(await exists(path.join(tempHome, ".gemini", "settings.json"))).toBe(true) + expect(await exists(path.join(tempHome, ".gemini", "commands", "workflows", "plan.toml"))).toBe(true) + expect(await exists(path.join(tempHome, ".openclaw", "skills", "skill-one"))).toBe(true) + }) }) diff --git a/tests/converter.test.ts b/tests/converter.test.ts index 3b3053e..dfac9ab 100644 --- a/tests/converter.test.ts +++ b/tests/converter.test.ts @@ -8,7 +8,7 @@ import type { ClaudePlugin } from "../src/types/claude" const fixtureRoot = path.join(import.meta.dir, "fixtures", "sample-plugin") describe("convertClaudeToOpenCode", () => { - test("maps commands, permissions, and agents", async () => { + test("from-command mode: map allowedTools to global permission block", async () => { const plugin = await loadClaudePlugin(fixtureRoot) const bundle = convertClaudeToOpenCode(plugin, { agentMode: "subagent", @@ -16,8 +16,9 @@ describe("convertClaudeToOpenCode", () => { permissions: "from-commands", }) - expect(bundle.config.command?.["workflows:review"]).toBeDefined() - expect(bundle.config.command?.["plan_review"]).toBeDefined() + expect(bundle.config.command).toBeUndefined() + expect(bundle.commandFiles.find((f) => f.name === "workflows:review")).toBeDefined() + expect(bundle.commandFiles.find((f) => f.name === "plan_review")).toBeDefined() const permission = bundle.config.permission as Record<string, string | Record<string, string>> expect(Object.keys(permission).sort()).toEqual([ @@ -71,8 +72,10 @@ describe("convertClaudeToOpenCode", () => { expect(parsed.data.model).toBe("anthropic/claude-sonnet-4-20250514") expect(parsed.data.temperature).toBe(0.1) - const modelCommand = bundle.config.command?.["workflows:work"] - expect(modelCommand?.model).toBe("openai/gpt-4o") + const modelCommand = bundle.commandFiles.find((f) => f.name === 
"workflows:work") + expect(modelCommand).toBeDefined() + const commandParsed = parseFrontmatter(modelCommand!.content) + expect(commandParsed.data.model).toBe("openai/gpt-4o") }) test("resolves bare Claude model aliases to full IDs", () => { @@ -129,6 +132,18 @@ describe("convertClaudeToOpenCode", () => { expect(hookFile!.content).toContain("// timeout: 30s") expect(hookFile!.content).toContain("// Prompt hook for Write|Edit") expect(hookFile!.content).toContain("// Agent hook for Write|Edit: security-sentinel") + + // PreToolUse (tool.execute.before) handlers are wrapped in try-catch + // to prevent hook failures from crashing parallel tool call batches (#85) + const beforeIdx = hookFile!.content.indexOf('"tool.execute.before"') + const afterIdx = hookFile!.content.indexOf('"tool.execute.after"') + const beforeBlock = hookFile!.content.slice(beforeIdx, afterIdx) + expect(beforeBlock).toContain("try {") + expect(beforeBlock).toContain("} catch (err) {") + + // PostToolUse (tool.execute.after) handlers are NOT wrapped in try-catch + const afterBlock = hookFile!.content.slice(afterIdx, hookFile!.content.indexOf('"session.created"')) + expect(afterBlock).not.toContain("try {") }) test("converts MCP servers", async () => { @@ -199,7 +214,7 @@ describe("convertClaudeToOpenCode", () => { expect(parsed.data.mode).toBe("primary") }) - test("excludes commands with disable-model-invocation from command map", async () => { + test("excludes commands with disable-model-invocation from commandFiles", async () => { const plugin = await loadClaudePlugin(fixtureRoot) const bundle = convertClaudeToOpenCode(plugin, { agentMode: "subagent", @@ -208,10 +223,10 @@ describe("convertClaudeToOpenCode", () => { }) // deploy-docs has disable-model-invocation: true, should be excluded - expect(bundle.config.command?.["deploy-docs"]).toBeUndefined() + expect(bundle.commandFiles.find((f) => f.name === "deploy-docs")).toBeUndefined() // Normal commands should still be present - 
expect(bundle.config.command?.["workflows:review"]).toBeDefined() + expect(bundle.commandFiles.find((f) => f.name === "workflows:review")).toBeDefined() }) test("rewrites .claude/ paths to .opencode/ in command bodies", () => { @@ -240,10 +255,11 @@ Run \`/compound-engineering-setup\` to create a settings file.`, permissions: "none", }) - const template = bundle.config.command?.["review"]?.template ?? "" + const commandFile = bundle.commandFiles.find((f) => f.name === "review") + expect(commandFile).toBeDefined() // Tool-agnostic path in project root — no rewriting needed - expect(template).toContain("compound-engineering.local.md") + expect(commandFile!.content).toContain("compound-engineering.local.md") }) test("rewrites .claude/ paths in agent bodies", () => { @@ -273,4 +289,33 @@ Run \`/compound-engineering-setup\` to create a settings file.`, // Tool-agnostic path in project root — no rewriting needed expect(agentFile!.content).toContain("compound-engineering.local.md") }) + + test("command .md files include description in frontmatter", () => { + const plugin: ClaudePlugin = { + root: "/tmp/plugin", + manifest: { name: "fixture", version: "1.0.0" }, + agents: [], + commands: [ + { + name: "test-cmd", + description: "Test description", + body: "Do the thing", + sourcePath: "/tmp/plugin/commands/test-cmd.md", + }, + ], + skills: [], + } + + const bundle = convertClaudeToOpenCode(plugin, { + agentMode: "subagent", + inferTemperature: false, + permissions: "none", + }) + + const commandFile = bundle.commandFiles.find((f) => f.name === "test-cmd") + expect(commandFile).toBeDefined() + const parsed = parseFrontmatter(commandFile!.content) + expect(parsed.data.description).toBe("Test description") + expect(parsed.body).toContain("Do the thing") + }) }) diff --git a/tests/copilot-converter.test.ts b/tests/copilot-converter.test.ts new file mode 100644 index 0000000..22f7973 --- /dev/null +++ b/tests/copilot-converter.test.ts @@ -0,0 +1,467 @@ +import { describe, 
expect, test, spyOn } from "bun:test" +import { convertClaudeToCopilot, transformContentForCopilot } from "../src/converters/claude-to-copilot" +import { parseFrontmatter } from "../src/utils/frontmatter" +import type { ClaudePlugin } from "../src/types/claude" + +const fixturePlugin: ClaudePlugin = { + root: "/tmp/plugin", + manifest: { name: "fixture", version: "1.0.0" }, + agents: [ + { + name: "Security Reviewer", + description: "Security-focused code review agent", + capabilities: ["Threat modeling", "OWASP"], + model: "claude-sonnet-4-20250514", + body: "Focus on vulnerabilities.", + sourcePath: "/tmp/plugin/agents/security-reviewer.md", + }, + ], + commands: [ + { + name: "workflows:plan", + description: "Planning command", + argumentHint: "[FOCUS]", + model: "inherit", + allowedTools: ["Read"], + body: "Plan the work.", + sourcePath: "/tmp/plugin/commands/workflows/plan.md", + }, + ], + skills: [ + { + name: "existing-skill", + description: "Existing skill", + sourceDir: "/tmp/plugin/skills/existing-skill", + skillPath: "/tmp/plugin/skills/existing-skill/SKILL.md", + }, + ], + hooks: undefined, + mcpServers: undefined, +} + +const defaultOptions = { + agentMode: "subagent" as const, + inferTemperature: false, + permissions: "none" as const, +} + +describe("convertClaudeToCopilot", () => { + test("converts agents to .agent.md with Copilot frontmatter", () => { + const bundle = convertClaudeToCopilot(fixturePlugin, defaultOptions) + + expect(bundle.agents).toHaveLength(1) + const agent = bundle.agents[0] + expect(agent.name).toBe("security-reviewer") + + const parsed = parseFrontmatter(agent.content) + expect(parsed.data.description).toBe("Security-focused code review agent") + expect(parsed.data.tools).toEqual(["*"]) + expect(parsed.data.infer).toBe(true) + expect(parsed.body).toContain("Capabilities") + expect(parsed.body).toContain("Threat modeling") + expect(parsed.body).toContain("Focus on vulnerabilities.") + }) + + test("agent description is required, 
fallback generated if missing", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { + name: "basic-agent", + body: "Do things.", + sourcePath: "/tmp/plugin/agents/basic.md", + }, + ], + } + + const bundle = convertClaudeToCopilot(plugin, defaultOptions) + const parsed = parseFrontmatter(bundle.agents[0].content) + expect(parsed.data.description).toBe("Converted from Claude agent basic-agent") + }) + + test("agent with empty body gets default body", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { + name: "empty-agent", + description: "Empty agent", + body: "", + sourcePath: "/tmp/plugin/agents/empty.md", + }, + ], + } + + const bundle = convertClaudeToCopilot(plugin, defaultOptions) + const parsed = parseFrontmatter(bundle.agents[0].content) + expect(parsed.body).toContain("Instructions converted from the empty-agent agent.") + }) + + test("agent capabilities are prepended to body", () => { + const bundle = convertClaudeToCopilot(fixturePlugin, defaultOptions) + const parsed = parseFrontmatter(bundle.agents[0].content) + expect(parsed.body).toMatch(/## Capabilities\n- Threat modeling\n- OWASP/) + }) + + test("agent model field is passed through", () => { + const bundle = convertClaudeToCopilot(fixturePlugin, defaultOptions) + const parsed = parseFrontmatter(bundle.agents[0].content) + expect(parsed.data.model).toBe("claude-sonnet-4-20250514") + }) + + test("agent without model omits model field", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { + name: "no-model", + description: "No model agent", + body: "Content.", + sourcePath: "/tmp/plugin/agents/no-model.md", + }, + ], + } + + const bundle = convertClaudeToCopilot(plugin, defaultOptions) + const parsed = parseFrontmatter(bundle.agents[0].content) + expect(parsed.data.model).toBeUndefined() + }) + + test("agent tools defaults to [*]", () => { + const bundle = convertClaudeToCopilot(fixturePlugin, defaultOptions) + const parsed = 
parseFrontmatter(bundle.agents[0].content) + expect(parsed.data.tools).toEqual(["*"]) + }) + + test("agent infer defaults to true", () => { + const bundle = convertClaudeToCopilot(fixturePlugin, defaultOptions) + const parsed = parseFrontmatter(bundle.agents[0].content) + expect(parsed.data.infer).toBe(true) + }) + + test("warns when agent body exceeds 30k characters", () => { + const warnSpy = spyOn(console, "warn").mockImplementation(() => {}) + + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { + name: "large-agent", + description: "Large agent", + body: "x".repeat(31_000), + sourcePath: "/tmp/plugin/agents/large.md", + }, + ], + commands: [], + skills: [], + } + + convertClaudeToCopilot(plugin, defaultOptions) + expect(warnSpy).toHaveBeenCalledWith( + expect.stringContaining("exceeds 30000 characters"), + ) + + warnSpy.mockRestore() + }) + + test("converts commands to skills with SKILL.md format", () => { + const bundle = convertClaudeToCopilot(fixturePlugin, defaultOptions) + + expect(bundle.generatedSkills).toHaveLength(1) + const skill = bundle.generatedSkills[0] + expect(skill.name).toBe("workflows-plan") + + const parsed = parseFrontmatter(skill.content) + expect(parsed.data.name).toBe("workflows-plan") + expect(parsed.data.description).toBe("Planning command") + expect(parsed.body).toContain("Plan the work.") + }) + + test("preserves namespaced command names with hyphens", () => { + const bundle = convertClaudeToCopilot(fixturePlugin, defaultOptions) + expect(bundle.generatedSkills[0].name).toBe("workflows-plan") + }) + + test("command name collision after normalization is deduplicated", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + commands: [ + { + name: "workflows:plan", + description: "Workflow plan", + body: "Plan body.", + sourcePath: "/tmp/plugin/commands/workflows/plan.md", + }, + { + name: "workflows:plan", + description: "Duplicate plan", + body: "Duplicate body.", + sourcePath: 
"/tmp/plugin/commands/workflows/plan2.md", + }, + ], + agents: [], + skills: [], + } + + const bundle = convertClaudeToCopilot(plugin, defaultOptions) + const names = bundle.generatedSkills.map((s) => s.name) + expect(names).toEqual(["workflows-plan", "workflows-plan-2"]) + }) + + test("namespaced and non-namespaced commands produce distinct names", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + commands: [ + { + name: "workflows:plan", + description: "Workflow plan", + body: "Plan body.", + sourcePath: "/tmp/plugin/commands/workflows/plan.md", + }, + { + name: "plan", + description: "Top-level plan", + body: "Top plan body.", + sourcePath: "/tmp/plugin/commands/plan.md", + }, + ], + agents: [], + skills: [], + } + + const bundle = convertClaudeToCopilot(plugin, defaultOptions) + const names = bundle.generatedSkills.map((s) => s.name) + expect(names).toEqual(["workflows-plan", "plan"]) + }) + + test("command allowedTools is silently dropped", () => { + const bundle = convertClaudeToCopilot(fixturePlugin, defaultOptions) + const skill = bundle.generatedSkills[0] + expect(skill.content).not.toContain("allowedTools") + expect(skill.content).not.toContain("allowed-tools") + }) + + test("command with argument-hint gets Arguments section", () => { + const bundle = convertClaudeToCopilot(fixturePlugin, defaultOptions) + const skill = bundle.generatedSkills[0] + expect(skill.content).toContain("## Arguments") + expect(skill.content).toContain("[FOCUS]") + }) + + test("passes through skill directories", () => { + const bundle = convertClaudeToCopilot(fixturePlugin, defaultOptions) + + expect(bundle.skillDirs).toHaveLength(1) + expect(bundle.skillDirs[0].name).toBe("existing-skill") + expect(bundle.skillDirs[0].sourceDir).toBe("/tmp/plugin/skills/existing-skill") + }) + + test("skill and generated skill name collision is deduplicated", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + commands: [ + { + name: "existing-skill", + description: 
"Colliding command", + body: "This collides with skill name.", + sourcePath: "/tmp/plugin/commands/existing-skill.md", + }, + ], + agents: [], + } + + const bundle = convertClaudeToCopilot(plugin, defaultOptions) + // The command should get deduplicated since the skill name is reserved + expect(bundle.generatedSkills[0].name).toBe("existing-skill-2") + expect(bundle.skillDirs[0].name).toBe("existing-skill") + }) + + test("converts MCP servers with COPILOT_MCP_ prefix", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [], + commands: [], + skills: [], + mcpServers: { + playwright: { + command: "npx", + args: ["-y", "@anthropic/mcp-playwright"], + env: { DISPLAY: ":0", API_KEY: "secret" }, + }, + }, + } + + const bundle = convertClaudeToCopilot(plugin, defaultOptions) + expect(bundle.mcpConfig).toBeDefined() + expect(bundle.mcpConfig!.playwright.type).toBe("local") + expect(bundle.mcpConfig!.playwright.command).toBe("npx") + expect(bundle.mcpConfig!.playwright.args).toEqual(["-y", "@anthropic/mcp-playwright"]) + expect(bundle.mcpConfig!.playwright.tools).toEqual(["*"]) + expect(bundle.mcpConfig!.playwright.env).toEqual({ + COPILOT_MCP_DISPLAY: ":0", + COPILOT_MCP_API_KEY: "secret", + }) + }) + + test("MCP env vars already prefixed are not double-prefixed", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [], + commands: [], + skills: [], + mcpServers: { + server: { + command: "node", + args: ["server.js"], + env: { COPILOT_MCP_TOKEN: "abc" }, + }, + }, + } + + const bundle = convertClaudeToCopilot(plugin, defaultOptions) + expect(bundle.mcpConfig!.server.env).toEqual({ COPILOT_MCP_TOKEN: "abc" }) + }) + + test("MCP servers get type field (local vs sse)", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [], + commands: [], + skills: [], + mcpServers: { + local: { command: "npx", args: ["server"] }, + remote: { url: "https://mcp.example.com/sse" }, + }, + } + + const bundle = 
convertClaudeToCopilot(plugin, defaultOptions) + expect(bundle.mcpConfig!.local.type).toBe("local") + expect(bundle.mcpConfig!.remote.type).toBe("sse") + }) + + test("MCP headers pass through for remote servers", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [], + commands: [], + skills: [], + mcpServers: { + remote: { + url: "https://mcp.example.com/sse", + headers: { Authorization: "Bearer token" }, + }, + }, + } + + const bundle = convertClaudeToCopilot(plugin, defaultOptions) + expect(bundle.mcpConfig!.remote.url).toBe("https://mcp.example.com/sse") + expect(bundle.mcpConfig!.remote.headers).toEqual({ Authorization: "Bearer token" }) + }) + + test("warns when hooks are present", () => { + const warnSpy = spyOn(console, "warn").mockImplementation(() => {}) + + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [], + commands: [], + skills: [], + hooks: { + hooks: { + PreToolUse: [{ matcher: "Bash", hooks: [{ type: "command", command: "echo test" }] }], + }, + }, + } + + convertClaudeToCopilot(plugin, defaultOptions) + expect(warnSpy).toHaveBeenCalledWith( + "Warning: Copilot does not support hooks. 
Hooks were skipped during conversion.", + ) + + warnSpy.mockRestore() + }) + + test("no warning when hooks are absent", () => { + const warnSpy = spyOn(console, "warn").mockImplementation(() => {}) + + convertClaudeToCopilot(fixturePlugin, defaultOptions) + expect(warnSpy).not.toHaveBeenCalled() + + warnSpy.mockRestore() + }) + + test("plugin with zero agents produces empty agents array", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [], + } + + const bundle = convertClaudeToCopilot(plugin, defaultOptions) + expect(bundle.agents).toHaveLength(0) + }) + + test("plugin with only skills works", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [], + commands: [], + } + + const bundle = convertClaudeToCopilot(plugin, defaultOptions) + expect(bundle.agents).toHaveLength(0) + expect(bundle.generatedSkills).toHaveLength(0) + expect(bundle.skillDirs).toHaveLength(1) + }) +}) + +describe("transformContentForCopilot", () => { + test("rewrites .claude/ paths to .github/", () => { + const input = "Read `.claude/compound-engineering.local.md` for config." 
+ const result = transformContentForCopilot(input) + expect(result).toContain(".github/compound-engineering.local.md") + expect(result).not.toContain(".claude/") + }) + + test("rewrites ~/.claude/ paths to ~/.copilot/", () => { + const input = "Global config at ~/.claude/settings.json" + const result = transformContentForCopilot(input) + expect(result).toContain("~/.copilot/settings.json") + expect(result).not.toContain("~/.claude/") + }) + + test("transforms Task agent calls to skill references", () => { + const input = `Run agents: + +- Task repo-research-analyst(feature_description) +- Task learnings-researcher(feature_description) + +Task best-practices-researcher(topic)` + + const result = transformContentForCopilot(input) + expect(result).toContain("Use the repo-research-analyst skill to: feature_description") + expect(result).toContain("Use the learnings-researcher skill to: feature_description") + expect(result).toContain("Use the best-practices-researcher skill to: topic") + expect(result).not.toContain("Task repo-research-analyst(") + }) + + test("replaces colons with hyphens in slash commands", () => { + const input = `1. Run /deepen-plan to enhance +2. Start /workflows:work to implement +3. File at /tmp/output.md` + + const result = transformContentForCopilot(input) + expect(result).toContain("/deepen-plan") + expect(result).toContain("/workflows-work") + expect(result).not.toContain("/workflows:work") + // File paths preserved + expect(result).toContain("/tmp/output.md") + }) + + test("transforms @agent references to agent references", () => { + const input = "Have @security-sentinel and @dhh-rails-reviewer check the code." 
+ const result = transformContentForCopilot(input) + expect(result).toContain("the security-sentinel agent") + expect(result).toContain("the dhh-rails-reviewer agent") + expect(result).not.toContain("@security-sentinel") + }) +}) diff --git a/tests/copilot-writer.test.ts b/tests/copilot-writer.test.ts new file mode 100644 index 0000000..6c430a1 --- /dev/null +++ b/tests/copilot-writer.test.ts @@ -0,0 +1,189 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import path from "path" +import os from "os" +import { writeCopilotBundle } from "../src/targets/copilot" +import type { CopilotBundle } from "../src/types/copilot" + +async function exists(filePath: string): Promise<boolean> { + try { + await fs.access(filePath) + return true + } catch { + return false + } +} + +describe("writeCopilotBundle", () => { + test("writes agents, generated skills, copied skills, and MCP config", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-test-")) + const bundle: CopilotBundle = { + agents: [ + { + name: "security-reviewer", + content: "---\ndescription: Security\ntools:\n - '*'\ninfer: true\n---\n\nReview code.", + }, + ], + generatedSkills: [ + { + name: "plan", + content: "---\nname: plan\ndescription: Planning\n---\n\nPlan the work.", + }, + ], + skillDirs: [ + { + name: "skill-one", + sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"), + }, + ], + mcpConfig: { + playwright: { + type: "local", + command: "npx", + args: ["-y", "@anthropic/mcp-playwright"], + tools: ["*"], + }, + }, + } + + await writeCopilotBundle(tempRoot, bundle) + + expect(await exists(path.join(tempRoot, ".github", "agents", "security-reviewer.agent.md"))).toBe(true) + expect(await exists(path.join(tempRoot, ".github", "skills", "plan", "SKILL.md"))).toBe(true) + expect(await exists(path.join(tempRoot, ".github", "skills", "skill-one", "SKILL.md"))).toBe(true) + expect(await 
exists(path.join(tempRoot, ".github", "copilot-mcp-config.json"))).toBe(true) + + const agentContent = await fs.readFile( + path.join(tempRoot, ".github", "agents", "security-reviewer.agent.md"), + "utf8", + ) + expect(agentContent).toContain("Review code.") + + const skillContent = await fs.readFile( + path.join(tempRoot, ".github", "skills", "plan", "SKILL.md"), + "utf8", + ) + expect(skillContent).toContain("Plan the work.") + + const mcpContent = JSON.parse( + await fs.readFile(path.join(tempRoot, ".github", "copilot-mcp-config.json"), "utf8"), + ) + expect(mcpContent.mcpServers.playwright.command).toBe("npx") + }) + + test("agents use .agent.md file extension", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-ext-")) + const bundle: CopilotBundle = { + agents: [{ name: "test-agent", content: "Agent content" }], + generatedSkills: [], + skillDirs: [], + } + + await writeCopilotBundle(tempRoot, bundle) + + expect(await exists(path.join(tempRoot, ".github", "agents", "test-agent.agent.md"))).toBe(true) + // Should NOT create a plain .md file + expect(await exists(path.join(tempRoot, ".github", "agents", "test-agent.md"))).toBe(false) + }) + + test("writes directly into .github output root without double-nesting", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-home-")) + const githubRoot = path.join(tempRoot, ".github") + const bundle: CopilotBundle = { + agents: [{ name: "reviewer", content: "Reviewer agent content" }], + generatedSkills: [{ name: "plan", content: "Plan content" }], + skillDirs: [], + } + + await writeCopilotBundle(githubRoot, bundle) + + expect(await exists(path.join(githubRoot, "agents", "reviewer.agent.md"))).toBe(true) + expect(await exists(path.join(githubRoot, "skills", "plan", "SKILL.md"))).toBe(true) + // Should NOT double-nest under .github/.github + expect(await exists(path.join(githubRoot, ".github"))).toBe(false) + }) + + test("handles empty bundles gracefully", async 
() => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-empty-")) + const bundle: CopilotBundle = { + agents: [], + generatedSkills: [], + skillDirs: [], + } + + await writeCopilotBundle(tempRoot, bundle) + expect(await exists(tempRoot)).toBe(true) + }) + + test("writes multiple agents as separate .agent.md files", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-multi-")) + const githubRoot = path.join(tempRoot, ".github") + const bundle: CopilotBundle = { + agents: [ + { name: "security-sentinel", content: "Security rules" }, + { name: "performance-oracle", content: "Performance rules" }, + { name: "code-simplicity-reviewer", content: "Simplicity rules" }, + ], + generatedSkills: [], + skillDirs: [], + } + + await writeCopilotBundle(githubRoot, bundle) + + expect(await exists(path.join(githubRoot, "agents", "security-sentinel.agent.md"))).toBe(true) + expect(await exists(path.join(githubRoot, "agents", "performance-oracle.agent.md"))).toBe(true) + expect(await exists(path.join(githubRoot, "agents", "code-simplicity-reviewer.agent.md"))).toBe(true) + }) + + test("backs up existing copilot-mcp-config.json before overwriting", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-backup-")) + const githubRoot = path.join(tempRoot, ".github") + await fs.mkdir(githubRoot, { recursive: true }) + + // Write an existing config + const mcpPath = path.join(githubRoot, "copilot-mcp-config.json") + await fs.writeFile(mcpPath, JSON.stringify({ mcpServers: { old: { type: "local", command: "old-cmd", tools: ["*"] } } })) + + const bundle: CopilotBundle = { + agents: [], + generatedSkills: [], + skillDirs: [], + mcpConfig: { + newServer: { type: "local", command: "new-cmd", tools: ["*"] }, + }, + } + + await writeCopilotBundle(githubRoot, bundle) + + // New config should have the new content + const newContent = JSON.parse(await fs.readFile(mcpPath, "utf8")) + 
expect(newContent.mcpServers.newServer.command).toBe("new-cmd") + + // A backup file should exist + const files = await fs.readdir(githubRoot) + const backupFiles = files.filter((f) => f.startsWith("copilot-mcp-config.json.bak.")) + expect(backupFiles.length).toBeGreaterThanOrEqual(1) + }) + + test("creates skill directories with SKILL.md", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-genskill-")) + const bundle: CopilotBundle = { + agents: [], + generatedSkills: [ + { + name: "deploy", + content: "---\nname: deploy\ndescription: Deploy skill\n---\n\nDeploy steps.", + }, + ], + skillDirs: [], + } + + await writeCopilotBundle(tempRoot, bundle) + + const skillPath = path.join(tempRoot, ".github", "skills", "deploy", "SKILL.md") + expect(await exists(skillPath)).toBe(true) + + const content = await fs.readFile(skillPath, "utf8") + expect(content).toContain("Deploy steps.") + }) +}) diff --git a/tests/cursor-converter.test.ts b/tests/cursor-converter.test.ts deleted file mode 100644 index 9e3adaf..0000000 --- a/tests/cursor-converter.test.ts +++ /dev/null @@ -1,347 +0,0 @@ -import { describe, expect, test, spyOn } from "bun:test" -import { convertClaudeToCursor, transformContentForCursor } from "../src/converters/claude-to-cursor" -import { parseFrontmatter } from "../src/utils/frontmatter" -import type { ClaudePlugin } from "../src/types/claude" - -const fixturePlugin: ClaudePlugin = { - root: "/tmp/plugin", - manifest: { name: "fixture", version: "1.0.0" }, - agents: [ - { - name: "Security Reviewer", - description: "Security-focused code review agent", - capabilities: ["Threat modeling", "OWASP"], - model: "claude-sonnet-4-20250514", - body: "Focus on vulnerabilities.", - sourcePath: "/tmp/plugin/agents/security-reviewer.md", - }, - ], - commands: [ - { - name: "workflows:plan", - description: "Planning command", - argumentHint: "[FOCUS]", - model: "inherit", - allowedTools: ["Read"], - body: "Plan the work.", - sourcePath: 
"/tmp/plugin/commands/workflows/plan.md", - }, - ], - skills: [ - { - name: "existing-skill", - description: "Existing skill", - sourceDir: "/tmp/plugin/skills/existing-skill", - skillPath: "/tmp/plugin/skills/existing-skill/SKILL.md", - }, - ], - hooks: undefined, - mcpServers: undefined, -} - -const defaultOptions = { - agentMode: "subagent" as const, - inferTemperature: false, - permissions: "none" as const, -} - -describe("convertClaudeToCursor", () => { - test("converts agents to rules with .mdc frontmatter", () => { - const bundle = convertClaudeToCursor(fixturePlugin, defaultOptions) - - expect(bundle.rules).toHaveLength(1) - const rule = bundle.rules[0] - expect(rule.name).toBe("security-reviewer") - - const parsed = parseFrontmatter(rule.content) - expect(parsed.data.description).toBe("Security-focused code review agent") - expect(parsed.data.alwaysApply).toBe(false) - // globs is omitted (Agent Requested mode doesn't need it) - expect(parsed.body).toContain("Capabilities") - expect(parsed.body).toContain("Threat modeling") - expect(parsed.body).toContain("Focus on vulnerabilities.") - }) - - test("agent with empty description gets default", () => { - const plugin: ClaudePlugin = { - ...fixturePlugin, - agents: [ - { - name: "basic-agent", - body: "Do things.", - sourcePath: "/tmp/plugin/agents/basic.md", - }, - ], - } - - const bundle = convertClaudeToCursor(plugin, defaultOptions) - const parsed = parseFrontmatter(bundle.rules[0].content) - expect(parsed.data.description).toBe("Converted from Claude agent basic-agent") - }) - - test("agent with empty body gets default body", () => { - const plugin: ClaudePlugin = { - ...fixturePlugin, - agents: [ - { - name: "empty-agent", - description: "Empty agent", - body: "", - sourcePath: "/tmp/plugin/agents/empty.md", - }, - ], - } - - const bundle = convertClaudeToCursor(plugin, defaultOptions) - const parsed = parseFrontmatter(bundle.rules[0].content) - expect(parsed.body).toContain("Instructions converted from 
the empty-agent agent.") - }) - - test("agent capabilities are prepended to body", () => { - const bundle = convertClaudeToCursor(fixturePlugin, defaultOptions) - const parsed = parseFrontmatter(bundle.rules[0].content) - expect(parsed.body).toMatch(/## Capabilities\n- Threat modeling\n- OWASP/) - }) - - test("agent model field is silently dropped", () => { - const bundle = convertClaudeToCursor(fixturePlugin, defaultOptions) - const parsed = parseFrontmatter(bundle.rules[0].content) - expect(parsed.data.model).toBeUndefined() - }) - - test("flattens namespaced command names", () => { - const bundle = convertClaudeToCursor(fixturePlugin, defaultOptions) - - expect(bundle.commands).toHaveLength(1) - const command = bundle.commands[0] - expect(command.name).toBe("plan") - }) - - test("commands are plain markdown without frontmatter", () => { - const bundle = convertClaudeToCursor(fixturePlugin, defaultOptions) - const command = bundle.commands[0] - - // Should NOT start with --- - expect(command.content.startsWith("---")).toBe(false) - // Should include the description as a comment - expect(command.content).toContain("<!-- Planning command -->") - expect(command.content).toContain("Plan the work.") - }) - - test("command name collision after flattening is deduplicated", () => { - const plugin: ClaudePlugin = { - ...fixturePlugin, - commands: [ - { - name: "workflows:plan", - description: "Workflow plan", - body: "Plan body.", - sourcePath: "/tmp/plugin/commands/workflows/plan.md", - }, - { - name: "plan", - description: "Top-level plan", - body: "Top plan body.", - sourcePath: "/tmp/plugin/commands/plan.md", - }, - ], - agents: [], - skills: [], - } - - const bundle = convertClaudeToCursor(plugin, defaultOptions) - const names = bundle.commands.map((c) => c.name) - expect(names).toEqual(["plan", "plan-2"]) - }) - - test("command with disable-model-invocation is still included", () => { - const plugin: ClaudePlugin = { - ...fixturePlugin, - commands: [ - { - name: 
"setup", - description: "Setup command", - disableModelInvocation: true, - body: "Setup body.", - sourcePath: "/tmp/plugin/commands/setup.md", - }, - ], - agents: [], - skills: [], - } - - const bundle = convertClaudeToCursor(plugin, defaultOptions) - expect(bundle.commands).toHaveLength(1) - expect(bundle.commands[0].name).toBe("setup") - }) - - test("command allowedTools is silently dropped", () => { - const bundle = convertClaudeToCursor(fixturePlugin, defaultOptions) - const command = bundle.commands[0] - expect(command.content).not.toContain("allowedTools") - expect(command.content).not.toContain("Read") - }) - - test("command with argument-hint gets Arguments section", () => { - const bundle = convertClaudeToCursor(fixturePlugin, defaultOptions) - const command = bundle.commands[0] - expect(command.content).toContain("## Arguments") - expect(command.content).toContain("[FOCUS]") - }) - - test("passes through skill directories", () => { - const bundle = convertClaudeToCursor(fixturePlugin, defaultOptions) - - expect(bundle.skillDirs).toHaveLength(1) - expect(bundle.skillDirs[0].name).toBe("existing-skill") - expect(bundle.skillDirs[0].sourceDir).toBe("/tmp/plugin/skills/existing-skill") - }) - - test("converts MCP servers to JSON config", () => { - const plugin: ClaudePlugin = { - ...fixturePlugin, - agents: [], - commands: [], - skills: [], - mcpServers: { - playwright: { - command: "npx", - args: ["-y", "@anthropic/mcp-playwright"], - env: { DISPLAY: ":0" }, - }, - }, - } - - const bundle = convertClaudeToCursor(plugin, defaultOptions) - expect(bundle.mcpServers).toBeDefined() - expect(bundle.mcpServers!.playwright.command).toBe("npx") - expect(bundle.mcpServers!.playwright.args).toEqual(["-y", "@anthropic/mcp-playwright"]) - expect(bundle.mcpServers!.playwright.env).toEqual({ DISPLAY: ":0" }) - }) - - test("MCP headers pass through for remote servers", () => { - const plugin: ClaudePlugin = { - ...fixturePlugin, - agents: [], - commands: [], - skills: [], - 
mcpServers: { - remote: { - url: "https://mcp.example.com/sse", - headers: { Authorization: "Bearer token" }, - }, - }, - } - - const bundle = convertClaudeToCursor(plugin, defaultOptions) - expect(bundle.mcpServers!.remote.url).toBe("https://mcp.example.com/sse") - expect(bundle.mcpServers!.remote.headers).toEqual({ Authorization: "Bearer token" }) - }) - - test("warns when hooks are present", () => { - const warnSpy = spyOn(console, "warn").mockImplementation(() => {}) - - const plugin: ClaudePlugin = { - ...fixturePlugin, - agents: [], - commands: [], - skills: [], - hooks: { - hooks: { - PreToolUse: [{ matcher: "Bash", hooks: [{ type: "command", command: "echo test" }] }], - }, - }, - } - - convertClaudeToCursor(plugin, defaultOptions) - expect(warnSpy).toHaveBeenCalledWith( - "Warning: Cursor does not support hooks. Hooks were skipped during conversion.", - ) - - warnSpy.mockRestore() - }) - - test("no warning when hooks are absent", () => { - const warnSpy = spyOn(console, "warn").mockImplementation(() => {}) - - convertClaudeToCursor(fixturePlugin, defaultOptions) - expect(warnSpy).not.toHaveBeenCalled() - - warnSpy.mockRestore() - }) - - test("plugin with zero agents produces empty rules array", () => { - const plugin: ClaudePlugin = { - ...fixturePlugin, - agents: [], - } - - const bundle = convertClaudeToCursor(plugin, defaultOptions) - expect(bundle.rules).toHaveLength(0) - }) - - test("plugin with only skills works", () => { - const plugin: ClaudePlugin = { - ...fixturePlugin, - agents: [], - commands: [], - } - - const bundle = convertClaudeToCursor(plugin, defaultOptions) - expect(bundle.rules).toHaveLength(0) - expect(bundle.commands).toHaveLength(0) - expect(bundle.skillDirs).toHaveLength(1) - }) -}) - -describe("transformContentForCursor", () => { - test("rewrites .claude/ paths to .cursor/", () => { - const input = "Read `.claude/compound-engineering.local.md` for config." 
- const result = transformContentForCursor(input) - expect(result).toContain(".cursor/compound-engineering.local.md") - expect(result).not.toContain(".claude/") - }) - - test("rewrites ~/.claude/ paths to ~/.cursor/", () => { - const input = "Global config at ~/.claude/settings.json" - const result = transformContentForCursor(input) - expect(result).toContain("~/.cursor/settings.json") - expect(result).not.toContain("~/.claude/") - }) - - test("transforms Task agent calls to skill references", () => { - const input = `Run agents: - -- Task repo-research-analyst(feature_description) -- Task learnings-researcher(feature_description) - -Task best-practices-researcher(topic)` - - const result = transformContentForCursor(input) - expect(result).toContain("Use the repo-research-analyst skill to: feature_description") - expect(result).toContain("Use the learnings-researcher skill to: feature_description") - expect(result).toContain("Use the best-practices-researcher skill to: topic") - expect(result).not.toContain("Task repo-research-analyst(") - }) - - test("flattens slash commands", () => { - const input = `1. Run /deepen-plan to enhance -2. Start /workflows:work to implement -3. File at /tmp/output.md` - - const result = transformContentForCursor(input) - expect(result).toContain("/deepen-plan") - expect(result).toContain("/work") - expect(result).not.toContain("/workflows:work") - // File paths preserved - expect(result).toContain("/tmp/output.md") - }) - - test("transforms @agent references to rule references", () => { - const input = "Have @security-sentinel and @dhh-rails-reviewer check the code." 
- const result = transformContentForCursor(input) - expect(result).toContain("the security-sentinel rule") - expect(result).toContain("the dhh-rails-reviewer rule") - expect(result).not.toContain("@security-sentinel") - }) -}) diff --git a/tests/cursor-writer.test.ts b/tests/cursor-writer.test.ts deleted file mode 100644 index 111af02..0000000 --- a/tests/cursor-writer.test.ts +++ /dev/null @@ -1,137 +0,0 @@ -import { describe, expect, test } from "bun:test" -import { promises as fs } from "fs" -import path from "path" -import os from "os" -import { writeCursorBundle } from "../src/targets/cursor" -import type { CursorBundle } from "../src/types/cursor" - -async function exists(filePath: string): Promise<boolean> { - try { - await fs.access(filePath) - return true - } catch { - return false - } -} - -describe("writeCursorBundle", () => { - test("writes rules, commands, skills, and mcp.json", async () => { - const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "cursor-test-")) - const bundle: CursorBundle = { - rules: [{ name: "security-reviewer", content: "---\ndescription: Security\nglobs: \"\"\nalwaysApply: false\n---\n\nReview code." }], - commands: [{ name: "plan", content: "<!-- Planning -->\n\nPlan the work." 
}], - skillDirs: [ - { - name: "skill-one", - sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"), - }, - ], - mcpServers: { - playwright: { command: "npx", args: ["-y", "@anthropic/mcp-playwright"] }, - }, - } - - await writeCursorBundle(tempRoot, bundle) - - expect(await exists(path.join(tempRoot, ".cursor", "rules", "security-reviewer.mdc"))).toBe(true) - expect(await exists(path.join(tempRoot, ".cursor", "commands", "plan.md"))).toBe(true) - expect(await exists(path.join(tempRoot, ".cursor", "skills", "skill-one", "SKILL.md"))).toBe(true) - expect(await exists(path.join(tempRoot, ".cursor", "mcp.json"))).toBe(true) - - const ruleContent = await fs.readFile( - path.join(tempRoot, ".cursor", "rules", "security-reviewer.mdc"), - "utf8", - ) - expect(ruleContent).toContain("Review code.") - - const commandContent = await fs.readFile( - path.join(tempRoot, ".cursor", "commands", "plan.md"), - "utf8", - ) - expect(commandContent).toContain("Plan the work.") - - const mcpContent = JSON.parse( - await fs.readFile(path.join(tempRoot, ".cursor", "mcp.json"), "utf8"), - ) - expect(mcpContent.mcpServers.playwright.command).toBe("npx") - }) - - test("writes directly into a .cursor output root without double-nesting", async () => { - const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "cursor-home-")) - const cursorRoot = path.join(tempRoot, ".cursor") - const bundle: CursorBundle = { - rules: [{ name: "reviewer", content: "Reviewer rule content" }], - commands: [{ name: "plan", content: "Plan content" }], - skillDirs: [], - } - - await writeCursorBundle(cursorRoot, bundle) - - expect(await exists(path.join(cursorRoot, "rules", "reviewer.mdc"))).toBe(true) - expect(await exists(path.join(cursorRoot, "commands", "plan.md"))).toBe(true) - // Should NOT double-nest under .cursor/.cursor - expect(await exists(path.join(cursorRoot, ".cursor"))).toBe(false) - }) - - test("handles empty bundles gracefully", async () => { - const tempRoot = 
await fs.mkdtemp(path.join(os.tmpdir(), "cursor-empty-")) - const bundle: CursorBundle = { - rules: [], - commands: [], - skillDirs: [], - } - - await writeCursorBundle(tempRoot, bundle) - expect(await exists(tempRoot)).toBe(true) - }) - - test("writes multiple rules as separate .mdc files", async () => { - const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "cursor-multi-")) - const cursorRoot = path.join(tempRoot, ".cursor") - const bundle: CursorBundle = { - rules: [ - { name: "security-sentinel", content: "Security rules" }, - { name: "performance-oracle", content: "Performance rules" }, - { name: "code-simplicity-reviewer", content: "Simplicity rules" }, - ], - commands: [], - skillDirs: [], - } - - await writeCursorBundle(cursorRoot, bundle) - - expect(await exists(path.join(cursorRoot, "rules", "security-sentinel.mdc"))).toBe(true) - expect(await exists(path.join(cursorRoot, "rules", "performance-oracle.mdc"))).toBe(true) - expect(await exists(path.join(cursorRoot, "rules", "code-simplicity-reviewer.mdc"))).toBe(true) - }) - - test("backs up existing mcp.json before overwriting", async () => { - const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "cursor-backup-")) - const cursorRoot = path.join(tempRoot, ".cursor") - await fs.mkdir(cursorRoot, { recursive: true }) - - // Write an existing mcp.json - const mcpPath = path.join(cursorRoot, "mcp.json") - await fs.writeFile(mcpPath, JSON.stringify({ mcpServers: { old: { command: "old-cmd" } } })) - - const bundle: CursorBundle = { - rules: [], - commands: [], - skillDirs: [], - mcpServers: { - newServer: { command: "new-cmd" }, - }, - } - - await writeCursorBundle(cursorRoot, bundle) - - // New mcp.json should have the new content - const newContent = JSON.parse(await fs.readFile(mcpPath, "utf8")) - expect(newContent.mcpServers.newServer.command).toBe("new-cmd") - - // A backup file should exist - const files = await fs.readdir(cursorRoot) - const backupFiles = files.filter((f) => 
f.startsWith("mcp.json.bak.")) - expect(backupFiles.length).toBeGreaterThanOrEqual(1) - }) -}) diff --git a/tests/detect-tools.test.ts b/tests/detect-tools.test.ts new file mode 100644 index 0000000..b819909 --- /dev/null +++ b/tests/detect-tools.test.ts @@ -0,0 +1,119 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import path from "path" +import os from "os" +import { detectInstalledTools, getDetectedTargetNames } from "../src/utils/detect-tools" + +describe("detectInstalledTools", () => { + test("detects tools when config directories exist", async () => { + const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "detect-tools-")) + const tempCwd = await fs.mkdtemp(path.join(os.tmpdir(), "detect-tools-cwd-")) + + // Create directories for some tools + await fs.mkdir(path.join(tempHome, ".codex"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".codeium", "windsurf"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".gemini"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".copilot"), { recursive: true }) + + const results = await detectInstalledTools(tempHome, tempCwd) + + const codex = results.find((t) => t.name === "codex") + expect(codex?.detected).toBe(true) + expect(codex?.reason).toContain(".codex") + + const windsurf = results.find((t) => t.name === "windsurf") + expect(windsurf?.detected).toBe(true) + expect(windsurf?.reason).toContain(".codeium/windsurf") + + const gemini = results.find((t) => t.name === "gemini") + expect(gemini?.detected).toBe(true) + expect(gemini?.reason).toContain(".gemini") + + const copilot = results.find((t) => t.name === "copilot") + expect(copilot?.detected).toBe(true) + expect(copilot?.reason).toContain(".copilot") + + // Tools without directories should not be detected + const opencode = results.find((t) => t.name === "opencode") + expect(opencode?.detected).toBe(false) + + const droid = results.find((t) => t.name === "droid") + 
expect(droid?.detected).toBe(false) + + const pi = results.find((t) => t.name === "pi") + expect(pi?.detected).toBe(false) + }) + + test("returns all tools with detected=false when no directories exist", async () => { + const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "detect-empty-")) + const tempCwd = await fs.mkdtemp(path.join(os.tmpdir(), "detect-empty-cwd-")) + + const results = await detectInstalledTools(tempHome, tempCwd) + + expect(results.length).toBe(10) + for (const tool of results) { + expect(tool.detected).toBe(false) + expect(tool.reason).toBe("not found") + } + }) + + test("detects home-based tools", async () => { + const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "detect-home-")) + const tempCwd = await fs.mkdtemp(path.join(os.tmpdir(), "detect-home-cwd-")) + + await fs.mkdir(path.join(tempHome, ".config", "opencode"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".factory"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".pi"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".openclaw"), { recursive: true }) + + const results = await detectInstalledTools(tempHome, tempCwd) + + expect(results.find((t) => t.name === "opencode")?.detected).toBe(true) + expect(results.find((t) => t.name === "droid")?.detected).toBe(true) + expect(results.find((t) => t.name === "pi")?.detected).toBe(true) + expect(results.find((t) => t.name === "openclaw")?.detected).toBe(true) + }) + + test("detects copilot from project-specific skills without generic .github false positives", async () => { + const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "detect-copilot-home-")) + const tempCwd = await fs.mkdtemp(path.join(os.tmpdir(), "detect-copilot-cwd-")) + + await fs.mkdir(path.join(tempCwd, ".github"), { recursive: true }) + + let results = await detectInstalledTools(tempHome, tempCwd) + expect(results.find((t) => t.name === "copilot")?.detected).toBe(false) + + await fs.mkdir(path.join(tempCwd, ".github", "skills"), 
{ recursive: true }) + + results = await detectInstalledTools(tempHome, tempCwd) + expect(results.find((t) => t.name === "copilot")?.detected).toBe(true) + expect(results.find((t) => t.name === "copilot")?.reason).toContain(".github/skills") + }) +}) + +describe("getDetectedTargetNames", () => { + test("returns only names of detected tools", async () => { + const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "detect-names-")) + const tempCwd = await fs.mkdtemp(path.join(os.tmpdir(), "detect-names-cwd-")) + + await fs.mkdir(path.join(tempHome, ".codex"), { recursive: true }) + await fs.mkdir(path.join(tempHome, ".gemini"), { recursive: true }) + + const names = await getDetectedTargetNames(tempHome, tempCwd) + + expect(names).toContain("codex") + expect(names).toContain("gemini") + expect(names).not.toContain("opencode") + expect(names).not.toContain("droid") + expect(names).not.toContain("pi") + expect(names).not.toContain("cursor") + }) + + test("returns empty array when nothing detected", async () => { + const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "detect-none-")) + const tempCwd = await fs.mkdtemp(path.join(os.tmpdir(), "detect-none-cwd-")) + + const names = await getDetectedTargetNames(tempHome, tempCwd) + expect(names).toEqual([]) + }) +}) diff --git a/tests/kiro-converter.test.ts b/tests/kiro-converter.test.ts new file mode 100644 index 0000000..e638f71 --- /dev/null +++ b/tests/kiro-converter.test.ts @@ -0,0 +1,381 @@ +import { describe, expect, test } from "bun:test" +import { convertClaudeToKiro, transformContentForKiro } from "../src/converters/claude-to-kiro" +import { parseFrontmatter } from "../src/utils/frontmatter" +import type { ClaudePlugin } from "../src/types/claude" + +const fixturePlugin: ClaudePlugin = { + root: "/tmp/plugin", + manifest: { name: "fixture", version: "1.0.0" }, + agents: [ + { + name: "Security Reviewer", + description: "Security-focused agent", + capabilities: ["Threat modeling", "OWASP"], + model: 
"claude-sonnet-4-20250514", + body: "Focus on vulnerabilities.", + sourcePath: "/tmp/plugin/agents/security-reviewer.md", + }, + ], + commands: [ + { + name: "workflows:plan", + description: "Planning command", + argumentHint: "[FOCUS]", + model: "inherit", + allowedTools: ["Read"], + body: "Plan the work.", + sourcePath: "/tmp/plugin/commands/workflows/plan.md", + }, + ], + skills: [ + { + name: "existing-skill", + description: "Existing skill", + sourceDir: "/tmp/plugin/skills/existing-skill", + skillPath: "/tmp/plugin/skills/existing-skill/SKILL.md", + }, + ], + hooks: undefined, + mcpServers: { + local: { command: "echo", args: ["hello"] }, + }, +} + +const defaultOptions = { + agentMode: "subagent" as const, + inferTemperature: false, + permissions: "none" as const, +} + +describe("convertClaudeToKiro", () => { + test("converts agents to Kiro agent configs with prompt files", () => { + const bundle = convertClaudeToKiro(fixturePlugin, defaultOptions) + + const agent = bundle.agents.find((a) => a.name === "security-reviewer") + expect(agent).toBeDefined() + expect(agent!.config.name).toBe("security-reviewer") + expect(agent!.config.description).toBe("Security-focused agent") + expect(agent!.config.prompt).toBe("file://./prompts/security-reviewer.md") + expect(agent!.config.tools).toEqual(["*"]) + expect(agent!.config.includeMcpJson).toBe(true) + expect(agent!.config.resources).toContain("file://.kiro/steering/**/*.md") + expect(agent!.config.resources).toContain("skill://.kiro/skills/**/SKILL.md") + expect(agent!.promptContent).toContain("Focus on vulnerabilities.") + }) + + test("agent config has welcomeMessage generated from description", () => { + const bundle = convertClaudeToKiro(fixturePlugin, defaultOptions) + const agent = bundle.agents.find((a) => a.name === "security-reviewer") + expect(agent!.config.welcomeMessage).toContain("security-reviewer") + expect(agent!.config.welcomeMessage).toContain("Security-focused agent") + }) + + test("agent with 
capabilities prepended to prompt content", () => { + const bundle = convertClaudeToKiro(fixturePlugin, defaultOptions) + const agent = bundle.agents.find((a) => a.name === "security-reviewer") + expect(agent!.promptContent).toContain("## Capabilities") + expect(agent!.promptContent).toContain("- Threat modeling") + expect(agent!.promptContent).toContain("- OWASP") + }) + + test("agent with empty description gets default description", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { + name: "my-agent", + body: "Do things.", + sourcePath: "/tmp/plugin/agents/my-agent.md", + }, + ], + commands: [], + skills: [], + } + + const bundle = convertClaudeToKiro(plugin, defaultOptions) + expect(bundle.agents[0].config.description).toBe("Use this agent for my-agent tasks") + }) + + test("agent model field silently dropped", () => { + const bundle = convertClaudeToKiro(fixturePlugin, defaultOptions) + const agent = bundle.agents.find((a) => a.name === "security-reviewer") + expect((agent!.config as Record<string, unknown>).model).toBeUndefined() + }) + + test("agent with empty body gets default body text", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { + name: "Empty Agent", + description: "An empty agent", + body: "", + sourcePath: "/tmp/plugin/agents/empty.md", + }, + ], + commands: [], + skills: [], + } + + const bundle = convertClaudeToKiro(plugin, defaultOptions) + expect(bundle.agents[0].promptContent).toContain("Instructions converted from the Empty Agent agent.") + }) + + test("converts commands to SKILL.md with valid frontmatter", () => { + const bundle = convertClaudeToKiro(fixturePlugin, defaultOptions) + + expect(bundle.generatedSkills).toHaveLength(1) + const skill = bundle.generatedSkills[0] + expect(skill.name).toBe("workflows-plan") + const parsed = parseFrontmatter(skill.content) + expect(parsed.data.name).toBe("workflows-plan") + expect(parsed.data.description).toBe("Planning command") + 
expect(parsed.body).toContain("Plan the work.") + }) + + test("command with disable-model-invocation is still included", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + commands: [ + { + name: "disabled-command", + description: "Disabled command", + disableModelInvocation: true, + body: "Disabled body.", + sourcePath: "/tmp/plugin/commands/disabled.md", + }, + ], + agents: [], + skills: [], + } + + const bundle = convertClaudeToKiro(plugin, defaultOptions) + expect(bundle.generatedSkills).toHaveLength(1) + expect(bundle.generatedSkills[0].name).toBe("disabled-command") + }) + + test("command allowedTools silently dropped", () => { + const bundle = convertClaudeToKiro(fixturePlugin, defaultOptions) + const skill = bundle.generatedSkills[0] + expect(skill.content).not.toContain("allowedTools") + }) + + test("skills pass through as directory references", () => { + const bundle = convertClaudeToKiro(fixturePlugin, defaultOptions) + + expect(bundle.skillDirs).toHaveLength(1) + expect(bundle.skillDirs[0].name).toBe("existing-skill") + expect(bundle.skillDirs[0].sourceDir).toBe("/tmp/plugin/skills/existing-skill") + }) + + test("MCP stdio servers convert to mcp.json-compatible config", () => { + const bundle = convertClaudeToKiro(fixturePlugin, defaultOptions) + expect(bundle.mcpServers.local.command).toBe("echo") + expect(bundle.mcpServers.local.args).toEqual(["hello"]) + }) + + test("MCP HTTP servers skipped with warning", () => { + const warnings: string[] = [] + const originalWarn = console.warn + console.warn = (msg: string) => warnings.push(msg) + + const plugin: ClaudePlugin = { + ...fixturePlugin, + mcpServers: { + httpServer: { url: "https://example.com/mcp" }, + }, + agents: [], + commands: [], + skills: [], + } + + const bundle = convertClaudeToKiro(plugin, defaultOptions) + console.warn = originalWarn + + expect(Object.keys(bundle.mcpServers)).toHaveLength(0) + expect(warnings.some((w) => w.includes("no command") || 
w.includes("HTTP"))).toBe(true) + }) + + test("plugin with zero agents produces empty agents array", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [], + commands: [], + skills: [], + } + + const bundle = convertClaudeToKiro(plugin, defaultOptions) + expect(bundle.agents).toHaveLength(0) + expect(bundle.generatedSkills).toHaveLength(0) + expect(bundle.skillDirs).toHaveLength(0) + }) + + test("plugin with only skills works correctly", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [], + commands: [], + } + + const bundle = convertClaudeToKiro(plugin, defaultOptions) + expect(bundle.agents).toHaveLength(0) + expect(bundle.generatedSkills).toHaveLength(0) + expect(bundle.skillDirs).toHaveLength(1) + }) + + test("skill name colliding with command name: command gets deduplicated", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + skills: [{ name: "my-command", description: "Existing skill", sourceDir: "/tmp/skill", skillPath: "/tmp/skill/SKILL.md" }], + commands: [{ name: "my-command", description: "A command", body: "Body.", sourcePath: "/tmp/commands/cmd.md" }], + agents: [], + } + + const bundle = convertClaudeToKiro(plugin, defaultOptions) + + // Skill keeps original name, command gets deduplicated + expect(bundle.skillDirs[0].name).toBe("my-command") + expect(bundle.generatedSkills[0].name).toBe("my-command-2") + }) + + test("hooks present emits console.warn", () => { + const warnings: string[] = [] + const originalWarn = console.warn + console.warn = (msg: string) => warnings.push(msg) + + const plugin: ClaudePlugin = { + ...fixturePlugin, + hooks: { hooks: { PreToolUse: [{ matcher: "*", hooks: [{ type: "command", command: "echo test" }] }] } }, + agents: [], + commands: [], + skills: [], + } + + convertClaudeToKiro(plugin, defaultOptions) + console.warn = originalWarn + + expect(warnings.some((w) => w.includes("Kiro"))).toBe(true) + }) + + test("steering file not generated when CLAUDE.md missing", () => 
{ + const plugin: ClaudePlugin = { + ...fixturePlugin, + root: "/tmp/nonexistent-plugin-dir", + agents: [], + commands: [], + skills: [], + } + + const bundle = convertClaudeToKiro(plugin, defaultOptions) + expect(bundle.steeringFiles).toHaveLength(0) + }) + + test("name normalization handles various inputs", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { name: "My Cool Agent!!!", description: "Cool", body: "Body.", sourcePath: "/tmp/a.md" }, + { name: "UPPERCASE-AGENT", description: "Upper", body: "Body.", sourcePath: "/tmp/b.md" }, + { name: "agent--with--double-hyphens", description: "Hyphens", body: "Body.", sourcePath: "/tmp/c.md" }, + ], + commands: [], + skills: [], + } + + const bundle = convertClaudeToKiro(plugin, defaultOptions) + expect(bundle.agents[0].name).toBe("my-cool-agent") + expect(bundle.agents[1].name).toBe("uppercase-agent") + expect(bundle.agents[2].name).toBe("agent-with-double-hyphens") // collapsed + }) + + test("description truncation to 1024 chars", () => { + const longDesc = "a".repeat(2000) + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { name: "long-desc", description: longDesc, body: "Body.", sourcePath: "/tmp/a.md" }, + ], + commands: [], + skills: [], + } + + const bundle = convertClaudeToKiro(plugin, defaultOptions) + expect(bundle.agents[0].config.description.length).toBeLessThanOrEqual(1024) + expect(bundle.agents[0].config.description.endsWith("...")).toBe(true) + }) + + test("empty plugin produces empty bundle", () => { + const plugin: ClaudePlugin = { + root: "/tmp/empty", + manifest: { name: "empty", version: "1.0.0" }, + agents: [], + commands: [], + skills: [], + } + + const bundle = convertClaudeToKiro(plugin, defaultOptions) + expect(bundle.agents).toHaveLength(0) + expect(bundle.generatedSkills).toHaveLength(0) + expect(bundle.skillDirs).toHaveLength(0) + expect(bundle.steeringFiles).toHaveLength(0) + expect(Object.keys(bundle.mcpServers)).toHaveLength(0) + }) +}) + 
+describe("transformContentForKiro", () => { + test("transforms .claude/ paths to .kiro/", () => { + const result = transformContentForKiro("Read .claude/settings.json for config.") + expect(result).toContain(".kiro/settings.json") + expect(result).not.toContain(".claude/") + }) + + test("transforms ~/.claude/ paths to ~/.kiro/", () => { + const result = transformContentForKiro("Check ~/.claude/config for settings.") + expect(result).toContain("~/.kiro/config") + expect(result).not.toContain("~/.claude/") + }) + + test("transforms Task agent(args) to use_subagent reference", () => { + const input = `Run these: + +- Task repo-research-analyst(feature_description) +- Task learnings-researcher(feature_description) + +Task best-practices-researcher(topic)` + + const result = transformContentForKiro(input) + expect(result).toContain("Use the use_subagent tool to delegate to the repo-research-analyst agent: feature_description") + expect(result).toContain("Use the use_subagent tool to delegate to the learnings-researcher agent: feature_description") + expect(result).toContain("Use the use_subagent tool to delegate to the best-practices-researcher agent: topic") + expect(result).not.toContain("Task repo-research-analyst") + }) + + test("transforms @agent references for known agents only", () => { + const result = transformContentForKiro("Ask @security-sentinel for a review.", ["security-sentinel"]) + expect(result).toContain("the security-sentinel agent") + expect(result).not.toContain("@security-sentinel") + }) + + test("does not transform @unknown-name when not in known agents", () => { + const result = transformContentForKiro("Contact @someone-else for help.", ["security-sentinel"]) + expect(result).toContain("@someone-else") + }) + + test("transforms Claude tool names to Kiro equivalents", () => { + const result = transformContentForKiro("Use the Bash tool to run commands. 
Use Read to check files.") + expect(result).toContain("shell tool") + expect(result).toContain("read to") + }) + + test("transforms slash command refs to skill activation", () => { + const result = transformContentForKiro("Run /workflows:plan to start planning.") + expect(result).toContain("the workflows-plan skill") + }) + + test("does not transform partial .claude paths like package/.claude-config/", () => { + const result = transformContentForKiro("Check some-package/.claude-config/settings") + // The .claude-config/ part should be transformed since it starts with .claude/ + // but only when preceded by a word boundary + expect(result).toContain("some-package/") + }) +}) diff --git a/tests/kiro-writer.test.ts b/tests/kiro-writer.test.ts new file mode 100644 index 0000000..301dcb6 --- /dev/null +++ b/tests/kiro-writer.test.ts @@ -0,0 +1,273 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import path from "path" +import os from "os" +import { writeKiroBundle } from "../src/targets/kiro" +import type { KiroBundle } from "../src/types/kiro" + +async function exists(filePath: string): Promise<boolean> { + try { + await fs.access(filePath) + return true + } catch { + return false + } +} + +const emptyBundle: KiroBundle = { + agents: [], + generatedSkills: [], + skillDirs: [], + steeringFiles: [], + mcpServers: {}, +} + +describe("writeKiroBundle", () => { + test("writes agents, skills, steering, and mcp.json", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "kiro-test-")) + const bundle: KiroBundle = { + agents: [ + { + name: "security-reviewer", + config: { + name: "security-reviewer", + description: "Security-focused agent", + prompt: "file://./prompts/security-reviewer.md", + tools: ["*"], + resources: ["file://.kiro/steering/**/*.md", "skill://.kiro/skills/**/SKILL.md"], + includeMcpJson: true, + welcomeMessage: "Switching to security-reviewer.", + }, + promptContent: "Review code for 
vulnerabilities.", + }, + ], + generatedSkills: [ + { + name: "workflows-plan", + content: "---\nname: workflows-plan\ndescription: Planning\n---\n\nPlan the work.", + }, + ], + skillDirs: [ + { + name: "skill-one", + sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"), + }, + ], + steeringFiles: [ + { name: "compound-engineering", content: "# Steering content\n\nFollow these guidelines." }, + ], + mcpServers: { + playwright: { command: "npx", args: ["-y", "@anthropic/mcp-playwright"] }, + }, + } + + await writeKiroBundle(tempRoot, bundle) + + // Agent JSON config + const agentConfigPath = path.join(tempRoot, ".kiro", "agents", "security-reviewer.json") + expect(await exists(agentConfigPath)).toBe(true) + const agentConfig = JSON.parse(await fs.readFile(agentConfigPath, "utf8")) + expect(agentConfig.name).toBe("security-reviewer") + expect(agentConfig.includeMcpJson).toBe(true) + expect(agentConfig.tools).toEqual(["*"]) + + // Agent prompt file + const promptPath = path.join(tempRoot, ".kiro", "agents", "prompts", "security-reviewer.md") + expect(await exists(promptPath)).toBe(true) + const promptContent = await fs.readFile(promptPath, "utf8") + expect(promptContent).toContain("Review code for vulnerabilities.") + + // Generated skill + const skillPath = path.join(tempRoot, ".kiro", "skills", "workflows-plan", "SKILL.md") + expect(await exists(skillPath)).toBe(true) + const skillContent = await fs.readFile(skillPath, "utf8") + expect(skillContent).toContain("Plan the work.") + + // Copied skill + expect(await exists(path.join(tempRoot, ".kiro", "skills", "skill-one", "SKILL.md"))).toBe(true) + + // Steering file + const steeringPath = path.join(tempRoot, ".kiro", "steering", "compound-engineering.md") + expect(await exists(steeringPath)).toBe(true) + const steeringContent = await fs.readFile(steeringPath, "utf8") + expect(steeringContent).toContain("Follow these guidelines.") + + // MCP config + const mcpPath = 
path.join(tempRoot, ".kiro", "settings", "mcp.json") + expect(await exists(mcpPath)).toBe(true) + const mcpContent = JSON.parse(await fs.readFile(mcpPath, "utf8")) + expect(mcpContent.mcpServers.playwright.command).toBe("npx") + }) + + test("does not double-nest when output root is .kiro", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "kiro-home-")) + const kiroRoot = path.join(tempRoot, ".kiro") + const bundle: KiroBundle = { + ...emptyBundle, + agents: [ + { + name: "reviewer", + config: { + name: "reviewer", + description: "A reviewer", + prompt: "file://./prompts/reviewer.md", + tools: ["*"], + resources: [], + includeMcpJson: true, + }, + promptContent: "Review content.", + }, + ], + } + + await writeKiroBundle(kiroRoot, bundle) + + expect(await exists(path.join(kiroRoot, "agents", "reviewer.json"))).toBe(true) + // Should NOT double-nest under .kiro/.kiro + expect(await exists(path.join(kiroRoot, ".kiro"))).toBe(false) + }) + + test("handles empty bundles gracefully", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "kiro-empty-")) + + await writeKiroBundle(tempRoot, emptyBundle) + expect(await exists(tempRoot)).toBe(true) + }) + + test("backs up existing mcp.json before overwrite", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "kiro-backup-")) + const kiroRoot = path.join(tempRoot, ".kiro") + const settingsDir = path.join(kiroRoot, "settings") + await fs.mkdir(settingsDir, { recursive: true }) + + // Write existing mcp.json + const mcpPath = path.join(settingsDir, "mcp.json") + await fs.writeFile(mcpPath, JSON.stringify({ mcpServers: { old: { command: "old-cmd" } } })) + + const bundle: KiroBundle = { + ...emptyBundle, + mcpServers: { newServer: { command: "new-cmd" } }, + } + + await writeKiroBundle(kiroRoot, bundle) + + // New mcp.json should have the new content + const newContent = JSON.parse(await fs.readFile(mcpPath, "utf8")) + 
expect(newContent.mcpServers.newServer.command).toBe("new-cmd") + + // A backup file should exist + const files = await fs.readdir(settingsDir) + const backupFiles = files.filter((f) => f.startsWith("mcp.json.bak.")) + expect(backupFiles.length).toBeGreaterThanOrEqual(1) + }) + + test("merges mcpServers into existing mcp.json without clobbering other keys", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "kiro-merge-")) + const kiroRoot = path.join(tempRoot, ".kiro") + const settingsDir = path.join(kiroRoot, "settings") + await fs.mkdir(settingsDir, { recursive: true }) + + // Write existing mcp.json with other keys + const mcpPath = path.join(settingsDir, "mcp.json") + await fs.writeFile(mcpPath, JSON.stringify({ + customKey: "preserve-me", + mcpServers: { old: { command: "old-cmd" } }, + })) + + const bundle: KiroBundle = { + ...emptyBundle, + mcpServers: { newServer: { command: "new-cmd" } }, + } + + await writeKiroBundle(kiroRoot, bundle) + + const content = JSON.parse(await fs.readFile(mcpPath, "utf8")) + expect(content.customKey).toBe("preserve-me") + expect(content.mcpServers.old.command).toBe("old-cmd") + expect(content.mcpServers.newServer.command).toBe("new-cmd") + }) + + test("mcp.json fresh write when no existing file", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "kiro-fresh-")) + const bundle: KiroBundle = { + ...emptyBundle, + mcpServers: { myServer: { command: "my-cmd", args: ["--flag"] } }, + } + + await writeKiroBundle(tempRoot, bundle) + + const mcpPath = path.join(tempRoot, ".kiro", "settings", "mcp.json") + expect(await exists(mcpPath)).toBe(true) + const content = JSON.parse(await fs.readFile(mcpPath, "utf8")) + expect(content.mcpServers.myServer.command).toBe("my-cmd") + expect(content.mcpServers.myServer.args).toEqual(["--flag"]) + }) + + test("agent JSON files are valid JSON with expected fields", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "kiro-json-")) + 
const bundle: KiroBundle = { + ...emptyBundle, + agents: [ + { + name: "test-agent", + config: { + name: "test-agent", + description: "Test agent", + prompt: "file://./prompts/test-agent.md", + tools: ["*"], + resources: ["file://.kiro/steering/**/*.md"], + includeMcpJson: true, + welcomeMessage: "Hello from test-agent.", + }, + promptContent: "Do test things.", + }, + ], + } + + await writeKiroBundle(tempRoot, bundle) + + const configPath = path.join(tempRoot, ".kiro", "agents", "test-agent.json") + const raw = await fs.readFile(configPath, "utf8") + const parsed = JSON.parse(raw) // Should not throw + expect(parsed.name).toBe("test-agent") + expect(parsed.prompt).toBe("file://./prompts/test-agent.md") + expect(parsed.tools).toEqual(["*"]) + expect(parsed.includeMcpJson).toBe(true) + expect(parsed.welcomeMessage).toBe("Hello from test-agent.") + }) + + test("path traversal attempt in skill name is rejected", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "kiro-traversal-")) + const bundle: KiroBundle = { + ...emptyBundle, + generatedSkills: [ + { name: "../escape", content: "Malicious content" }, + ], + } + + await expect(writeKiroBundle(tempRoot, bundle)).rejects.toThrow("unsafe path") + }) + + test("path traversal in agent name is rejected", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "kiro-traversal2-")) + const bundle: KiroBundle = { + ...emptyBundle, + agents: [ + { + name: "../escape", + config: { + name: "../escape", + description: "Malicious", + prompt: "file://./prompts/../escape.md", + tools: ["*"], + resources: [], + includeMcpJson: true, + }, + promptContent: "Bad.", + }, + ], + } + + await expect(writeKiroBundle(tempRoot, bundle)).rejects.toThrow("unsafe path") + }) +}) diff --git a/tests/openclaw-converter.test.ts b/tests/openclaw-converter.test.ts new file mode 100644 index 0000000..e1648d5 --- /dev/null +++ b/tests/openclaw-converter.test.ts @@ -0,0 +1,204 @@ +import { describe, expect, test } from 
"bun:test" +import { convertClaudeToOpenClaw } from "../src/converters/claude-to-openclaw" +import { parseFrontmatter } from "../src/utils/frontmatter" +import type { ClaudePlugin } from "../src/types/claude" + +const fixturePlugin: ClaudePlugin = { + root: "/tmp/plugin", + manifest: { name: "compound-engineering", version: "1.0.0", description: "A plugin" }, + agents: [ + { + name: "security-reviewer", + description: "Security-focused agent", + capabilities: ["Threat modeling", "OWASP"], + model: "claude-sonnet-4-20250514", + body: "Focus on vulnerabilities in ~/.claude/settings.", + sourcePath: "/tmp/plugin/agents/security-reviewer.md", + }, + ], + commands: [ + { + name: "workflows:plan", + description: "Planning command", + argumentHint: "[FOCUS]", + model: "inherit", + allowedTools: ["Read"], + body: "Plan the work. See ~/.claude/settings for config.", + sourcePath: "/tmp/plugin/commands/workflows/plan.md", + }, + { + name: "disabled-cmd", + description: "Disabled command", + model: "inherit", + allowedTools: [], + body: "Should be excluded.", + disableModelInvocation: true, + sourcePath: "/tmp/plugin/commands/disabled-cmd.md", + }, + ], + skills: [ + { + name: "existing-skill", + description: "Existing skill", + sourceDir: "/tmp/plugin/skills/existing-skill", + skillPath: "/tmp/plugin/skills/existing-skill/SKILL.md", + }, + ], + hooks: undefined, + mcpServers: { + local: { command: "npx", args: ["-y", "some-mcp-server"] }, + remote: { url: "https://mcp.example.com/api", headers: { Authorization: "Bearer token" } }, + }, +} + +const defaultOptions = { + agentMode: "subagent" as const, + inferTemperature: false, + permissions: "none" as const, +} + +describe("convertClaudeToOpenClaw", () => { + test("converts agents to skill files with SKILL.md content", () => { + const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions) + + const skill = bundle.skills.find((s) => s.name === "security-reviewer") + expect(skill).toBeDefined() + 
expect(skill!.dir).toBe("agent-security-reviewer") + const parsed = parseFrontmatter(skill!.content) + expect(parsed.data.name).toBe("security-reviewer") + expect(parsed.data.description).toBe("Security-focused agent") + expect(parsed.data.model).toBe("claude-sonnet-4-20250514") + expect(parsed.body).toContain("Focus on vulnerabilities") + }) + + test("converts commands to skill files (excluding disableModelInvocation)", () => { + const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions) + + const cmdSkill = bundle.skills.find((s) => s.name === "workflows:plan") + expect(cmdSkill).toBeDefined() + expect(cmdSkill!.dir).toBe("cmd-workflows:plan") + + const disabledSkill = bundle.skills.find((s) => s.name === "disabled-cmd") + expect(disabledSkill).toBeUndefined() + }) + + test("commands list excludes disableModelInvocation commands", () => { + const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions) + + const cmd = bundle.commands.find((c) => c.name === "workflows-plan") + expect(cmd).toBeDefined() + expect(cmd!.description).toBe("Planning command") + expect(cmd!.acceptsArgs).toBe(true) + + const disabled = bundle.commands.find((c) => c.name === "disabled-cmd") + expect(disabled).toBeUndefined() + }) + + test("command colons are replaced with dashes in command registrations", () => { + const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions) + + const cmd = bundle.commands.find((c) => c.name === "workflows-plan") + expect(cmd).toBeDefined() + expect(cmd!.name).not.toContain(":") + }) + + test("manifest includes plugin id, display name, and skills list", () => { + const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions) + + expect(bundle.manifest.id).toBe("compound-engineering") + expect(bundle.manifest.name).toBe("Compound Engineering") + expect(bundle.manifest.kind).toBe("tool") + expect(bundle.manifest.configSchema).toEqual({ + type: "object", + properties: {}, + }) + 
expect(bundle.manifest.skills).toContain("skills/agent-security-reviewer") + expect(bundle.manifest.skills).toContain("skills/cmd-workflows:plan") + expect(bundle.manifest.skills).toContain("skills/existing-skill") + }) + + test("package.json uses plugin name and version", () => { + const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions) + + expect(bundle.packageJson.name).toBe("openclaw-compound-engineering") + expect(bundle.packageJson.version).toBe("1.0.0") + expect(bundle.packageJson.type).toBe("module") + }) + + test("skillDirCopies includes original skill directories", () => { + const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions) + + const copy = bundle.skillDirCopies.find((s) => s.name === "existing-skill") + expect(copy).toBeDefined() + expect(copy!.sourceDir).toBe("/tmp/plugin/skills/existing-skill") + }) + + test("stdio MCP servers included in openclaw config", () => { + const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions) + + expect(bundle.openclawConfig).toBeDefined() + const mcp = (bundle.openclawConfig!.mcpServers as Record<string, unknown>) + expect(mcp.local).toBeDefined() + expect((mcp.local as any).type).toBe("stdio") + expect((mcp.local as any).command).toBe("npx") + }) + + test("HTTP MCP servers included as http type in openclaw config", () => { + const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions) + + const mcp = (bundle.openclawConfig!.mcpServers as Record<string, unknown>) + expect(mcp.remote).toBeDefined() + expect((mcp.remote as any).type).toBe("http") + expect((mcp.remote as any).url).toBe("https://mcp.example.com/api") + }) + + test("paths are rewritten from .claude/ to .openclaw/ in skill content", () => { + const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions) + + const agentSkill = bundle.skills.find((s) => s.name === "security-reviewer") + expect(agentSkill!.content).toContain("~/.openclaw/settings") + 
expect(agentSkill!.content).not.toContain("~/.claude/settings") + + const cmdSkill = bundle.skills.find((s) => s.name === "workflows:plan") + expect(cmdSkill!.content).toContain("~/.openclaw/settings") + expect(cmdSkill!.content).not.toContain("~/.claude/settings") + }) + + test("generateEntryPoint uses JSON.stringify for safe string escaping", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + commands: [ + { + name: "tricky-cmd", + description: 'Has "quotes" and \\backslashes\\ and\nnewlines', + model: "inherit", + allowedTools: [], + body: "body", + sourcePath: "/tmp/cmd.md", + }, + ], + } + const bundle = convertClaudeToOpenClaw(plugin, defaultOptions) + + // Entry point must be valid JS/TS — JSON.stringify handles all special chars + expect(bundle.entryPoint).toContain('"tricky-cmd"') + expect(bundle.entryPoint).toContain('\\"quotes\\"') + expect(bundle.entryPoint).toContain("\\\\backslashes\\\\") + expect(bundle.entryPoint).toContain("\\n") + // No raw unescaped newline inside a string literal + const lines = bundle.entryPoint.split("\n") + const nameLine = lines.find((l) => l.includes("tricky-cmd") && l.includes("name:")) + expect(nameLine).toBeDefined() + }) + + test("generateEntryPoint emits typed skills record", () => { + const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions) + expect(bundle.entryPoint).toContain("const skills: Record<string, string> = {}") + }) + + test("plugin without MCP servers has no openclawConfig", () => { + const plugin: ClaudePlugin = { ...fixturePlugin, mcpServers: undefined } + const bundle = convertClaudeToOpenClaw(plugin, defaultOptions) + expect(bundle.openclawConfig).toBeUndefined() + }) +}) diff --git a/tests/openclaw-writer.test.ts b/tests/openclaw-writer.test.ts new file mode 100644 index 0000000..ab618d9 --- /dev/null +++ b/tests/openclaw-writer.test.ts @@ -0,0 +1,43 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import os from "os" +import path 
from "path" +import { writeOpenClawBundle } from "../src/targets/openclaw" +import type { OpenClawBundle } from "../src/types/openclaw" + +describe("writeOpenClawBundle", () => { + test("writes openclaw.plugin.json with a configSchema", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-writer-")) + const bundle: OpenClawBundle = { + manifest: { + id: "compound-engineering", + name: "Compound Engineering", + kind: "tool", + configSchema: { + type: "object", + properties: {}, + }, + skills: [], + }, + packageJson: { + name: "openclaw-compound-engineering", + version: "1.0.0", + }, + entryPoint: "export default async function register() {}", + skills: [], + skillDirCopies: [], + commands: [], + } + + await writeOpenClawBundle(tempRoot, bundle) + + const manifest = JSON.parse( + await fs.readFile(path.join(tempRoot, "openclaw.plugin.json"), "utf8"), + ) + + expect(manifest.configSchema).toEqual({ + type: "object", + properties: {}, + }) + }) +}) diff --git a/tests/opencode-writer.test.ts b/tests/opencode-writer.test.ts index 0bafcc0..33b5b4c 100644 --- a/tests/opencode-writer.test.ts +++ b/tests/opencode-writer.test.ts @@ -3,6 +3,7 @@ import { promises as fs } from "fs" import path from "path" import os from "os" import { writeOpenCodeBundle } from "../src/targets/opencode" +import { mergeJsonConfigAtKey } from "../src/sync/json-config" import type { OpenCodeBundle } from "../src/types/opencode" async function exists(filePath: string): Promise<boolean> { @@ -21,6 +22,7 @@ describe("writeOpenCodeBundle", () => { config: { $schema: "https://opencode.ai/config.json" }, agents: [{ name: "agent-one", content: "Agent content" }], plugins: [{ name: "hook.ts", content: "export {}" }], + commandFiles: [], skillDirs: [ { name: "skill-one", @@ -44,6 +46,7 @@ describe("writeOpenCodeBundle", () => { config: { $schema: "https://opencode.ai/config.json" }, agents: [{ name: "agent-one", content: "Agent content" }], plugins: [], + commandFiles: [], 
skillDirs: [ { name: "skill-one", @@ -68,6 +71,7 @@ describe("writeOpenCodeBundle", () => { config: { $schema: "https://opencode.ai/config.json" }, agents: [{ name: "agent-one", content: "Agent content" }], plugins: [], + commandFiles: [], skillDirs: [ { name: "skill-one", @@ -85,28 +89,35 @@ describe("writeOpenCodeBundle", () => { expect(await exists(path.join(outputRoot, ".opencode"))).toBe(false) }) - test("backs up existing opencode.json before overwriting", async () => { + test("merges plugin config into existing opencode.json without destroying user keys", async () => { const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-backup-")) const outputRoot = path.join(tempRoot, ".opencode") const configPath = path.join(outputRoot, "opencode.json") - // Create existing config + // Create existing config with user keys await fs.mkdir(outputRoot, { recursive: true }) const originalConfig = { $schema: "https://opencode.ai/config.json", custom: "value" } await fs.writeFile(configPath, JSON.stringify(originalConfig, null, 2)) + // Bundle adds mcp server but keeps user's custom key const bundle: OpenCodeBundle = { - config: { $schema: "https://opencode.ai/config.json", new: "config" }, + config: { + $schema: "https://opencode.ai/config.json", + mcp: { "plugin-server": { type: "local", command: "uvx", args: ["plugin-srv"] } } + }, agents: [], plugins: [], + commandFiles: [], skillDirs: [], } await writeOpenCodeBundle(outputRoot, bundle) - // New config should be written + // Merged config should have both user key and plugin key const newConfig = JSON.parse(await fs.readFile(configPath, "utf8")) - expect(newConfig.new).toBe("config") + expect(newConfig.custom).toBe("value") // user key preserved + expect(newConfig.mcp).toBeDefined() + expect(newConfig.mcp["plugin-server"]).toBeDefined() // Backup should exist with original content const files = await fs.readdir(outputRoot) @@ -116,4 +127,166 @@ describe("writeOpenCodeBundle", () => { const backupContent = 
JSON.parse(await fs.readFile(path.join(outputRoot, backupFileName!), "utf8")) expect(backupContent.custom).toBe("value") }) + + test("merges mcp servers without overwriting user entry", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-merge-mcp-")) + const outputRoot = path.join(tempRoot, ".opencode") + const configPath = path.join(outputRoot, "opencode.json") + + // Create existing config with user's mcp server + await fs.mkdir(outputRoot, { recursive: true }) + const existingConfig = { + mcp: { "user-server": { type: "local", command: "uvx", args: ["user-srv"] } } + } + await fs.writeFile(configPath, JSON.stringify(existingConfig, null, 2)) + + // Bundle adds plugin server AND has conflicting user-server with different args + const bundle: OpenCodeBundle = { + config: { + $schema: "https://opencode.ai/config.json", + mcp: { + "plugin-server": { type: "local", command: "uvx", args: ["plugin-srv"] }, + "user-server": { type: "local", command: "uvx", args: ["plugin-override"] } // conflict + } + }, + agents: [], + plugins: [], + commandFiles: [], + skillDirs: [], + } + + await writeOpenCodeBundle(outputRoot, bundle) + + // Merged config should have both servers, with user-server keeping user's original args + const mergedConfig = JSON.parse(await fs.readFile(configPath, "utf8")) + expect(mergedConfig.mcp).toBeDefined() + expect(mergedConfig.mcp["plugin-server"]).toBeDefined() + expect(mergedConfig.mcp["user-server"]).toBeDefined() + expect(mergedConfig.mcp["user-server"].args[0]).toBe("user-srv") // user wins on conflict + expect(mergedConfig.mcp["plugin-server"].args[0]).toBe("plugin-srv") // plugin entry present + }) + + test("preserves unrelated user keys when merging opencode.json", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-preserve-")) + const outputRoot = path.join(tempRoot, ".opencode") + const configPath = path.join(outputRoot, "opencode.json") + + // Create existing config with 
multiple user keys + await fs.mkdir(outputRoot, { recursive: true }) + const existingConfig = { + model: "my-model", + theme: "dark", + mcp: {} + } + await fs.writeFile(configPath, JSON.stringify(existingConfig, null, 2)) + + // Bundle adds plugin-specific keys + const bundle: OpenCodeBundle = { + config: { + $schema: "https://opencode.ai/config.json", + mcp: { "plugin-server": { type: "local", command: "uvx", args: ["plugin-srv"] } }, + permission: { "bash": "allow" } + }, + agents: [], + plugins: [], + commandFiles: [], + skillDirs: [], + } + + await writeOpenCodeBundle(outputRoot, bundle) + + // All user keys preserved + const mergedConfig = JSON.parse(await fs.readFile(configPath, "utf8")) + expect(mergedConfig.model).toBe("my-model") + expect(mergedConfig.theme).toBe("dark") + expect(mergedConfig.mcp["plugin-server"]).toBeDefined() + expect(mergedConfig.permission["bash"]).toBe("allow") + }) + + test("writes command files as .md in commands/ directory", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-cmd-")) + const outputRoot = path.join(tempRoot, ".config", "opencode") + const bundle: OpenCodeBundle = { + config: { $schema: "https://opencode.ai/config.json" }, + agents: [], + plugins: [], + commandFiles: [{ name: "my-cmd", content: "---\ndescription: Test\n---\n\nDo something." 
}], + skillDirs: [], + } + + await writeOpenCodeBundle(outputRoot, bundle) + + const cmdPath = path.join(outputRoot, "commands", "my-cmd.md") + expect(await exists(cmdPath)).toBe(true) + + const content = await fs.readFile(cmdPath, "utf8") + expect(content).toBe("---\ndescription: Test\n---\n\nDo something.\n") + }) + + test("backs up existing command .md file before overwriting", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-cmd-backup-")) + const outputRoot = path.join(tempRoot, ".opencode") + const commandsDir = path.join(outputRoot, "commands") + await fs.mkdir(commandsDir, { recursive: true }) + + const cmdPath = path.join(commandsDir, "my-cmd.md") + await fs.writeFile(cmdPath, "old content\n") + + const bundle: OpenCodeBundle = { + config: { $schema: "https://opencode.ai/config.json" }, + agents: [], + plugins: [], + commandFiles: [{ name: "my-cmd", content: "---\ndescription: New\n---\n\nNew content." }], + skillDirs: [], + } + + await writeOpenCodeBundle(outputRoot, bundle) + + // New content should be written + const content = await fs.readFile(cmdPath, "utf8") + expect(content).toBe("---\ndescription: New\n---\n\nNew content.\n") + + // Backup should exist + const files = await fs.readdir(commandsDir) + const backupFileName = files.find((f) => f.startsWith("my-cmd.md.bak.")) + expect(backupFileName).toBeDefined() + + const backupContent = await fs.readFile(path.join(commandsDir, backupFileName!), "utf8") + expect(backupContent).toBe("old content\n") + }) +}) + +describe("mergeJsonConfigAtKey", () => { + test("incoming plugin entries overwrite same-named servers", async () => { + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "json-merge-")) + const configPath = path.join(tempDir, "opencode.json") + + // User has an existing MCP server config + const existingConfig = { + model: "my-model", + mcp: { + "user-server": { type: "local", command: ["uvx", "user-srv"] }, + }, + } + await fs.writeFile(configPath, 
JSON.stringify(existingConfig, null, 2)) + + // Plugin syncs its servers, overwriting same-named entries + await mergeJsonConfigAtKey({ + configPath, + key: "mcp", + incoming: { + "plugin-server": { type: "local", command: ["uvx", "plugin-srv"] }, + "user-server": { type: "local", command: ["uvx", "plugin-override"] }, + }, + }) + + const merged = JSON.parse(await fs.readFile(configPath, "utf8")) + + // User's top-level keys preserved + expect(merged.model).toBe("my-model") + // Plugin server added + expect(merged.mcp["plugin-server"]).toBeDefined() + // Plugin server overwrites same-named existing entry + expect(merged.mcp["user-server"].command[1]).toBe("plugin-override") + }) }) diff --git a/tests/qwen-converter.test.ts b/tests/qwen-converter.test.ts new file mode 100644 index 0000000..b9690a3 --- /dev/null +++ b/tests/qwen-converter.test.ts @@ -0,0 +1,238 @@ +import { describe, expect, test } from "bun:test" +import { convertClaudeToQwen } from "../src/converters/claude-to-qwen" +import { parseFrontmatter } from "../src/utils/frontmatter" +import type { ClaudePlugin } from "../src/types/claude" + +const fixturePlugin: ClaudePlugin = { + root: "/tmp/plugin", + manifest: { name: "compound-engineering", version: "1.2.0", description: "A plugin for engineers" }, + agents: [ + { + name: "security-sentinel", + description: "Security-focused agent", + capabilities: ["Threat modeling", "OWASP"], + model: "claude-sonnet-4-20250514", + body: "Focus on vulnerabilities in ~/.claude/settings.", + sourcePath: "/tmp/plugin/agents/security-sentinel.md", + }, + { + name: "brainstorm-agent", + description: "Creative brainstormer", + model: "inherit", + body: "Generate ideas.", + sourcePath: "/tmp/plugin/agents/brainstorm-agent.md", + }, + ], + commands: [ + { + name: "workflows:plan", + description: "Planning command", + argumentHint: "[FOCUS]", + model: "inherit", + allowedTools: ["Read"], + body: "Plan the work. 
Config at ~/.claude/settings.", + sourcePath: "/tmp/plugin/commands/workflows/plan.md", + }, + { + name: "disabled-cmd", + description: "Disabled", + model: "inherit", + allowedTools: [], + body: "Should be excluded.", + disableModelInvocation: true, + sourcePath: "/tmp/plugin/commands/disabled-cmd.md", + }, + ], + skills: [ + { + name: "existing-skill", + description: "Existing skill", + sourceDir: "/tmp/plugin/skills/existing-skill", + skillPath: "/tmp/plugin/skills/existing-skill/SKILL.md", + }, + ], + hooks: undefined, + mcpServers: { + local: { command: "npx", args: ["-y", "some-mcp"], env: { API_KEY: "${YOUR_API_KEY}" } }, + remote: { url: "https://mcp.example.com/api", headers: { Authorization: "Bearer token" } }, + }, +} + +const defaultOptions = { + agentMode: "subagent" as const, + inferTemperature: false, +} + +describe("convertClaudeToQwen", () => { + test("converts agents to yaml format with frontmatter", () => { + const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions) + + const agent = bundle.agents.find((a) => a.name === "security-sentinel") + expect(agent).toBeDefined() + expect(agent!.format).toBe("yaml") + const parsed = parseFrontmatter(agent!.content) + expect(parsed.data.name).toBe("security-sentinel") + expect(parsed.data.description).toBe("Security-focused agent") + expect(parsed.data.model).toBe("anthropic/claude-sonnet-4-20250514") + expect(parsed.body).toContain("Focus on vulnerabilities") + }) + + test("agent with inherit model has no model field in frontmatter", () => { + const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions) + const agent = bundle.agents.find((a) => a.name === "brainstorm-agent") + expect(agent).toBeDefined() + const parsed = parseFrontmatter(agent!.content) + expect(parsed.data.model).toBeUndefined() + }) + + test("inferTemperature injects temperature based on agent name/description", () => { + const bundle = convertClaudeToQwen(fixturePlugin, { ...defaultOptions, inferTemperature: true }) + + 
const sentinel = bundle.agents.find((a) => a.name === "security-sentinel") + const parsed = parseFrontmatter(sentinel!.content) + expect(parsed.data.temperature).toBe(0.1) // review/security → 0.1 + + const brainstorm = bundle.agents.find((a) => a.name === "brainstorm-agent") + const bParsed = parseFrontmatter(brainstorm!.content) + expect(bParsed.data.temperature).toBe(0.6) // brainstorm → 0.6 + }) + + test("inferTemperature returns undefined for unrecognized agents (no temperature set)", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [{ name: "my-helper", description: "Generic helper", model: "inherit", body: "help", sourcePath: "/tmp/a.md" }], + } + const bundle = convertClaudeToQwen(plugin, { ...defaultOptions, inferTemperature: true }) + const agent = bundle.agents[0] + const parsed = parseFrontmatter(agent.content) + expect(parsed.data.temperature).toBeUndefined() + }) + + test("converts commands to command files excluding disableModelInvocation", () => { + const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions) + + const planCmd = bundle.commandFiles.find((c) => c.name === "workflows:plan") + expect(planCmd).toBeDefined() + const parsed = parseFrontmatter(planCmd!.content) + expect(parsed.data.description).toBe("Planning command") + expect(parsed.data.allowedTools).toEqual(["Read"]) + + const disabled = bundle.commandFiles.find((c) => c.name === "disabled-cmd") + expect(disabled).toBeUndefined() + }) + + test("config uses plugin manifest name and version", () => { + const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions) + expect(bundle.config.name).toBe("compound-engineering") + expect(bundle.config.version).toBe("1.2.0") + expect(bundle.config.commands).toBe("commands") + expect(bundle.config.skills).toBe("skills") + expect(bundle.config.agents).toBe("agents") + }) + + test("stdio MCP servers are included in config", () => { + const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions) + 
expect(bundle.config.mcpServers).toBeDefined() + const local = bundle.config.mcpServers!.local + expect(local.command).toBe("npx") + expect(local.args).toEqual(["-y", "some-mcp"]) + // No cwd field + expect((local as any).cwd).toBeUndefined() + }) + + test("remote MCP servers are skipped with a warning (not converted to curl)", () => { + const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions) + // Only local (stdio) server should be present + expect(bundle.config.mcpServers).toBeDefined() + expect(bundle.config.mcpServers!.remote).toBeUndefined() + expect(bundle.config.mcpServers!.local).toBeDefined() + }) + + test("placeholder env vars are extracted as settings", () => { + const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions) + expect(bundle.config.settings).toBeDefined() + const apiKeySetting = bundle.config.settings!.find((s) => s.envVar === "API_KEY") + expect(apiKeySetting).toBeDefined() + expect(apiKeySetting!.sensitive).toBe(true) + expect(apiKeySetting!.name).toBe("Api Key") + }) + + test("plugin with no MCP servers has no mcpServers in config", () => { + const plugin: ClaudePlugin = { ...fixturePlugin, mcpServers: undefined } + const bundle = convertClaudeToQwen(plugin, defaultOptions) + expect(bundle.config.mcpServers).toBeUndefined() + }) + + test("context file uses plugin.manifest.name and manifest.description", () => { + const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions) + expect(bundle.contextFile).toContain("# compound-engineering") + expect(bundle.contextFile).toContain("A plugin for engineers") + expect(bundle.contextFile).toContain("## Agents") + expect(bundle.contextFile).toContain("security-sentinel") + expect(bundle.contextFile).toContain("## Commands") + expect(bundle.contextFile).toContain("/workflows:plan") + // Disabled commands excluded + expect(bundle.contextFile).not.toContain("disabled-cmd") + expect(bundle.contextFile).toContain("## Skills") + expect(bundle.contextFile).toContain("existing-skill") + 
}) + + test("paths are rewritten from .claude/ to .qwen/ in agent and command content", () => { + const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions) + + const agent = bundle.agents.find((a) => a.name === "security-sentinel") + expect(agent!.content).toContain("~/.qwen/settings") + expect(agent!.content).not.toContain("~/.claude/settings") + + const cmd = bundle.commandFiles.find((c) => c.name === "workflows:plan") + expect(cmd!.content).toContain("~/.qwen/settings") + expect(cmd!.content).not.toContain("~/.claude/settings") + }) + + test("opencode paths are NOT rewritten (only claude paths)", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { + name: "test-agent", + description: "test", + model: "inherit", + body: "See .opencode/config and ~/.config/opencode/settings", + sourcePath: "/tmp/a.md", + }, + ], + } + const bundle = convertClaudeToQwen(plugin, defaultOptions) + const agent = bundle.agents[0] + // opencode paths should NOT be rewritten + expect(agent.content).toContain(".opencode/config") + expect(agent.content).not.toContain(".qwen/config") + }) + + test("skillDirs passes through original skills", () => { + const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions) + const skill = bundle.skillDirs.find((s) => s.name === "existing-skill") + expect(skill).toBeDefined() + expect(skill!.sourceDir).toBe("/tmp/plugin/skills/existing-skill") + }) + + test("normalizeModel prefixes claude models with anthropic/", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [{ name: "a", description: "d", model: "claude-opus-4-5", body: "b", sourcePath: "/tmp/a.md" }], + } + const bundle = convertClaudeToQwen(plugin, defaultOptions) + const parsed = parseFrontmatter(bundle.agents[0].content) + expect(parsed.data.model).toBe("anthropic/claude-opus-4-5") + }) + + test("normalizeModel passes through already-namespaced models unchanged", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [{ 
name: "a", description: "d", model: "google/gemini-2.0", body: "b", sourcePath: "/tmp/a.md" }], + } + const bundle = convertClaudeToQwen(plugin, defaultOptions) + const parsed = parseFrontmatter(bundle.agents[0].content) + expect(parsed.data.model).toBe("google/gemini-2.0") + }) +}) diff --git a/tests/resolve-output.test.ts b/tests/resolve-output.test.ts new file mode 100644 index 0000000..0d6488d --- /dev/null +++ b/tests/resolve-output.test.ts @@ -0,0 +1,131 @@ +import { describe, expect, test } from "bun:test" +import os from "os" +import path from "path" +import { resolveTargetOutputRoot } from "../src/utils/resolve-output" + +const baseOptions = { + outputRoot: "/tmp/output", + codexHome: path.join(os.homedir(), ".codex"), + piHome: path.join(os.homedir(), ".pi", "agent"), + hasExplicitOutput: false, +} + +describe("resolveTargetOutputRoot", () => { + test("codex returns codexHome", () => { + const result = resolveTargetOutputRoot({ ...baseOptions, targetName: "codex" }) + expect(result).toBe(baseOptions.codexHome) + }) + + test("pi returns piHome", () => { + const result = resolveTargetOutputRoot({ ...baseOptions, targetName: "pi" }) + expect(result).toBe(baseOptions.piHome) + }) + + test("droid returns ~/.factory", () => { + const result = resolveTargetOutputRoot({ ...baseOptions, targetName: "droid" }) + expect(result).toBe(path.join(os.homedir(), ".factory")) + }) + + test("cursor with no explicit output uses cwd", () => { + const result = resolveTargetOutputRoot({ ...baseOptions, targetName: "cursor" }) + expect(result).toBe(path.join(process.cwd(), ".cursor")) + }) + + test("cursor with explicit output uses outputRoot", () => { + const result = resolveTargetOutputRoot({ + ...baseOptions, + targetName: "cursor", + hasExplicitOutput: true, + }) + expect(result).toBe(path.join("/tmp/output", ".cursor")) + }) + + test("windsurf default scope (global) resolves to ~/.codeium/windsurf/", () => { + const result = resolveTargetOutputRoot({ + ...baseOptions, + 
targetName: "windsurf", + scope: "global", + }) + expect(result).toBe(path.join(os.homedir(), ".codeium", "windsurf")) + }) + + test("windsurf workspace scope resolves to cwd/.windsurf/", () => { + const result = resolveTargetOutputRoot({ + ...baseOptions, + targetName: "windsurf", + scope: "workspace", + }) + expect(result).toBe(path.join(process.cwd(), ".windsurf")) + }) + + test("windsurf with explicit output overrides global scope", () => { + const result = resolveTargetOutputRoot({ + ...baseOptions, + targetName: "windsurf", + hasExplicitOutput: true, + scope: "global", + }) + expect(result).toBe("/tmp/output") + }) + + test("windsurf with explicit output overrides workspace scope", () => { + const result = resolveTargetOutputRoot({ + ...baseOptions, + targetName: "windsurf", + hasExplicitOutput: true, + scope: "workspace", + }) + expect(result).toBe("/tmp/output") + }) + + test("windsurf with no scope and no explicit output uses cwd/.windsurf/", () => { + const result = resolveTargetOutputRoot({ + ...baseOptions, + targetName: "windsurf", + }) + expect(result).toBe(path.join(process.cwd(), ".windsurf")) + }) + + test("opencode returns outputRoot as-is", () => { + const result = resolveTargetOutputRoot({ ...baseOptions, targetName: "opencode" }) + expect(result).toBe("/tmp/output") + }) + + test("openclaw uses openclawHome + pluginName", () => { + const result = resolveTargetOutputRoot({ + ...baseOptions, + targetName: "openclaw", + openclawHome: "/custom/openclaw/extensions", + pluginName: "my-plugin", + }) + expect(result).toBe("/custom/openclaw/extensions/my-plugin") + }) + + test("openclaw falls back to default home when not provided", () => { + const result = resolveTargetOutputRoot({ + ...baseOptions, + targetName: "openclaw", + pluginName: "my-plugin", + }) + expect(result).toBe(path.join(os.homedir(), ".openclaw", "extensions", "my-plugin")) + }) + + test("qwen uses qwenHome + pluginName", () => { + const result = resolveTargetOutputRoot({ + 
...baseOptions, + targetName: "qwen", + qwenHome: "/custom/qwen/extensions", + pluginName: "my-plugin", + }) + expect(result).toBe("/custom/qwen/extensions/my-plugin") + }) + + test("qwen falls back to default home when not provided", () => { + const result = resolveTargetOutputRoot({ + ...baseOptions, + targetName: "qwen", + pluginName: "my-plugin", + }) + expect(result).toBe(path.join(os.homedir(), ".qwen", "extensions", "my-plugin")) + }) +}) diff --git a/tests/sync-codex.test.ts b/tests/sync-codex.test.ts new file mode 100644 index 0000000..9714ba8 --- /dev/null +++ b/tests/sync-codex.test.ts @@ -0,0 +1,64 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import os from "os" +import path from "path" +import type { ClaudeHomeConfig } from "../src/parsers/claude-home" +import { syncToCodex } from "../src/sync/codex" + +describe("syncToCodex", () => { + test("writes stdio and remote MCP servers into a managed block without clobbering user config", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-codex-")) + const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one") + const configPath = path.join(tempRoot, "config.toml") + + await fs.writeFile( + configPath, + [ + "[custom]", + "enabled = true", + "", + "# BEGIN compound-plugin Claude Code MCP", + "[mcp_servers.old]", + "command = \"old\"", + "# END compound-plugin Claude Code MCP", + "", + "[post]", + "value = 2", + "", + ].join("\n"), + ) + + const config: ClaudeHomeConfig = { + skills: [ + { + name: "skill-one", + sourceDir: fixtureSkillDir, + skillPath: path.join(fixtureSkillDir, "SKILL.md"), + }, + ], + mcpServers: { + local: { command: "echo", args: ["hello"], env: { KEY: "VALUE" } }, + remote: { url: "https://example.com/mcp", headers: { Authorization: "Bearer token" } }, + }, + } + + await syncToCodex(config, tempRoot) + + const skillPath = path.join(tempRoot, "skills", "skill-one") + 
expect((await fs.lstat(skillPath)).isSymbolicLink()).toBe(true) + + const content = await fs.readFile(configPath, "utf8") + expect(content).toContain("[custom]") + expect(content).toContain("[post]") + expect(content).not.toContain("[mcp_servers.old]") + expect(content).toContain("[mcp_servers.local]") + expect(content).toContain("command = \"echo\"") + expect(content).toContain("[mcp_servers.remote]") + expect(content).toContain("url = \"https://example.com/mcp\"") + expect(content).toContain("http_headers") + expect(content.match(/# BEGIN compound-plugin Claude Code MCP/g)?.length).toBe(1) + + const perms = (await fs.stat(configPath)).mode & 0o777 + expect(perms).toBe(0o600) + }) +}) diff --git a/tests/sync-copilot.test.ts b/tests/sync-copilot.test.ts new file mode 100644 index 0000000..a95393c --- /dev/null +++ b/tests/sync-copilot.test.ts @@ -0,0 +1,204 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import path from "path" +import os from "os" +import { syncToCopilot } from "../src/sync/copilot" +import type { ClaudeHomeConfig } from "../src/parsers/claude-home" + +describe("syncToCopilot", () => { + test("symlinks skills to .github/skills/", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-")) + const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one") + + const config: ClaudeHomeConfig = { + skills: [ + { + name: "skill-one", + sourceDir: fixtureSkillDir, + skillPath: path.join(fixtureSkillDir, "SKILL.md"), + }, + ], + mcpServers: {}, + } + + await syncToCopilot(config, tempRoot) + + const linkedSkillPath = path.join(tempRoot, "skills", "skill-one") + const linkedStat = await fs.lstat(linkedSkillPath) + expect(linkedStat.isSymbolicLink()).toBe(true) + }) + + test("converts personal commands into Copilot skills", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-cmd-")) + + const config: 
ClaudeHomeConfig = { + skills: [], + commands: [ + { + name: "workflows:plan", + description: "Planning command", + argumentHint: "[goal]", + body: "Plan the work carefully.", + sourcePath: "/tmp/workflows/plan.md", + }, + ], + mcpServers: {}, + } + + await syncToCopilot(config, tempRoot) + + const skillContent = await fs.readFile( + path.join(tempRoot, "skills", "workflows-plan", "SKILL.md"), + "utf8", + ) + expect(skillContent).toContain("name: workflows-plan") + expect(skillContent).toContain("Planning command") + expect(skillContent).toContain("## Arguments") + }) + + test("skips skills with invalid names", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-invalid-")) + + const config: ClaudeHomeConfig = { + skills: [ + { + name: "../escape-attempt", + sourceDir: "/tmp/bad-skill", + skillPath: "/tmp/bad-skill/SKILL.md", + }, + ], + mcpServers: {}, + } + + await syncToCopilot(config, tempRoot) + + const skillsDir = path.join(tempRoot, "skills") + const entries = await fs.readdir(skillsDir).catch(() => []) + expect(entries).toHaveLength(0) + }) + + test("merges MCP config with existing file", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-merge-")) + const mcpPath = path.join(tempRoot, "mcp-config.json") + + await fs.writeFile( + mcpPath, + JSON.stringify({ + mcpServers: { + existing: { type: "local", command: "node", args: ["server.js"], tools: ["*"] }, + }, + }, null, 2), + ) + + const config: ClaudeHomeConfig = { + skills: [], + mcpServers: { + context7: { url: "https://mcp.context7.com/mcp" }, + }, + } + + await syncToCopilot(config, tempRoot) + + const merged = JSON.parse(await fs.readFile(mcpPath, "utf8")) as { + mcpServers: Record<string, { command?: string; url?: string; type: string }> + } + + expect(merged.mcpServers.existing?.command).toBe("node") + expect(merged.mcpServers.context7?.url).toBe("https://mcp.context7.com/mcp") + 
expect(merged.mcpServers.context7?.type).toBe("http") + }) + + test("transforms MCP env var names to COPILOT_MCP_ prefix", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-env-")) + + const config: ClaudeHomeConfig = { + skills: [], + mcpServers: { + server: { + command: "echo", + args: ["hello"], + env: { API_KEY: "secret", COPILOT_MCP_TOKEN: "already-prefixed" }, + }, + }, + } + + await syncToCopilot(config, tempRoot) + + const mcpPath = path.join(tempRoot, "mcp-config.json") + const mcpConfig = JSON.parse(await fs.readFile(mcpPath, "utf8")) as { + mcpServers: Record<string, { env?: Record<string, string> }> + } + + expect(mcpConfig.mcpServers.server?.env).toEqual({ + COPILOT_MCP_API_KEY: "secret", + COPILOT_MCP_TOKEN: "already-prefixed", + }) + }) + + test("writes MCP config with restricted permissions", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-perms-")) + + const config: ClaudeHomeConfig = { + skills: [], + mcpServers: { + server: { command: "echo", args: ["hello"] }, + }, + } + + await syncToCopilot(config, tempRoot) + + const mcpPath = path.join(tempRoot, "mcp-config.json") + const stat = await fs.stat(mcpPath) + // Check owner read+write-only permission: stat.mode includes file-type bits (e.g. 0o100600 = 33152 for a regular file), so mask with 0o777 to get the 0o600 permission bits + const perms = stat.mode & 0o777 + expect(perms).toBe(0o600) + }) + + test("does not write MCP config when no MCP servers", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-nomcp-")) + const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one") + + const config: ClaudeHomeConfig = { + skills: [ + { + name: "skill-one", + sourceDir: fixtureSkillDir, + skillPath: path.join(fixtureSkillDir, "SKILL.md"), + }, + ], + mcpServers: {}, + } + + await syncToCopilot(config, tempRoot) + + const mcpExists = await fs.access(path.join(tempRoot, "mcp-config.json")).then(() => true).catch(() => false) + 
expect(mcpExists).toBe(false) + }) + + test("preserves explicit SSE transport for legacy remote servers", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-sse-")) + + const config: ClaudeHomeConfig = { + skills: [], + mcpServers: { + legacy: { + type: "sse", + url: "https://example.com/sse", + }, + }, + } + + await syncToCopilot(config, tempRoot) + + const mcpPath = path.join(tempRoot, "mcp-config.json") + const mcpConfig = JSON.parse(await fs.readFile(mcpPath, "utf8")) as { + mcpServers: Record<string, { type?: string; url?: string }> + } + + expect(mcpConfig.mcpServers.legacy).toEqual({ + type: "sse", + tools: ["*"], + url: "https://example.com/sse", + }) + }) +}) diff --git a/tests/sync-cursor.test.ts b/tests/sync-cursor.test.ts deleted file mode 100644 index e314d28..0000000 --- a/tests/sync-cursor.test.ts +++ /dev/null @@ -1,92 +0,0 @@ -import { describe, expect, test } from "bun:test" -import { promises as fs } from "fs" -import path from "path" -import os from "os" -import { syncToCursor } from "../src/sync/cursor" -import type { ClaudeHomeConfig } from "../src/parsers/claude-home" - -describe("syncToCursor", () => { - test("symlinks skills and writes mcp.json", async () => { - const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-cursor-")) - const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one") - - const config: ClaudeHomeConfig = { - skills: [ - { - name: "skill-one", - sourceDir: fixtureSkillDir, - skillPath: path.join(fixtureSkillDir, "SKILL.md"), - }, - ], - mcpServers: { - context7: { url: "https://mcp.context7.com/mcp" }, - local: { command: "echo", args: ["hello"], env: { FOO: "bar" } }, - }, - } - - await syncToCursor(config, tempRoot) - - // Check skill symlink - const linkedSkillPath = path.join(tempRoot, "skills", "skill-one") - const linkedStat = await fs.lstat(linkedSkillPath) - expect(linkedStat.isSymbolicLink()).toBe(true) - - // Check mcp.json - 
const mcpPath = path.join(tempRoot, "mcp.json") - const mcpConfig = JSON.parse(await fs.readFile(mcpPath, "utf8")) as { - mcpServers: Record<string, { url?: string; command?: string; args?: string[]; env?: Record<string, string> }> - } - - expect(mcpConfig.mcpServers.context7?.url).toBe("https://mcp.context7.com/mcp") - expect(mcpConfig.mcpServers.local?.command).toBe("echo") - expect(mcpConfig.mcpServers.local?.args).toEqual(["hello"]) - expect(mcpConfig.mcpServers.local?.env).toEqual({ FOO: "bar" }) - }) - - test("merges existing mcp.json", async () => { - const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-cursor-merge-")) - const mcpPath = path.join(tempRoot, "mcp.json") - - await fs.writeFile( - mcpPath, - JSON.stringify({ mcpServers: { existing: { command: "node", args: ["server.js"] } } }, null, 2), - ) - - const config: ClaudeHomeConfig = { - skills: [], - mcpServers: { - context7: { url: "https://mcp.context7.com/mcp" }, - }, - } - - await syncToCursor(config, tempRoot) - - const merged = JSON.parse(await fs.readFile(mcpPath, "utf8")) as { - mcpServers: Record<string, { command?: string; url?: string }> - } - - expect(merged.mcpServers.existing?.command).toBe("node") - expect(merged.mcpServers.context7?.url).toBe("https://mcp.context7.com/mcp") - }) - - test("does not write mcp.json when no MCP servers", async () => { - const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-cursor-nomcp-")) - const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one") - - const config: ClaudeHomeConfig = { - skills: [ - { - name: "skill-one", - sourceDir: fixtureSkillDir, - skillPath: path.join(fixtureSkillDir, "SKILL.md"), - }, - ], - mcpServers: {}, - } - - await syncToCursor(config, tempRoot) - - const mcpExists = await fs.access(path.join(tempRoot, "mcp.json")).then(() => true).catch(() => false) - expect(mcpExists).toBe(false) - }) -}) diff --git a/tests/sync-droid.test.ts b/tests/sync-droid.test.ts index 
5920f51..fec4d07 100644 --- a/tests/sync-droid.test.ts +++ b/tests/sync-droid.test.ts @@ -6,7 +6,7 @@ import { syncToDroid } from "../src/sync/droid" import type { ClaudeHomeConfig } from "../src/parsers/claude-home" describe("syncToDroid", () => { - test("symlinks skills to factory skills dir", async () => { + test("symlinks skills to factory skills dir and writes mcp.json", async () => { const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-droid-")) const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one") @@ -29,9 +29,49 @@ describe("syncToDroid", () => { const linkedStat = await fs.lstat(linkedSkillPath) expect(linkedStat.isSymbolicLink()).toBe(true) - // Droid does not write MCP config - const mcpExists = await fs.access(path.join(tempRoot, "mcp.json")).then(() => true).catch(() => false) - expect(mcpExists).toBe(false) + const mcpConfig = JSON.parse( + await fs.readFile(path.join(tempRoot, "mcp.json"), "utf8"), + ) as { + mcpServers: Record<string, { type: string; url?: string; disabled: boolean }> + } + expect(mcpConfig.mcpServers.context7?.type).toBe("http") + expect(mcpConfig.mcpServers.context7?.url).toBe("https://mcp.context7.com/mcp") + expect(mcpConfig.mcpServers.context7?.disabled).toBe(false) + }) + + test("merges existing mcp.json and overwrites same-named servers from Claude", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-droid-merge-")) + await fs.writeFile( + path.join(tempRoot, "mcp.json"), + JSON.stringify({ + theme: "dark", + mcpServers: { + shared: { type: "http", url: "https://old.example.com", disabled: true }, + existing: { type: "stdio", command: "node", disabled: false }, + }, + }, null, 2), + ) + + const config: ClaudeHomeConfig = { + skills: [], + mcpServers: { + shared: { url: "https://new.example.com" }, + }, + } + + await syncToDroid(config, tempRoot) + + const mcpConfig = JSON.parse( + await fs.readFile(path.join(tempRoot, "mcp.json"), "utf8"), 
+ ) as { + theme: string + mcpServers: Record<string, { type: string; url?: string; command?: string; disabled: boolean }> + } + + expect(mcpConfig.theme).toBe("dark") + expect(mcpConfig.mcpServers.existing?.command).toBe("node") + expect(mcpConfig.mcpServers.shared?.url).toBe("https://new.example.com") + expect(mcpConfig.mcpServers.shared?.disabled).toBe(false) }) test("skips skills with invalid names", async () => { diff --git a/tests/sync-gemini.test.ts b/tests/sync-gemini.test.ts new file mode 100644 index 0000000..3e2d303 --- /dev/null +++ b/tests/sync-gemini.test.ts @@ -0,0 +1,160 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import path from "path" +import os from "os" +import { syncToGemini } from "../src/sync/gemini" +import type { ClaudeHomeConfig } from "../src/parsers/claude-home" + +describe("syncToGemini", () => { + test("symlinks skills and writes settings.json", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-gemini-")) + const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one") + + const config: ClaudeHomeConfig = { + skills: [ + { + name: "skill-one", + sourceDir: fixtureSkillDir, + skillPath: path.join(fixtureSkillDir, "SKILL.md"), + }, + ], + mcpServers: { + context7: { url: "https://mcp.context7.com/mcp" }, + local: { command: "echo", args: ["hello"], env: { FOO: "bar" } }, + }, + } + + await syncToGemini(config, tempRoot) + + // Check skill symlink + const linkedSkillPath = path.join(tempRoot, "skills", "skill-one") + const linkedStat = await fs.lstat(linkedSkillPath) + expect(linkedStat.isSymbolicLink()).toBe(true) + + // Check settings.json + const settingsPath = path.join(tempRoot, "settings.json") + const settings = JSON.parse(await fs.readFile(settingsPath, "utf8")) as { + mcpServers: Record<string, { url?: string; command?: string; args?: string[]; env?: Record<string, string> }> + } + + 
expect(settings.mcpServers.context7?.url).toBe("https://mcp.context7.com/mcp") + expect(settings.mcpServers.local?.command).toBe("echo") + expect(settings.mcpServers.local?.args).toEqual(["hello"]) + expect(settings.mcpServers.local?.env).toEqual({ FOO: "bar" }) + }) + + test("merges existing settings.json", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-gemini-merge-")) + const settingsPath = path.join(tempRoot, "settings.json") + + await fs.writeFile( + settingsPath, + JSON.stringify({ + theme: "dark", + mcpServers: { existing: { command: "node", args: ["server.js"] } }, + }, null, 2), + ) + + const config: ClaudeHomeConfig = { + skills: [], + mcpServers: { + context7: { url: "https://mcp.context7.com/mcp" }, + }, + } + + await syncToGemini(config, tempRoot) + + const merged = JSON.parse(await fs.readFile(settingsPath, "utf8")) as { + theme: string + mcpServers: Record<string, { command?: string; url?: string }> + } + + // Preserves existing settings + expect(merged.theme).toBe("dark") + // Preserves existing MCP servers + expect(merged.mcpServers.existing?.command).toBe("node") + // Adds new MCP servers + expect(merged.mcpServers.context7?.url).toBe("https://mcp.context7.com/mcp") + }) + + test("writes personal commands as Gemini TOML prompts", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-gemini-cmd-")) + + const config: ClaudeHomeConfig = { + skills: [], + commands: [ + { + name: "workflows:plan", + description: "Planning command", + argumentHint: "[goal]", + body: "Plan the work carefully.", + sourcePath: "/tmp/workflows/plan.md", + }, + ], + mcpServers: {}, + } + + await syncToGemini(config, tempRoot) + + const content = await fs.readFile( + path.join(tempRoot, "commands", "workflows", "plan.toml"), + "utf8", + ) + expect(content).toContain("Planning command") + expect(content).toContain("User request: {{args}}") + }) + + test("does not write settings.json when no MCP servers", async () => { + 
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-gemini-nomcp-")) + const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one") + + const config: ClaudeHomeConfig = { + skills: [ + { + name: "skill-one", + sourceDir: fixtureSkillDir, + skillPath: path.join(fixtureSkillDir, "SKILL.md"), + }, + ], + mcpServers: {}, + } + + await syncToGemini(config, tempRoot) + + // Skills should still be symlinked + const linkedSkillPath = path.join(tempRoot, "skills", "skill-one") + const linkedStat = await fs.lstat(linkedSkillPath) + expect(linkedStat.isSymbolicLink()).toBe(true) + + // But settings.json should not exist + const settingsExists = await fs.access(path.join(tempRoot, "settings.json")).then(() => true).catch(() => false) + expect(settingsExists).toBe(false) + }) + + test("skips mirrored ~/.agents skills when syncing to ~/.gemini and removes stale duplicate symlinks", async () => { + const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "sync-gemini-home-")) + const geminiRoot = path.join(tempHome, ".gemini") + const agentsSkillDir = path.join(tempHome, ".agents", "skills", "skill-one") + + await fs.mkdir(path.join(agentsSkillDir), { recursive: true }) + await fs.writeFile(path.join(agentsSkillDir, "SKILL.md"), "# Skill One\n", "utf8") + await fs.mkdir(path.join(geminiRoot, "skills"), { recursive: true }) + await fs.symlink(agentsSkillDir, path.join(geminiRoot, "skills", "skill-one")) + + const config: ClaudeHomeConfig = { + skills: [ + { + name: "skill-one", + sourceDir: agentsSkillDir, + skillPath: path.join(agentsSkillDir, "SKILL.md"), + }, + ], + mcpServers: {}, + } + + await syncToGemini(config, geminiRoot) + + const duplicateExists = await fs.access(path.join(geminiRoot, "skills", "skill-one")).then(() => true).catch(() => false) + expect(duplicateExists).toBe(false) + }) +}) diff --git a/tests/sync-kiro.test.ts b/tests/sync-kiro.test.ts new file mode 100644 index 0000000..72f2b11 --- /dev/null +++ 
b/tests/sync-kiro.test.ts @@ -0,0 +1,83 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import os from "os" +import path from "path" +import type { ClaudeHomeConfig } from "../src/parsers/claude-home" +import { syncToKiro } from "../src/sync/kiro" + +describe("syncToKiro", () => { + test("writes user-scope settings/mcp.json with local and remote servers", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-kiro-")) + const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one") + + const config: ClaudeHomeConfig = { + skills: [ + { + name: "skill-one", + sourceDir: fixtureSkillDir, + skillPath: path.join(fixtureSkillDir, "SKILL.md"), + }, + ], + mcpServers: { + local: { command: "echo", args: ["hello"], env: { TOKEN: "secret" } }, + remote: { url: "https://example.com/mcp", headers: { Authorization: "Bearer token" } }, + }, + } + + await syncToKiro(config, tempRoot) + + expect((await fs.lstat(path.join(tempRoot, "skills", "skill-one"))).isSymbolicLink()).toBe(true) + + const content = JSON.parse( + await fs.readFile(path.join(tempRoot, "settings", "mcp.json"), "utf8"), + ) as { + mcpServers: Record<string, { + command?: string + args?: string[] + env?: Record<string, string> + url?: string + headers?: Record<string, string> + }> + } + + expect(content.mcpServers.local?.command).toBe("echo") + expect(content.mcpServers.local?.args).toEqual(["hello"]) + expect(content.mcpServers.local?.env).toEqual({ TOKEN: "secret" }) + expect(content.mcpServers.remote?.url).toBe("https://example.com/mcp") + expect(content.mcpServers.remote?.headers).toEqual({ Authorization: "Bearer token" }) + }) + + test("merges existing settings/mcp.json", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-kiro-merge-")) + await fs.mkdir(path.join(tempRoot, "settings"), { recursive: true }) + await fs.writeFile( + path.join(tempRoot, "settings", "mcp.json"), 
+ JSON.stringify({ + note: "preserve", + mcpServers: { + existing: { command: "node" }, + }, + }, null, 2), + ) + + const config: ClaudeHomeConfig = { + skills: [], + mcpServers: { + remote: { url: "https://example.com/mcp" }, + }, + } + + await syncToKiro(config, tempRoot) + + const content = JSON.parse( + await fs.readFile(path.join(tempRoot, "settings", "mcp.json"), "utf8"), + ) as { + note: string + mcpServers: Record<string, { command?: string; url?: string }> + } + + expect(content.note).toBe("preserve") + expect(content.mcpServers.existing?.command).toBe("node") + expect(content.mcpServers.remote?.url).toBe("https://example.com/mcp") + }) +}) diff --git a/tests/sync-openclaw.test.ts b/tests/sync-openclaw.test.ts new file mode 100644 index 0000000..3acf683 --- /dev/null +++ b/tests/sync-openclaw.test.ts @@ -0,0 +1,51 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import os from "os" +import path from "path" +import type { ClaudeHomeConfig } from "../src/parsers/claude-home" +import { syncToOpenClaw } from "../src/sync/openclaw" + +describe("syncToOpenClaw", () => { + test("symlinks skills and warns instead of writing unvalidated MCP config", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-openclaw-")) + const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one") + const warnings: string[] = [] + const originalWarn = console.warn + console.warn = (message?: unknown) => { + warnings.push(String(message)) + } + + try { + const config: ClaudeHomeConfig = { + skills: [ + { + name: "skill-one", + sourceDir: fixtureSkillDir, + skillPath: path.join(fixtureSkillDir, "SKILL.md"), + }, + ], + commands: [ + { + name: "workflows:plan", + description: "Planning command", + body: "Plan the work.", + sourcePath: "/tmp/workflows/plan.md", + }, + ], + mcpServers: { + remote: { url: "https://example.com/mcp" }, + }, + } + + await syncToOpenClaw(config, 
tempRoot) + } finally { + console.warn = originalWarn + } + + expect((await fs.lstat(path.join(tempRoot, "skills", "skill-one"))).isSymbolicLink()).toBe(true) + const openclawConfigExists = await fs.access(path.join(tempRoot, "openclaw.json")).then(() => true).catch(() => false) + expect(openclawConfigExists).toBe(false) + expect(warnings.some((warning) => warning.includes("OpenClaw personal command sync is skipped"))).toBe(true) + expect(warnings.some((warning) => warning.includes("OpenClaw MCP sync is skipped"))).toBe(true) + }) +}) diff --git a/tests/sync-qwen.test.ts b/tests/sync-qwen.test.ts new file mode 100644 index 0000000..60758e3 --- /dev/null +++ b/tests/sync-qwen.test.ts @@ -0,0 +1,75 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import os from "os" +import path from "path" +import type { ClaudeHomeConfig } from "../src/parsers/claude-home" +import { syncToQwen } from "../src/sync/qwen" + +describe("syncToQwen", () => { + test("defaults ambiguous remote URLs to httpUrl and warns", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-qwen-")) + const warnings: string[] = [] + const originalWarn = console.warn + console.warn = (message?: unknown) => { + warnings.push(String(message)) + } + + try { + const config: ClaudeHomeConfig = { + skills: [], + mcpServers: { + remote: { url: "https://example.com/mcp", headers: { Authorization: "Bearer token" } }, + }, + } + + await syncToQwen(config, tempRoot) + } finally { + console.warn = originalWarn + } + + const content = JSON.parse( + await fs.readFile(path.join(tempRoot, "settings.json"), "utf8"), + ) as { + mcpServers: Record<string, { httpUrl?: string; url?: string; headers?: Record<string, string> }> + } + + expect(content.mcpServers.remote?.httpUrl).toBe("https://example.com/mcp") + expect(content.mcpServers.remote?.url).toBeUndefined() + expect(content.mcpServers.remote?.headers).toEqual({ Authorization: "Bearer token" }) + 
expect(warnings.some((warning) => warning.includes("ambiguous remote transport"))).toBe(true) + }) + + test("uses legacy url only for explicit SSE servers and preserves existing settings", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-qwen-sse-")) + await fs.writeFile( + path.join(tempRoot, "settings.json"), + JSON.stringify({ + theme: "dark", + mcpServers: { + existing: { command: "node" }, + }, + }, null, 2), + ) + + const config: ClaudeHomeConfig = { + skills: [], + mcpServers: { + legacy: { type: "sse", url: "https://example.com/sse" }, + }, + } + + await syncToQwen(config, tempRoot) + + const content = JSON.parse( + await fs.readFile(path.join(tempRoot, "settings.json"), "utf8"), + ) as { + theme: string + mcpServers: Record<string, { command?: string; httpUrl?: string; url?: string }> + } + + expect(content.theme).toBe("dark") + expect(content.mcpServers.existing?.command).toBe("node") + expect(content.mcpServers.legacy?.url).toBe("https://example.com/sse") + expect(content.mcpServers.legacy?.httpUrl).toBeUndefined() + }) +}) diff --git a/tests/sync-windsurf.test.ts b/tests/sync-windsurf.test.ts new file mode 100644 index 0000000..63b1652 --- /dev/null +++ b/tests/sync-windsurf.test.ts @@ -0,0 +1,89 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import os from "os" +import path from "path" +import type { ClaudeHomeConfig } from "../src/parsers/claude-home" +import { syncToWindsurf } from "../src/sync/windsurf" + +describe("syncToWindsurf", () => { + test("writes stdio, http, and sse MCP servers", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-windsurf-")) + const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one") + + const config: ClaudeHomeConfig = { + skills: [ + { + name: "skill-one", + sourceDir: fixtureSkillDir, + skillPath: path.join(fixtureSkillDir, "SKILL.md"), + }, + ], + mcpServers: { + local: { 
command: "npx", args: ["serve"], env: { FOO: "bar" } }, + remoteHttp: { url: "https://example.com/mcp", headers: { Authorization: "Bearer a" } }, + remoteSse: { type: "sse", url: "https://example.com/sse" }, + }, + } + + await syncToWindsurf(config, tempRoot) + + expect((await fs.lstat(path.join(tempRoot, "skills", "skill-one"))).isSymbolicLink()).toBe(true) + + const content = JSON.parse( + await fs.readFile(path.join(tempRoot, "mcp_config.json"), "utf8"), + ) as { + mcpServers: Record<string, { + command?: string + args?: string[] + env?: Record<string, string> + serverUrl?: string + url?: string + }> + } + + expect(content.mcpServers.local).toEqual({ + command: "npx", + args: ["serve"], + env: { FOO: "bar" }, + }) + expect(content.mcpServers.remoteHttp?.serverUrl).toBe("https://example.com/mcp") + expect(content.mcpServers.remoteSse?.url).toBe("https://example.com/sse") + + const perms = (await fs.stat(path.join(tempRoot, "mcp_config.json"))).mode & 0o777 + expect(perms).toBe(0o600) + }) + + test("merges existing config and overwrites same-named servers", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-windsurf-merge-")) + await fs.writeFile( + path.join(tempRoot, "mcp_config.json"), + JSON.stringify({ + theme: "dark", + mcpServers: { + existing: { command: "node" }, + shared: { serverUrl: "https://old.example.com" }, + }, + }, null, 2), + ) + + const config: ClaudeHomeConfig = { + skills: [], + mcpServers: { + shared: { url: "https://new.example.com" }, + }, + } + + await syncToWindsurf(config, tempRoot) + + const content = JSON.parse( + await fs.readFile(path.join(tempRoot, "mcp_config.json"), "utf8"), + ) as { + theme: string + mcpServers: Record<string, { command?: string; serverUrl?: string }> + } + + expect(content.theme).toBe("dark") + expect(content.mcpServers.existing?.command).toBe("node") + expect(content.mcpServers.shared?.serverUrl).toBe("https://new.example.com") + }) +}) diff --git a/tests/windsurf-converter.test.ts 
b/tests/windsurf-converter.test.ts new file mode 100644 index 0000000..4264a17 --- /dev/null +++ b/tests/windsurf-converter.test.ts @@ -0,0 +1,573 @@ +import { describe, expect, test } from "bun:test" +import { convertClaudeToWindsurf, transformContentForWindsurf, normalizeName } from "../src/converters/claude-to-windsurf" +import type { ClaudePlugin } from "../src/types/claude" + +const fixturePlugin: ClaudePlugin = { + root: "/tmp/plugin", + manifest: { name: "fixture", version: "1.0.0" }, + agents: [ + { + name: "Security Reviewer", + description: "Security-focused agent", + capabilities: ["Threat modeling", "OWASP"], + model: "claude-sonnet-4-20250514", + body: "Focus on vulnerabilities.", + sourcePath: "/tmp/plugin/agents/security-reviewer.md", + }, + ], + commands: [ + { + name: "workflows:plan", + description: "Planning command", + argumentHint: "[FOCUS]", + model: "inherit", + allowedTools: ["Read"], + body: "Plan the work.", + sourcePath: "/tmp/plugin/commands/workflows/plan.md", + }, + ], + skills: [ + { + name: "existing-skill", + description: "Existing skill", + sourceDir: "/tmp/plugin/skills/existing-skill", + skillPath: "/tmp/plugin/skills/existing-skill/SKILL.md", + }, + ], + hooks: undefined, + mcpServers: { + local: { command: "echo", args: ["hello"] }, + }, +} + +const defaultOptions = { + agentMode: "subagent" as const, + inferTemperature: false, + permissions: "none" as const, +} + +describe("convertClaudeToWindsurf", () => { + test("converts agents to skills with correct name and description in SKILL.md", () => { + const bundle = convertClaudeToWindsurf(fixturePlugin, defaultOptions) + + const skill = bundle.agentSkills.find((s) => s.name === "security-reviewer") + expect(skill).toBeDefined() + expect(skill!.content).toContain("name: security-reviewer") + expect(skill!.content).toContain("description: Security-focused agent") + expect(skill!.content).toContain("Focus on vulnerabilities.") + }) + + test("agent capabilities included in skill 
content", () => { + const bundle = convertClaudeToWindsurf(fixturePlugin, defaultOptions) + const skill = bundle.agentSkills.find((s) => s.name === "security-reviewer") + expect(skill!.content).toContain("## Capabilities") + expect(skill!.content).toContain("- Threat modeling") + expect(skill!.content).toContain("- OWASP") + }) + + test("agent with empty description gets default description", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { + name: "my-agent", + body: "Do things.", + sourcePath: "/tmp/plugin/agents/my-agent.md", + }, + ], + commands: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.agentSkills[0].content).toContain("description: Converted from Claude agent my-agent") + }) + + test("agent model field silently dropped", () => { + const bundle = convertClaudeToWindsurf(fixturePlugin, defaultOptions) + const skill = bundle.agentSkills.find((s) => s.name === "security-reviewer") + expect(skill!.content).not.toContain("model:") + }) + + test("agent with empty body gets default body text", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { + name: "Empty Agent", + description: "An empty agent", + body: "", + sourcePath: "/tmp/plugin/agents/empty.md", + }, + ], + commands: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.agentSkills[0].content).toContain("Instructions converted from the Empty Agent agent.") + }) + + test("converts commands to workflows with description", () => { + const bundle = convertClaudeToWindsurf(fixturePlugin, defaultOptions) + + expect(bundle.commandWorkflows).toHaveLength(1) + const workflow = bundle.commandWorkflows[0] + expect(workflow.name).toBe("workflows-plan") + expect(workflow.description).toBe("Planning command") + expect(workflow.body).toContain("Plan the work.") + }) + + test("command argumentHint preserved as note in body", () => { + const bundle = 
convertClaudeToWindsurf(fixturePlugin, defaultOptions) + const workflow = bundle.commandWorkflows[0] + expect(workflow.body).toContain("> Arguments: [FOCUS]") + }) + + test("command with no description gets fallback", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + commands: [ + { + name: "my-command", + body: "Do things.", + sourcePath: "/tmp/plugin/commands/my-command.md", + }, + ], + agents: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.commandWorkflows[0].description).toBe("Converted from Claude command my-command") + }) + + test("command with disableModelInvocation is still included", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + commands: [ + { + name: "disabled-command", + description: "Disabled command", + disableModelInvocation: true, + body: "Disabled body.", + sourcePath: "/tmp/plugin/commands/disabled.md", + }, + ], + agents: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.commandWorkflows).toHaveLength(1) + expect(bundle.commandWorkflows[0].name).toBe("disabled-command") + }) + + test("command allowedTools silently dropped", () => { + const bundle = convertClaudeToWindsurf(fixturePlugin, defaultOptions) + const workflow = bundle.commandWorkflows[0] + expect(workflow.body).not.toContain("allowedTools") + }) + + test("skills pass through as directory references", () => { + const bundle = convertClaudeToWindsurf(fixturePlugin, defaultOptions) + + expect(bundle.skillDirs).toHaveLength(1) + expect(bundle.skillDirs[0].name).toBe("existing-skill") + expect(bundle.skillDirs[0].sourceDir).toBe("/tmp/plugin/skills/existing-skill") + }) + + test("name normalization handles various inputs", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { name: "My Cool Agent!!!", description: "Cool", body: "Body.", sourcePath: "/tmp/a.md" }, + { name: "UPPERCASE-AGENT", description: "Upper", body: "Body.", 
sourcePath: "/tmp/b.md" }, + { name: "agent--with--double-hyphens", description: "Hyphens", body: "Body.", sourcePath: "/tmp/c.md" }, + ], + commands: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.agentSkills[0].name).toBe("my-cool-agent") + expect(bundle.agentSkills[1].name).toBe("uppercase-agent") + expect(bundle.agentSkills[2].name).toBe("agent-with-double-hyphens") + }) + + test("name deduplication within agent skills", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { name: "reviewer", description: "First", body: "Body.", sourcePath: "/tmp/a.md" }, + { name: "Reviewer", description: "Second", body: "Body.", sourcePath: "/tmp/b.md" }, + ], + commands: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.agentSkills[0].name).toBe("reviewer") + expect(bundle.agentSkills[1].name).toBe("reviewer-2") + }) + + test("agent skill name deduplicates against pass-through skill names", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { name: "existing-skill", description: "Agent with same name as skill", body: "Body.", sourcePath: "/tmp/a.md" }, + ], + commands: [], + skills: [ + { + name: "existing-skill", + description: "Pass-through skill", + sourceDir: "/tmp/plugin/skills/existing-skill", + skillPath: "/tmp/plugin/skills/existing-skill/SKILL.md", + }, + ], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.agentSkills[0].name).toBe("existing-skill-2") + }) + + test("agent skill and command with same normalized name are NOT deduplicated (separate sets)", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { name: "review", description: "Agent", body: "Body.", sourcePath: "/tmp/a.md" }, + ], + commands: [ + { name: "review", description: "Command", body: "Body.", sourcePath: "/tmp/b.md" }, + ], + skills: [], + } + + const bundle = 
convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.agentSkills[0].name).toBe("review") + expect(bundle.commandWorkflows[0].name).toBe("review") + }) + + test("large agent skill does not emit 12K character limit warning (skills have no limit)", () => { + const warnings: string[] = [] + const originalWarn = console.warn + console.warn = (msg: string) => warnings.push(msg) + + const plugin: ClaudePlugin = { + ...fixturePlugin, + agents: [ + { + name: "large-agent", + description: "Large agent", + body: "x".repeat(12_000), + sourcePath: "/tmp/a.md", + }, + ], + commands: [], + skills: [], + } + + convertClaudeToWindsurf(plugin, defaultOptions) + console.warn = originalWarn + + expect(warnings.some((w) => w.includes("12000") || w.includes("limit"))).toBe(false) + }) + + test("hooks present emits console.warn", () => { + const warnings: string[] = [] + const originalWarn = console.warn + console.warn = (msg: string) => warnings.push(msg) + + const plugin: ClaudePlugin = { + ...fixturePlugin, + hooks: { hooks: { PreToolUse: [{ matcher: "*", hooks: [{ type: "command", command: "echo test" }] }] } }, + agents: [], + commands: [], + skills: [], + } + + convertClaudeToWindsurf(plugin, defaultOptions) + console.warn = originalWarn + + expect(warnings.some((w) => w.includes("Windsurf"))).toBe(true) + }) + + test("empty plugin produces empty bundle with null mcpConfig", () => { + const plugin: ClaudePlugin = { + root: "/tmp/empty", + manifest: { name: "empty", version: "1.0.0" }, + agents: [], + commands: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.agentSkills).toHaveLength(0) + expect(bundle.commandWorkflows).toHaveLength(0) + expect(bundle.skillDirs).toHaveLength(0) + expect(bundle.mcpConfig).toBeNull() + }) + + // MCP config tests + + test("stdio server produces correct mcpConfig JSON structure", () => { + const bundle = convertClaudeToWindsurf(fixturePlugin, defaultOptions) + 
expect(bundle.mcpConfig).not.toBeNull() + expect(bundle.mcpConfig!.mcpServers.local).toEqual({ + command: "echo", + args: ["hello"], + }) + }) + + test("stdio server with env vars includes actual values (not redacted)", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + mcpServers: { + myserver: { + command: "serve", + env: { + API_KEY: "secret123", + PORT: "3000", + }, + }, + }, + agents: [], + commands: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.mcpConfig!.mcpServers.myserver.env).toEqual({ + API_KEY: "secret123", + PORT: "3000", + }) + }) + + test("HTTP/SSE server produces correct mcpConfig with serverUrl", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + mcpServers: { + remote: { url: "https://example.com/mcp", headers: { Authorization: "Bearer abc" } }, + }, + agents: [], + commands: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.mcpConfig!.mcpServers.remote).toEqual({ + serverUrl: "https://example.com/mcp", + headers: { Authorization: "Bearer abc" }, + }) + }) + + test("mixed stdio and HTTP servers both included", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + mcpServers: { + local: { command: "echo", args: ["hello"] }, + remote: { url: "https://example.com/mcp" }, + }, + agents: [], + commands: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(Object.keys(bundle.mcpConfig!.mcpServers)).toHaveLength(2) + expect(bundle.mcpConfig!.mcpServers.local.command).toBe("echo") + expect(bundle.mcpConfig!.mcpServers.remote.serverUrl).toBe("https://example.com/mcp") + }) + + test("hasPotentialSecrets emits console.warn for sensitive env keys", () => { + const warnings: string[] = [] + const originalWarn = console.warn + console.warn = (...msgs: unknown[]) => warnings.push(msgs.map(String).join(" ")) + + const plugin: ClaudePlugin = { + ...fixturePlugin, + 
mcpServers: { + myserver: { + command: "serve", + env: { API_KEY: "secret123", PORT: "3000" }, + }, + }, + agents: [], + commands: [], + skills: [], + } + + convertClaudeToWindsurf(plugin, defaultOptions) + console.warn = originalWarn + + expect(warnings.some((w) => w.includes("secrets") && w.includes("myserver"))).toBe(true) + }) + + test("no secrets warning when env vars are safe", () => { + const warnings: string[] = [] + const originalWarn = console.warn + console.warn = (...msgs: unknown[]) => warnings.push(msgs.map(String).join(" ")) + + const plugin: ClaudePlugin = { + ...fixturePlugin, + mcpServers: { + myserver: { + command: "serve", + env: { PORT: "3000", HOST: "localhost" }, + }, + }, + agents: [], + commands: [], + skills: [], + } + + convertClaudeToWindsurf(plugin, defaultOptions) + console.warn = originalWarn + + expect(warnings.some((w) => w.includes("secrets"))).toBe(false) + }) + + test("no MCP servers produces null mcpConfig", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + mcpServers: undefined, + agents: [], + commands: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.mcpConfig).toBeNull() + }) + + test("server with no command and no URL is skipped with warning", () => { + const warnings: string[] = [] + const originalWarn = console.warn + console.warn = (...msgs: unknown[]) => warnings.push(msgs.map(String).join(" ")) + + const plugin: ClaudePlugin = { + ...fixturePlugin, + mcpServers: { + broken: {} as { command: string }, + }, + agents: [], + commands: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + console.warn = originalWarn + + expect(bundle.mcpConfig).toBeNull() + expect(warnings.some((w) => w.includes("broken") && w.includes("no command or URL"))).toBe(true) + }) + + test("server command without args omits args field", () => { + const plugin: ClaudePlugin = { + ...fixturePlugin, + mcpServers: { + simple: { command: "myserver" 
}, + }, + agents: [], + commands: [], + skills: [], + } + + const bundle = convertClaudeToWindsurf(plugin, defaultOptions) + expect(bundle.mcpConfig!.mcpServers.simple).toEqual({ command: "myserver" }) + expect(bundle.mcpConfig!.mcpServers.simple.args).toBeUndefined() + }) +}) + +describe("transformContentForWindsurf", () => { + test("transforms .claude/ paths to .windsurf/", () => { + const result = transformContentForWindsurf("Read .claude/settings.json for config.") + expect(result).toContain(".windsurf/settings.json") + expect(result).not.toContain(".claude/") + }) + + test("transforms ~/.claude/ paths to ~/.codeium/windsurf/", () => { + const result = transformContentForWindsurf("Check ~/.claude/config for settings.") + expect(result).toContain("~/.codeium/windsurf/config") + expect(result).not.toContain("~/.claude/") + }) + + test("transforms Task agent(args) to skill reference", () => { + const input = `Run these: + +- Task repo-research-analyst(feature_description) +- Task learnings-researcher(feature_description) + +Task best-practices-researcher(topic)` + + const result = transformContentForWindsurf(input) + expect(result).toContain("Use the @repo-research-analyst skill: feature_description") + expect(result).toContain("Use the @learnings-researcher skill: feature_description") + expect(result).toContain("Use the @best-practices-researcher skill: topic") + expect(result).not.toContain("Task repo-research-analyst") + }) + + test("keeps @agent references as-is for known agents (Windsurf skill invocation syntax)", () => { + const result = transformContentForWindsurf("Ask @security-sentinel for a review.", ["security-sentinel"]) + expect(result).toContain("@security-sentinel") + expect(result).not.toContain("/agents/") + }) + + test("does not transform @unknown-name when not in known agents", () => { + const result = transformContentForWindsurf("Contact @someone-else for help.", ["security-sentinel"]) + expect(result).toContain("@someone-else") + }) + + 
test("transforms slash command refs to /{workflow-name} (per spec)", () => { + const result = transformContentForWindsurf("Run /workflows:plan to start planning.") + expect(result).toContain("/workflows-plan") + expect(result).not.toContain("/commands/") + }) + + test("does not transform partial .claude paths in middle of word", () => { + const result = transformContentForWindsurf("Check some-package/.claude-config/settings") + expect(result).toContain("some-package/") + }) + + test("handles case sensitivity in @agent-name matching", () => { + const result = transformContentForWindsurf("Delegate to @My-Agent for help.", ["my-agent"]) + // @My-Agent won't match my-agent since regex is case-sensitive on the known names + expect(result).toContain("@My-Agent") + }) + + test("handles multiple occurrences of same transform", () => { + const result = transformContentForWindsurf( + "Use .claude/foo and .claude/bar for config.", + ) + expect(result).toContain(".windsurf/foo") + expect(result).toContain(".windsurf/bar") + expect(result).not.toContain(".claude/") + }) +}) + +describe("normalizeName", () => { + test("lowercases and hyphenates spaces", () => { + expect(normalizeName("Security Reviewer")).toBe("security-reviewer") + }) + + test("replaces colons with hyphens", () => { + expect(normalizeName("workflows:plan")).toBe("workflows-plan") + }) + + test("collapses consecutive hyphens", () => { + expect(normalizeName("agent--with--double-hyphens")).toBe("agent-with-double-hyphens") + }) + + test("strips leading/trailing hyphens", () => { + expect(normalizeName("-leading-and-trailing-")).toBe("leading-and-trailing") + }) + + test("empty string returns item", () => { + expect(normalizeName("")).toBe("item") + }) + + test("non-letter start returns item", () => { + expect(normalizeName("123-agent")).toBe("item") + }) +}) diff --git a/tests/windsurf-writer.test.ts b/tests/windsurf-writer.test.ts new file mode 100644 index 0000000..9d1129c --- /dev/null +++ 
b/tests/windsurf-writer.test.ts @@ -0,0 +1,359 @@ +import { describe, expect, test } from "bun:test" +import { promises as fs } from "fs" +import path from "path" +import os from "os" +import { writeWindsurfBundle } from "../src/targets/windsurf" +import type { WindsurfBundle } from "../src/types/windsurf" + +async function exists(filePath: string): Promise<boolean> { + try { + await fs.access(filePath) + return true + } catch { + return false + } +} + +const emptyBundle: WindsurfBundle = { + agentSkills: [], + commandWorkflows: [], + skillDirs: [], + mcpConfig: null, +} + +describe("writeWindsurfBundle", () => { + test("creates correct directory structure with all components", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-test-")) + const bundle: WindsurfBundle = { + agentSkills: [ + { + name: "security-reviewer", + content: "---\nname: security-reviewer\ndescription: Security-focused agent\n---\n\n# security-reviewer\n\nReview code for vulnerabilities.\n", + }, + ], + commandWorkflows: [ + { + name: "workflows-plan", + description: "Planning command", + body: "> Arguments: [FOCUS]\n\nPlan the work.", + }, + ], + skillDirs: [ + { + name: "skill-one", + sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"), + }, + ], + mcpConfig: { + mcpServers: { + local: { command: "echo", args: ["hello"] }, + }, + }, + } + + await writeWindsurfBundle(tempRoot, bundle) + + // No AGENTS.md — removed in v0.11.0 + expect(await exists(path.join(tempRoot, "AGENTS.md"))).toBe(false) + + // Agent skill written as skills/<name>/SKILL.md + const agentSkillPath = path.join(tempRoot, "skills", "security-reviewer", "SKILL.md") + expect(await exists(agentSkillPath)).toBe(true) + const agentContent = await fs.readFile(agentSkillPath, "utf8") + expect(agentContent).toContain("name: security-reviewer") + expect(agentContent).toContain("description: Security-focused agent") + expect(agentContent).toContain("Review code for 
vulnerabilities.") + + // No workflows/agents/ or workflows/commands/ subdirectories (flat per spec) + expect(await exists(path.join(tempRoot, "workflows", "agents"))).toBe(false) + expect(await exists(path.join(tempRoot, "workflows", "commands"))).toBe(false) + + // Command workflow flat in outputRoot/workflows/ (per spec) + const cmdWorkflowPath = path.join(tempRoot, "workflows", "workflows-plan.md") + expect(await exists(cmdWorkflowPath)).toBe(true) + const cmdContent = await fs.readFile(cmdWorkflowPath, "utf8") + expect(cmdContent).toContain("description: Planning command") + expect(cmdContent).toContain("Plan the work.") + + // Copied skill directly in outputRoot/skills/ + expect(await exists(path.join(tempRoot, "skills", "skill-one", "SKILL.md"))).toBe(true) + + // MCP config directly in outputRoot/ + const mcpPath = path.join(tempRoot, "mcp_config.json") + expect(await exists(mcpPath)).toBe(true) + const mcpContent = JSON.parse(await fs.readFile(mcpPath, "utf8")) + expect(mcpContent.mcpServers.local).toEqual({ command: "echo", args: ["hello"] }) + }) + + test("writes directly into outputRoot without nesting", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-direct-")) + const bundle: WindsurfBundle = { + ...emptyBundle, + agentSkills: [ + { + name: "reviewer", + content: "---\nname: reviewer\ndescription: A reviewer\n---\n\n# reviewer\n\nReview content.\n", + }, + ], + } + + await writeWindsurfBundle(tempRoot, bundle) + + // Skill should be directly in outputRoot/skills/reviewer/SKILL.md + expect(await exists(path.join(tempRoot, "skills", "reviewer", "SKILL.md"))).toBe(true) + // Should NOT create a .windsurf subdirectory + expect(await exists(path.join(tempRoot, ".windsurf"))).toBe(false) + }) + + test("handles empty bundle gracefully", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-empty-")) + + await writeWindsurfBundle(tempRoot, emptyBundle) + expect(await exists(tempRoot)).toBe(true) 
+ // No mcp_config.json for null mcpConfig + expect(await exists(path.join(tempRoot, "mcp_config.json"))).toBe(false) + }) + + test("path traversal in agent skill name is rejected", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-traversal-")) + const bundle: WindsurfBundle = { + ...emptyBundle, + agentSkills: [ + { name: "../escape", content: "Bad content." }, + ], + } + + await expect(writeWindsurfBundle(tempRoot, bundle)).rejects.toThrow("unsafe path") + }) + + test("path traversal in command workflow name is rejected", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-traversal2-")) + const bundle: WindsurfBundle = { + ...emptyBundle, + commandWorkflows: [ + { name: "../escape", description: "Malicious", body: "Bad content." }, + ], + } + + await expect(writeWindsurfBundle(tempRoot, bundle)).rejects.toThrow("unsafe path") + }) + + test("skill directory containment check prevents escape", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-skill-escape-")) + const bundle: WindsurfBundle = { + ...emptyBundle, + skillDirs: [ + { name: "../escape", sourceDir: "/tmp/fake-skill" }, + ], + } + + await expect(writeWindsurfBundle(tempRoot, bundle)).rejects.toThrow("unsafe path") + }) + + test("agent skill files have YAML frontmatter with name and description", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-fm-")) + const bundle: WindsurfBundle = { + ...emptyBundle, + agentSkills: [ + { + name: "test-agent", + content: "---\nname: test-agent\ndescription: Test agent description\n---\n\n# test-agent\n\nDo test things.\n", + }, + ], + } + + await writeWindsurfBundle(tempRoot, bundle) + + const skillPath = path.join(tempRoot, "skills", "test-agent", "SKILL.md") + const content = await fs.readFile(skillPath, "utf8") + expect(content).toContain("---") + expect(content).toContain("name: test-agent") + expect(content).toContain("description: Test agent 
description") + expect(content).toContain("# test-agent") + expect(content).toContain("Do test things.") + }) + + // MCP config merge tests + + test("writes mcp_config.json to outputRoot", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-mcp-")) + const bundle: WindsurfBundle = { + ...emptyBundle, + mcpConfig: { + mcpServers: { + myserver: { command: "serve", args: ["--port", "3000"] }, + }, + }, + } + + await writeWindsurfBundle(tempRoot, bundle) + + const mcpPath = path.join(tempRoot, "mcp_config.json") + expect(await exists(mcpPath)).toBe(true) + const content = JSON.parse(await fs.readFile(mcpPath, "utf8")) + expect(content.mcpServers.myserver.command).toBe("serve") + expect(content.mcpServers.myserver.args).toEqual(["--port", "3000"]) + }) + + test("merges with existing mcp_config.json preserving user servers", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-merge-")) + const mcpPath = path.join(tempRoot, "mcp_config.json") + + // Write existing config with a user server + await fs.writeFile(mcpPath, JSON.stringify({ + mcpServers: { + "user-server": { command: "my-tool", args: ["--flag"] }, + }, + }, null, 2)) + + const bundle: WindsurfBundle = { + ...emptyBundle, + mcpConfig: { + mcpServers: { + "plugin-server": { command: "plugin-tool" }, + }, + }, + } + + await writeWindsurfBundle(tempRoot, bundle) + + const content = JSON.parse(await fs.readFile(mcpPath, "utf8")) + // Both servers should be present + expect(content.mcpServers["user-server"].command).toBe("my-tool") + expect(content.mcpServers["plugin-server"].command).toBe("plugin-tool") + }) + + test("backs up existing mcp_config.json before overwrite", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-backup-")) + const mcpPath = path.join(tempRoot, "mcp_config.json") + + await fs.writeFile(mcpPath, '{"mcpServers":{}}') + + const bundle: WindsurfBundle = { + ...emptyBundle, + mcpConfig: { + mcpServers: { 
new: { command: "new-tool" } }, + }, + } + + await writeWindsurfBundle(tempRoot, bundle) + + // A backup file should exist + const files = await fs.readdir(tempRoot) + const backupFiles = files.filter((f) => f.startsWith("mcp_config.json.bak.")) + expect(backupFiles.length).toBeGreaterThanOrEqual(1) + }) + + test("handles corrupted existing mcp_config.json with warning", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-corrupt-")) + const mcpPath = path.join(tempRoot, "mcp_config.json") + + await fs.writeFile(mcpPath, "not valid json{{{") + + const warnings: string[] = [] + const originalWarn = console.warn + console.warn = (...msgs: unknown[]) => warnings.push(msgs.map(String).join(" ")) + + const bundle: WindsurfBundle = { + ...emptyBundle, + mcpConfig: { + mcpServers: { new: { command: "new-tool" } }, + }, + } + + await writeWindsurfBundle(tempRoot, bundle) + console.warn = originalWarn + + expect(warnings.some((w) => w.includes("could not be parsed"))).toBe(true) + const content = JSON.parse(await fs.readFile(mcpPath, "utf8")) + expect(content.mcpServers.new.command).toBe("new-tool") + }) + + test("handles existing mcp_config.json with array at root", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-array-")) + const mcpPath = path.join(tempRoot, "mcp_config.json") + + await fs.writeFile(mcpPath, "[1,2,3]") + + const bundle: WindsurfBundle = { + ...emptyBundle, + mcpConfig: { + mcpServers: { new: { command: "new-tool" } }, + }, + } + + await writeWindsurfBundle(tempRoot, bundle) + + const content = JSON.parse(await fs.readFile(mcpPath, "utf8")) + expect(content.mcpServers.new.command).toBe("new-tool") + // Array root should be replaced with object + expect(Array.isArray(content)).toBe(false) + }) + + test("preserves non-mcpServers keys in existing file", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-preserve-")) + const mcpPath = path.join(tempRoot, 
"mcp_config.json") + + await fs.writeFile(mcpPath, JSON.stringify({ + customSetting: true, + version: 2, + mcpServers: { old: { command: "old-tool" } }, + }, null, 2)) + + const bundle: WindsurfBundle = { + ...emptyBundle, + mcpConfig: { + mcpServers: { new: { command: "new-tool" } }, + }, + } + + await writeWindsurfBundle(tempRoot, bundle) + + const content = JSON.parse(await fs.readFile(mcpPath, "utf8")) + expect(content.customSetting).toBe(true) + expect(content.version).toBe(2) + expect(content.mcpServers.new.command).toBe("new-tool") + expect(content.mcpServers.old.command).toBe("old-tool") + }) + + test("server name collision: plugin entry wins", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-collision-")) + const mcpPath = path.join(tempRoot, "mcp_config.json") + + await fs.writeFile(mcpPath, JSON.stringify({ + mcpServers: { shared: { command: "old-version" } }, + }, null, 2)) + + const bundle: WindsurfBundle = { + ...emptyBundle, + mcpConfig: { + mcpServers: { shared: { command: "new-version" } }, + }, + } + + await writeWindsurfBundle(tempRoot, bundle) + + const content = JSON.parse(await fs.readFile(mcpPath, "utf8")) + expect(content.mcpServers.shared.command).toBe("new-version") + }) + + test("mcp_config.json written with restrictive permissions", async () => { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "windsurf-perms-")) + const bundle: WindsurfBundle = { + ...emptyBundle, + mcpConfig: { + mcpServers: { server: { command: "tool" } }, + }, + } + + await writeWindsurfBundle(tempRoot, bundle) + + const mcpPath = path.join(tempRoot, "mcp_config.json") + const stat = await fs.stat(mcpPath) + // On Unix: 0o600 = owner read+write only. On Windows, permissions work differently. + if (process.platform !== "win32") { + const mode = stat.mode & 0o777 + expect(mode).toBe(0o600) + } + }) +})