refactor(install): prefer native plugin install across targets (#609)
Some checks failed
CI / pr-title (push) Has been cancelled
CI / test (push) Has been cancelled
Release PR / release-pr (push) Has been cancelled
Release PR / publish-cli (push) Has been cancelled

Co-authored-by: John Cavanaugh <cavanaug@users.noreply.github.com>
This commit is contained in:
Trevin Chow
2026-04-20 18:47:07 -07:00
committed by GitHub
parent 9497a00d90
commit c2d60b47be
104 changed files with 7073 additions and 7068 deletions

187
README.md
View File

@@ -19,7 +19,7 @@ Compound engineering inverts this. 80% is in planning and review, 20% is in exec
**Learn more**
- [Full component reference](plugins/compound-engineering/README.md) - all agents, commands, skills
- [Full component reference](plugins/compound-engineering/README.md) - all agents and skills
- [Compound engineering: how Every codes with agents](https://every.to/chain-of-thought/compound-engineering-how-every-codes-with-agents)
- [The story behind compounding engineering](https://every.to/source-code/my-ai-had-already-fixed-the-code-before-i-saw-it)
@@ -31,7 +31,7 @@ Brainstorm -> Plan -> Work -> Review -> Compound -> Repeat
Ideate (optional -- when you need ideas)
```
| Command | Purpose |
| Skill | Purpose |
|---------|---------|
| `/ce-ideate` | Discover high-impact project improvements through divergent ideation and adversarial filtering |
| `/ce-brainstorm` | Explore requirements and approaches before planning |
@@ -63,13 +63,73 @@ After installing, run `/ce-setup` in any project. It checks your environment, in
### Cursor
In Cursor Agent chat, install from the plugin marketplace:
```text
/add-plugin compound-engineering
```
### OpenCode, Codex, Droid, Pi, Gemini, Copilot, Kiro, Windsurf, OpenClaw & Qwen (experimental)
Or search for "compound engineering" in Cursor's plugin marketplace.
This repo includes a Bun/TypeScript CLI that converts Claude Code plugins to OpenCode, Codex, Factory Droid, Pi, Gemini CLI, GitHub Copilot, Kiro CLI, Windsurf, OpenClaw, and Qwen Code.
### GitHub Copilot CLI
Inside Copilot CLI:
```text
/plugin marketplace add EveryInc/compound-engineering-plugin
/plugin install compound-engineering@compound-engineering-plugin
```
From a shell:
```bash
copilot plugin marketplace add EveryInc/compound-engineering-plugin
copilot plugin install compound-engineering@compound-engineering-plugin
```
Copilot CLI reads the existing `.claude-plugin/marketplace.json` and plugin manifests, so no separate Bun install step is needed.
If you previously used the old Bun Copilot install, back up stale CE artifacts before switching to the native plugin:
```bash
bunx @every-env/compound-plugin cleanup --target copilot
```
This also backs up CE-owned skills in `~/.agents/skills` that would shadow Copilot's native plugin skills.
### Factory Droid
```bash
droid plugin marketplace add https://github.com/EveryInc/compound-engineering-plugin
droid plugin install compound-engineering@compound-engineering-plugin
```
Droid reads the existing Claude Code-compatible plugin marketplace and translates the plugin format automatically, so no Bun install step is needed.
If you previously used the old Bun Droid install, back up stale CE artifacts before switching to the native plugin:
```bash
bunx @every-env/compound-plugin cleanup --target droid
```
### Qwen Code
```bash
qwen extensions install EveryInc/compound-engineering-plugin:compound-engineering
```
Qwen Code installs Claude Code-compatible plugins directly from GitHub and converts the plugin format during install, so no Bun install step is needed.
If you previously used the old Bun Qwen install, back up stale CE artifacts before switching to the native extension:
```bash
bunx @every-env/compound-plugin cleanup --target qwen
```
### OpenCode, Codex, Pi, Gemini & Kiro (experimental)
This repo includes a Bun/TypeScript CLI that converts Claude Code plugins to OpenCode, Codex, Pi, Gemini CLI, and Kiro CLI.
Use the native plugin install instructions above for Claude Code, Cursor, GitHub Copilot CLI, Factory Droid, and Qwen Code.
```bash
# convert the compound-engineering plugin into OpenCode format
@@ -78,52 +138,45 @@ bunx @every-env/compound-plugin install compound-engineering --to opencode
# convert to Codex format
bunx @every-env/compound-plugin install compound-engineering --to codex
# convert to Factory Droid format
bunx @every-env/compound-plugin install compound-engineering --to droid
# convert to Pi format
bunx @every-env/compound-plugin install compound-engineering --to pi
# convert to Gemini CLI format
bunx @every-env/compound-plugin install compound-engineering --to gemini
# convert to GitHub Copilot format
bunx @every-env/compound-plugin install compound-engineering --to copilot
# convert to Kiro CLI format
bunx @every-env/compound-plugin install compound-engineering --to kiro
# convert to OpenClaw format
bunx @every-env/compound-plugin install compound-engineering --to openclaw
# convert to Windsurf format (global scope by default)
bunx @every-env/compound-plugin install compound-engineering --to windsurf
# convert to Windsurf workspace scope
bunx @every-env/compound-plugin install compound-engineering --to windsurf --scope workspace
# convert to Qwen Code format
bunx @every-env/compound-plugin install compound-engineering --to qwen
# auto-detect installed tools and install to all
# auto-detect custom-install targets and install to all
bunx @every-env/compound-plugin install compound-engineering --to all
```
The custom install targets run CE legacy cleanup during install. To run cleanup manually for a specific target:
```bash
bunx @every-env/compound-plugin cleanup --target codex
bunx @every-env/compound-plugin cleanup --target opencode
bunx @every-env/compound-plugin cleanup --target pi
bunx @every-env/compound-plugin cleanup --target gemini
bunx @every-env/compound-plugin cleanup --target kiro
bunx @every-env/compound-plugin cleanup --target copilot # old Bun installs only
bunx @every-env/compound-plugin cleanup --target droid # old Bun installs only
bunx @every-env/compound-plugin cleanup --target qwen # old Bun installs only
bunx @every-env/compound-plugin cleanup --target windsurf # deprecated legacy installs only
```
Cleanup moves known CE artifacts into a `compound-engineering/legacy-backup/` directory under the target root.
<details>
<summary>Output format details per target</summary>
| Target | Output path | Notes |
|--------|------------|-------|
| `opencode` | `~/.config/opencode/` | Commands as `.md` files; `opencode.json` MCP config deep-merged; backups made before overwriting |
| `codex` | `~/.codex/prompts` + `~/.codex/skills` | Claude commands become prompt + skill pairs; all skills copied directly; deprecated `workflows:*` aliases are omitted |
| `droid` | `~/.factory/` | Tool names mapped (`Bash`->`Execute`, `Write`->`Create`); namespace prefixes stripped |
| `opencode` | `~/.config/opencode/` | Skills and agents are written to OpenCode discovery roots; `opencode.json` MCP config is deep-merged; source commands, if present, are written as `.md` files |
| `codex` | `~/.codex/prompts` + `~/.codex/skills/<plugin>/` + `~/.codex/agents/<plugin>/` | CE skills install under a namespaced Codex skill root; Claude agents become Codex TOML custom agents; Claude source commands, if present, become prompt + skill pairs; deprecated `workflows:*` aliases are omitted; legacy CE `.agents` symlinks are cleaned up but no new `.agents` files are written |
| `pi` | `~/.pi/agent/` | Prompts, skills, extensions, and `mcporter.json` for MCPorter interoperability |
| `gemini` | `.gemini/` | Skills from agents; commands as `.toml`; namespaced commands become directories (`workflows:plan` -> `commands/workflows/plan.toml`) |
| `copilot` | `.github/` | Agents as `.agent.md` with Copilot frontmatter; MCP env vars prefixed with `COPILOT_MCP_` |
| `gemini` | `~/.gemini/` | Skills under `skills/` and subagents under `agents/`; source commands, if present, are written as `.toml` |
| `kiro` | `.kiro/` | Agents as JSON configs + prompt `.md` files; only stdio MCP servers supported |
| `openclaw` | `~/.openclaw/extensions/<plugin>/` | Entry-point TypeScript skill file; `openclaw-extension.json` for MCP servers |
| `windsurf` | `~/.codeium/windsurf/` (global) or `.windsurf/` (workspace) | Agents become skills; commands become flat workflows; `mcp_config.json` merged |
| `qwen` | `~/.qwen/extensions/<plugin>/` | Agents as `.yaml`; env vars with placeholders extracted as settings; colon separator for nested commands |
All provider targets are experimental and may change as the formats evolve.
@@ -223,74 +276,4 @@ ccb feat/new-agents --verbose # extra flags forwarded to claude
codex-ceb feat/new-agents # install a pushed branch to Codex
```
---
## Sync Personal Config
Sync your personal Claude Code config (`~/.claude/`) to other AI coding tools. Omit `--target` to sync to all detected supported tools automatically:
```bash
# Sync to all detected tools (default)
bunx @every-env/compound-plugin sync
# Sync skills and MCP servers to OpenCode
bunx @every-env/compound-plugin sync --target opencode
# Sync to Codex
bunx @every-env/compound-plugin sync --target codex
# Sync to Pi
bunx @every-env/compound-plugin sync --target pi
# Sync to Droid
bunx @every-env/compound-plugin sync --target droid
# Sync to GitHub Copilot (skills + MCP servers)
bunx @every-env/compound-plugin sync --target copilot
# Sync to Gemini (skills + MCP servers)
bunx @every-env/compound-plugin sync --target gemini
# Sync to Windsurf
bunx @every-env/compound-plugin sync --target windsurf
# Sync to Kiro
bunx @every-env/compound-plugin sync --target kiro
# Sync to Qwen
bunx @every-env/compound-plugin sync --target qwen
# Sync to OpenClaw (skills only; MCP is validation-gated)
bunx @every-env/compound-plugin sync --target openclaw
# Sync to all detected tools
bunx @every-env/compound-plugin sync --target all
```
This syncs:
- Personal skills from `~/.claude/skills/` (as symlinks)
- Personal slash commands from `~/.claude/commands/` (as provider-native prompts, workflows, or converted skills where supported)
- MCP servers from `~/.claude/settings.json`
Skills are symlinked (not copied) so changes in Claude Code are reflected immediately.
Supported sync targets:
- `opencode`
- `codex`
- `pi`
- `droid`
- `copilot`
- `gemini`
- `windsurf`
- `kiro`
- `qwen`
- `openclaw`
Notes:
- Codex sync preserves non-managed `config.toml` content and now includes remote MCP servers.
- Command sync reuses each provider's existing Claude command conversion, so some targets receive prompts or workflows while others receive converted skills.
- Copilot sync writes personal skills to `~/.copilot/skills/` and MCP config to `~/.copilot/mcp-config.json`.
- Gemini sync writes MCP config to `~/.gemini/` and avoids mirroring skills that Gemini already discovers from `~/.agents/skills`, which prevents duplicate-skill warnings.
- Droid, Windsurf, Kiro, and Qwen sync merge MCP servers into the provider's documented user config.
- OpenClaw currently syncs skills only. Personal command sync is skipped because this repo does not yet have a documented user-level OpenClaw command surface, and MCP sync is skipped because the current official OpenClaw docs do not clearly document an MCP server config contract.
Codex installs keep generated plugin skills isolated under `~/.codex/skills/compound-engineering/` and do not write new files into `~/.agents`. The installer removes old CE-managed `.agents/skills` symlinks when it can prove they point back to CE's Codex-managed store, which prevents stale Codex installs from shadowing Copilot's native plugin install.

View File

@@ -0,0 +1,354 @@
---
title: "refactor: Recenter installs on native packages and shared skill cleanup"
type: refactor
status: active
date: 2026-04-18
---
# Recenter Installs on Native Packages and Shared Skill Cleanup
## Overview
Rework the install strategy around current agent-harness behavior:
- Use native package/plugin installers where they can install the full Compound Engineering payload.
- Avoid `~/.agents` for CE-owned installs because shared skills there can shadow native plugin installs such as Copilot.
- Keep agents target-native unless the harness's package format explicitly supports bundled agents.
- Add a first-class cleanup path for old CE-owned flat installs, renamed skills, removed skills, converted-agent skills, prompts, commands, and target-specific artifacts.
This plan supersedes the Copilot-only native plugin plan because the same decision now affects Codex, Gemini, Pi, OpenCode, and every retained custom converter target.
## Problem Frame
The current CLI grew when most targets did not have native package/plugin support. That is no longer uniformly true:
- Claude Code has native plugin marketplaces.
- Copilot CLI has plugin marketplaces and can install repo-hosted plugins.
- Gemini CLI has native extensions and shared `~/.agents/skills` skill discovery.
- Pi has native packages via `pi install` and also reads `~/.agents/skills`.
- Codex has native plugins, but current public docs still make non-official distribution depend on local/repo/personal marketplace files.
- OpenCode also reads `~/.agents/skills`, but CE should avoid that root by default because it can shadow Copilot plugin skills.
- Windsurf no longer needs active support and should be deprecated from user-facing conversion/install flows while preserving cleanup for old CE artifacts.
At the same time, our legacy installs leave stale flat artifacts behind. Examples include removed skills such as `reproduce-bug`, renamed workflows such as `workflows:*` -> `ce:*`, old prompt files, and agents that older converters flattened into skills. We cannot delete all of `~/.agents/skills` or `~/.codex/skills` because users may have non-CE skills there.
## Requirements Trace
- R1. Prefer native installers when they install the full useful payload with a reasonable user flow.
- R2. Do not write CE-owned installs to `~/.agents`; treat it as a legacy cleanup surface only.
- R3. Preserve target-specific agent behavior where the harness supports agents.
- R4. Continue converting agents to skills only for targets that lack compatible agent packaging or invocation.
- R5. Track all CE legacy skills, agents, commands, prompts, and generated aliases so cleanup can remove stale CE-owned artifacts without touching user-owned items.
- R6. Any remaining custom install path must run legacy cleanup on every install.
- R7. Native-install targets must have a documented one-time cleanup command users can run before switching from old Bun installs.
- R8. Forward installs must write a manifest so removed or renamed artifacts can be cleaned without expanding the hand-maintained legacy list forever.
- R9. The README and target specs must clearly distinguish native installer paths from legacy/custom converter paths.
- R10. Deprecate Windsurf support and preserve cleanup for old CE Windsurf installs.
## External Research Summary
| Harness | Shared `~/.agents/skills` | Native package/plugin install | Agent support path | Planning conclusion |
| --- | --- | --- | --- | --- |
| Claude Code | Not the primary install path for this repo | Yes, `/plugin marketplace add` + `/plugin install` | Plugin `agents/` | Keep Claude native plugin as canonical. No Bun install needed for Claude. |
| Codex | Yes, but CE should avoid it to prevent Copilot plugin shadowing. Codex also discovers `~/.codex/skills` in current local behavior. | Yes, but current docs describe official plugin directory plus local repo/personal marketplace files. | Custom agents are TOML under `~/.codex/agents` or `.codex/agents`, not `~/.agents/agents`. | Keep custom Codex install. Write CE skills under `~/.codex/skills/compound-engineering` and convert Claude agents to flat Codex TOML custom agents under `~/.codex/agents`. |
| Copilot CLI | Yes. Docs list project `.agents/skills` and personal `~/.agents/skills`. | Yes. `copilot plugin marketplace add OWNER/REPO`, then `copilot plugin install NAME@MARKETPLACE`. Copilot can read existing `.claude-plugin/marketplace.json` and `.claude-plugin/plugin.json`. | Personal `~/.copilot/agents`, project `.github/agents`, Claude-compatible `~/.claude/agents` / `.claude/agents`, and plugin `agents/`. No documented `~/.agents/agents`. | Move Copilot to native plugin distribution using the existing Claude plugin metadata. Remove user-facing Bun install. |
| Gemini CLI | Yes, but CE should avoid it to prevent Copilot plugin shadowing. | Yes. `gemini extensions install <github-url-or-local-path>`, but monorepo subdirectory install is not documented. | Project `.gemini/agents`, user `~/.gemini/agents`, and extension `agents/`. The verified `.agents/*` alias is for skills, not subagents. | Keep custom Bun install to `~/.gemini/{skills,agents,commands}` for now; revisit native extension distribution later. |
| Pi | Yes. Docs list `~/.agents/skills` and `.agents/skills`. | Yes. `pi install npm:...`, `pi install git:...`, URL, or local path. | Core Pi has no built-in subagents; subagents are extension/package-provided. Packages can bundle extensions, skills, prompts, themes. | Prefer a Pi package if we can package the existing compat extension, prompts, and skills cleanly. Until then, keep custom writer and cleanup. |
| OpenCode | Yes, but CE should avoid it to prevent Copilot plugin shadowing. | Partial. OpenCode has plugins/config, but no equivalent repo marketplace install for our full payload in current target design. | Agents are OpenCode markdown/config under `~/.config/opencode/agents` or `.opencode/agents`. | Keep custom writer for agents/config; do not share pass-through skills via `~/.agents/skills` by default. |
| Factory Droid | No confirmed `~/.agents/skills`; docs mention `.factory/skills`, `~/.factory/skills`, and project `.agent/skills` compatibility. | Yes. `droid plugin marketplace add <repo>`, then `droid plugin install NAME@MARKETPLACE`. Droid can install Claude Code-compatible plugins directly. | Plugin agents load through the native plugin translation path. | Move Droid to native plugin distribution and remove user-facing Bun install. |
| Kiro | No confirmed `~/.agents/skills` in current docs. | Has import flows, but not a CE-wide plugin install path in current target. | Agents are `.kiro/agents` JSON + prompt files. | Keep custom writer. |
| Windsurf | No longer relevant for CE support. | N/A | Current converter maps agents to skills. | Deprecate/remove user-facing support; keep legacy cleanup for old CE Windsurf installs. |
| Qwen Code | No shared `~/.agents` conclusion needed. | Extension-oriented target already has per-plugin root. | Qwen supports target-native agents. | Keep custom writer/package output. |
Sources checked:
- Codex skills: `https://developers.openai.com/codex/skills`
- Codex plugins: `https://developers.openai.com/codex/plugins` and `https://developers.openai.com/codex/plugins/build`
- Codex subagents: `https://developers.openai.com/codex/subagents`
- Copilot agents/skills/plugins: `https://docs.github.com/en/copilot/how-tos/copilot-cli/customize-copilot/create-custom-agents-for-cli`, `https://docs.github.com/en/copilot/how-tos/copilot-cli/customize-copilot/add-skills`, `https://docs.github.com/en/copilot/reference/copilot-cli-reference/cli-plugin-reference`
- Gemini skills/subagents/extensions: `https://geminicli.com/docs/cli/skills/`, `https://geminicli.com/docs/core/subagents/`, `https://geminicli.com/docs/extensions/reference/`, `https://developers.googleblog.com/subagents-have-arrived-in-gemini-cli/`
- Pi skills/packages: `https://buildwithpi.ai/README.md`, `https://github.com/badlogic/pi-mono/blob/main/packages/coding-agent/docs/skills.md`, `https://github.com/badlogic/pi-mono/blob/main/packages/coding-agent/docs/packages.md`
- OpenCode skills/agents: `https://opencode.ai/docs/skills`, `https://opencode.ai/docs/agents/`
- Factory Droid skills: `https://docs.factory.ai/cli/configuration/skills`
- Kiro skills/agents: `https://kiro.dev/docs/skills/`, `https://kiro.dev/docs/cli/custom-agents/configuration-reference/`
## Key Decisions
### 1. Do not make `~/.agents` a CE-managed install root
`~/.agents/plugins/marketplace.json` is documented by Codex as a personal marketplace file, not as a cross-harness plugin installation convention. Copilot installs plugins under `~/.copilot/installed-plugins`, Gemini installs extensions under `~/.gemini/extensions`, and Pi packages install through Pi settings plus npm/git/local package storage.
`~/.agents/skills` is also unsafe as a CE-managed install root. Copilot loads personal/project skills before plugin skills and deduplicates by `SKILL.md` `name`. A CE skill installed into `~/.agents/skills` for another target can silently shadow the same skill from Copilot's native plugin.
Treat `~/.agents` as a legacy cleanup surface, not a forward install surface.
### 2. Use native package distribution by target, not one universal folder
Native targets should have target-native packaging:
- Claude: existing `.claude-plugin` marketplace/plugin.
- Copilot: reuse existing `.claude-plugin` marketplace/plugin metadata. Do not add a parallel `.github/plugin` surface unless a future Copilot-only manifest field becomes necessary.
- Gemini: custom Bun install to `~/.gemini/{skills,agents,commands}` for now; future `gemini-extension.json` distribution remains possible.
- Pi: npm/git/local package with `package.json` `pi` manifest.
- Codex: `~/.codex/skills/compound-engineering`, `~/.codex/agents`, and optional future `.codex-plugin/plugin.json`, but do not retire the custom install until the remote install UX is verified.
### 3. Agents are not portable via `~/.agents`
`~/.agents/skills` is increasingly common. `~/.agents/agents` is not documented by the primary sources checked for Codex, Copilot, or Gemini. Agent support must remain per target:
- Copilot agents: markdown agent files under `~/.copilot/agents`, `.github/agents`, Claude-compatible `.claude/agents` / `~/.claude/agents`, or plugin `agents`.
- Gemini sub-agents: markdown files under `.gemini/agents`, `~/.gemini/agents`, or extension `agents/`.
- Codex custom agents: TOML files under `.codex/agents` / `~/.codex/agents`. CE should generate these from Claude Markdown agents instead of degrading them into skills.
- OpenCode agents: markdown/config under `.opencode/agents` / `~/.config/opencode/agents`.
- Kiro agents: JSON configs and prompt files under `.kiro/agents`.
- Pi: no built-in subagents; package an extension if CE needs subagent behavior.
This means the previous "convert agents to skills" behavior remains legitimate for targets without compatible agent packaging, but it should not be applied to Copilot and Gemini unless intentionally degraded. Gemini's April 2026 subagent support makes this more important: Gemini output should package CE agents as subagents under Gemini-owned roots, while `~/.agents` remains cleanup-only.
### 4. Cleanup must be a product feature, not incidental writer behavior
Current cleanup work in `src/data/plugin-legacy-artifacts.ts` is the right direction, but it is too writer-bound. We need a standalone cleanup command that can run before switching users from old Bun installs to native harness installers.
Custom writers should still invoke cleanup automatically. Native installers cannot clean old CE artifacts in unrelated roots, so users need an explicit CE cleanup command.
### 5. Legacy inventory should be generated and validated against git history
The hand-maintained legacy list should be backed by a script that scans historical plugin inventories from git history:
- `plugins/compound-engineering/skills/*`
- `plugins/compound-engineering/agents/*`
- `plugins/compound-engineering/commands/*`
- historical `prompts/*` or converted command outputs
- renamed colon/underscore/hyphen variants per target
The result should be committed as data, and tests should fail when the current or historical source inventory includes an untracked CE artifact.
## Implementation Units
- [ ] **Unit 1: Add a platform install strategy spec**
**Goal:** Replace ad hoc target assumptions with one repo-owned matrix for native vs custom install, shared-skill support, and agent support.
**Requirements:** R1, R2, R3, R4, R9
**Files:**
- Create: `docs/solutions/integrations/native-plugin-install-strategy-2026-04-19.md`
- Modify: `README.md`
- Modify as needed: `docs/specs/codex.md`, `docs/specs/copilot.md`, `docs/specs/gemini.md`, `docs/specs/opencode.md`
**Approach:**
- Document why CE avoids `~/.agents/skills` despite broad discovery support.
- Document target-native package locations and install commands.
- Mark each current target as `native-primary`, `custom-primary`, or `hybrid`.
- Explicitly list whether source Claude agents become target agents or generated skills.
**Test scenarios:**
- README no longer implies all targets require the same Bun install path.
- Target specs agree on whether a target uses native install or custom writer.
---
- [ ] **Unit 2: Build a standalone CE cleanup command**
**Goal:** Give users one command to remove stale CE-owned artifacts from old installs before or during migration to native installers.
**Requirements:** R5, R6, R7, R8
**Files:**
- Create: `src/commands/cleanup.ts`
- Create or Modify: `src/cleanup/*`
- Modify: `src/index.ts`
- Modify: `src/targets/*` custom writers to call shared cleanup helpers
- Modify: `tests/cli.test.ts`
- Add targeted cleanup tests under `tests/`
**Approach:**
- Add a command such as `compound cleanup compound-engineering --targets codex,copilot,gemini,pi,opencode,droid --apply`.
- Default to dry-run unless the existing CLI convention strongly favors direct action.
- Move matched legacy artifacts to a timestamped backup rather than hard-deleting.
- Only touch known CE-owned artifacts, existing install-manifest entries, and symlinks whose targets are CE-managed.
- Cover `~/.agents/skills`, `~/.codex/skills`, `~/.codex/prompts`, `~/.copilot/skills`, `~/.copilot/agents`, `~/.gemini/skills`, `~/.gemini/agents`, `~/.gemini/commands`, `~/.pi/agent/{skills,prompts,extensions}`, `~/.config/opencode/{skills,agents,commands,plugins}`, `~/.factory/{skills,commands,droids}`, deprecated `~/.codeium/windsurf/{skills,workflows,mcp_config.json}`, and other current writer roots.
**Test scenarios:**
- Dry run reports stale `reproduce-bug` without moving it.
- Apply moves stale CE artifacts to backup.
- Non-CE skill with the same parent directory root is preserved.
- A CE-managed symlink in `~/.agents/skills` is removed or moved safely.
- A real user-owned directory at a CE-looking path is skipped unless manifest/history proves CE ownership.
---
- [ ] **Unit 3: Generate and validate the historical CE artifact manifest**
**Goal:** Prevent future cleanup gaps when skills or agents are removed, renamed, or converted.
**Requirements:** R5, R8
**Files:**
- Modify: `src/data/plugin-legacy-artifacts.ts`
- Create: `scripts/generate-legacy-artifacts.ts` or similar
- Create: `tests/plugin-legacy-artifacts-history.test.ts`
- Modify: existing `tests/plugin-legacy-artifacts.test.ts`
**Approach:**
- Scan git history for CE plugin directories and normalize names per target.
- Preserve hand-added aliases only for cases not recoverable from source directory history.
- Commit generated data in a stable sorted form.
- Test that current source artifacts and known removed artifacts are included.
**Test scenarios:**
- Removed `reproduce-bug` remains in cleanup data.
- If `document-review` is renamed to `ce-doc-review`, both old and new cleanup-relevant names are tracked.
- Historical `prompts` outputs remain cleanup candidates.
- Colon, underscore, and hyphen variants normalize correctly for Codex, Gemini, Pi, and OpenCode.
---
- [ ] **Unit 4: Move Copilot to native plugin distribution through existing Claude metadata**
**Goal:** Replace user-facing `bunx ... --to copilot` with Copilot marketplace/plugin install.
**Requirements:** R1, R3, R4, R7, R9
**Files:**
- Modify: `README.md`
- Modify: `docs/specs/copilot.md`
- Modify: CLI target registration/tests if direct install is deprecated
- Reassess/remove: `src/converters/claude-to-copilot.ts`, `src/targets/copilot.ts`, `src/types/copilot.ts`, and Copilot writer/converter tests if they no longer serve release validation
**Approach:**
- Use the existing root `.claude-plugin/marketplace.json`; Copilot CLI explicitly looks there for marketplace metadata.
- Use the existing plugin-local `.claude-plugin/plugin.json`; Copilot CLI can discover plugin manifests from `.claude-plugin/plugin.json`.
- Document Copilot native install instructions:
- `copilot plugin marketplace add EveryInc/compound-engineering-plugin`
- `copilot plugin install compound-engineering@compound-engineering-plugin`
- Keep plugin agents as agents, not generated skills.
- Do not create parallel `.github/plugin` metadata or `agents-copilot/` output unless a real compatibility failure is proven.
- Run or recommend `compound cleanup compound-engineering --targets copilot,codex --apply` before switching old installs.
- Treat stale Copilot skills as a shadowing risk, not only a duplicate-display risk. Copilot deduplicates skills by `SKILL.md` `name` with first-found-wins precedence, and personal/project skill roots such as `~/.agents/skills` load before plugin skills.
**Test scenarios:**
- Existing `.claude-plugin/marketplace.json` parses and has a `compound-engineering` entry whose `source` points at `plugins/compound-engineering`.
- Existing `plugins/compound-engineering/.claude-plugin/plugin.json` parses and is valid enough for both Claude and Copilot.
- Copilot docs/spec record the native install commands and the `.claude-plugin` compatibility.
- README does not advertise old direct Copilot Bun install as the primary path.
- If possible, a local-path Copilot plugin install in a temporary config directory succeeds without modifying the user's real Copilot home.
- A seeded stale `~/.agents/skills/ce-plan/SKILL.md` shadows a plugin-provided `ce-plan` in docs/tests or manual verification, proving cleanup is required even when Copilot does not show duplicate skills.
---
- [ ] **Unit 5: Update Gemini custom install and defer extension packaging**
**Goal:** Keep Gemini on the custom Bun installer for now, but make it write Gemini-native skills and subagents under `~/.gemini` without using `~/.agents`.
**Requirements:** R1, R3, R4, R7, R9
**Files:**
- Create or Generate: Gemini skill/agent/command payloads as needed
- Modify: `docs/specs/gemini.md`
- Modify: `README.md`
- Reassess: `src/converters/claude-to-gemini.ts`, `src/targets/gemini.ts`
**Approach:**
- Write pass-through skills to `~/.gemini/skills`.
- Write normalized flat Gemini subagents to `~/.gemini/agents`.
- Write command TOML files to `~/.gemini/commands` if CE ships commands again.
- Write a managed manifest to `~/.gemini/compound-engineering/install-manifest.json`.
- Do not write CE-owned Gemini artifacts to `~/.agents/skills`.
- Do not assume `gemini extensions install` supports `--path` for a monorepo subdirectory. Current docs and local help list a GitHub repository URL or a local path as sources, while `--path` is documented for `gemini skills install`.
- Defer native extension distribution until we choose a shape where the installed source root contains `gemini-extension.json`: dedicated Gemini extension repo, generated distribution branch/package, or release asset.
- Preserve agent prompt bodies where possible; the necessary work is flattening agent files into direct `agents/*.md` entries and stripping/translating Claude-specific frontmatter such as `color` and string-form `tools`.
**Test scenarios:**
- Bun install writes to Gemini-owned roots and does not write to `~/.agents/skills`.
- Gemini-specific agents are packaged as extension sub-agents, not flattened into skills unless deliberately configured.
- Generated Gemini agents are flat direct files under `~/.gemini/agents`, contain strict Gemini-compatible frontmatter, and load without validation errors.
- Legacy `.gemini` direct install cleanup still runs from the cleanup command.
---
- [ ] **Unit 6: Add or defer Pi package distribution**
**Goal:** Decide whether CE can be installed with `pi install` and, if yes, package the existing Pi output as a real Pi package.
**Requirements:** R1, R4, R6, R7, R9
**Files:**
- Create or Modify: package metadata for Pi package distribution
- Modify: `docs/specs/pi.md` if created, otherwise add one
- Modify: `README.md`
- Reassess: `src/converters/claude-to-pi.ts`, `src/targets/pi.ts`
**Approach:**
- Prefer npm package distribution if we want to avoid asking users to manually clone a repository.
- Package Pi resources with `package.json` `pi` manifest: `skills`, `prompts`, and `extensions`.
- Resolve the existing compat-extension conflict risk before promoting Pi native package as primary.
- Until packaged and tested, keep the custom Pi writer and have it call shared cleanup every install.
**Test scenarios:**
- Pi package manifest includes skills/prompts/extensions.
- Existing `compound-engineering-compat.ts` does not conflict with popular subagent packages or is made conditional.
- Cleanup removes old direct writer artifacts under `~/.pi/agent`.
---
- [x] **Unit 7: Rationalize remaining custom targets and deprecate Windsurf**
**Goal:** Make explicit which targets still need the Bun converter/install path, remove Windsurf from active support, and ensure each retained or deprecated target has cleanup coverage.
**Requirements:** R4, R6, R8, R9, R10
**Files:**
- Modify: `src/targets/index.ts`
- Modify: `src/targets/{codex,opencode,kiro,qwen}.ts`
- Delete: custom plugin install writers for native-marketplace targets such as Droid and Copilot
- Delete: `src/converters/claude-to-windsurf.ts`, `src/types/windsurf.ts`, `src/targets/windsurf.ts`, `src/sync/windsurf.ts`, `tests/windsurf-*.test.ts`
- Modify: README target table
- Modify: target writer tests
**Approach:**
- Keep custom targets where native install does not cover the full payload or is not documented enough.
- Run shared cleanup for each custom install.
- Deprecate Windsurf from user-facing `convert`, `install`, `sync`, README, and target lists.
- Preserve Windsurf cleanup support so old CE artifacts can be removed from `~/.codeium/windsurf/` even after active support is gone.
- For Codex, keep current custom install as primary until native plugin distribution from a GitHub repo is as simple as Copilot/Gemini/Pi or until official directory publishing is available.
- For Codex skills, write to `~/.codex/skills/compound-engineering/<skill>` with a manifest under `~/.codex/compound-engineering/`; do not write to `~/.agents/skills`.
 - For Codex agents, convert Claude Markdown agents to TOML custom agents under the nested `~/.codex/agents/compound-engineering/` root using CE-prefixed names such as `ce-review-correctness-reviewer`, and update converted skill content so `Task`/agent references explicitly ask Codex to spawn the named custom agent.
- The Codex skill-plus-agent split was smoke-tested on 2026-04-18: a skill in `~/.agents/skills/ce-codex-agent-smoke` successfully spawned a TOML custom agent from `~/.codex/agents/ce-codex-agent-smoke.toml` and returned `CODEX_TOML_AGENT_SMOKE_OK`.
- Codex duplicate discovery was also smoke-tested on 2026-04-18: the same skill name installed under both `~/.agents/skills` and legacy `~/.codex/skills` appeared twice in the skill picker. Codex cleanup must remove old CE-owned skills from both roots before writing the namespaced `~/.codex/skills/compound-engineering` install.
- Shared skill nesting was smoke-tested on 2026-04-18: Codex discovered flat, nested, and Superpowers-style symlink-pack skills under `~/.agents/skills`, but Copilot and Gemini only discovered the flat direct `~/.agents/skills/<skill>/SKILL.md` shape. CE should avoid this root anyway because of Copilot shadowing.
- For OpenCode, do not share pass-through skills via `~/.agents/skills` unless the user explicitly opts into cross-harness shared skills and understands Copilot shadowing.
**Test scenarios:**
- Each custom writer calls cleanup with the correct target roots.
- Target writer manifests remove artifacts that disappear between installs.
- Windsurf is no longer advertised or selectable as an active install target.
- Cleanup can still identify and back up old CE Windsurf artifacts.
- README table matches registered target behavior.
## Sequencing
1. Land the strategy spec and cleanup command first. This reduces migration risk no matter which native packaging target lands next.
2. Promote Copilot native install next because its plugin marketplace flow is documented and closest to Claude's model.
3. Add Gemini extension packaging after Copilot because Gemini can bundle skills, commands, and preview sub-agents through extensions.
4. Decide Pi packaging after resolving the extension conflict and npm-package shape.
5. Revisit Codex native plugins last; the platform supports plugins, but the public distribution UX still appears less direct than Copilot/Gemini/Pi for a GitHub-hosted third-party plugin.
6. Deprecate Windsurf and keep the remaining custom targets, with cleanup mandatory and manifest-backed.
## Open Questions
- Should the cleanup command default to dry-run or apply? Recommendation: dry-run for standalone use, apply automatically inside custom install writers.
- Should native package payloads be checked in or generated during release validation? Recommendation: generated but checked for determinism in CI if the target package must be present in the repo.
- Should the existing `@every-env/compound-plugin` npm package also become the Pi package, or should Pi get a smaller dedicated npm package? Recommendation: investigate package contents first; avoid bloating Pi installs with converter-only code if avoidable.
- Should Codex native plugin support be documented as experimental alongside custom install? Recommendation: yes, but do not retire custom install until remote marketplace install is verified end to end.
## Verification
- `bun test` after implementation units touching CLI, writers, or conversion.
- `bun run release:validate` after native package manifests or plugin inventory changes.
- Manual smoke tests for native installers:
- Claude: `/plugin install compound-engineering`
- Copilot: `copilot plugin marketplace add EveryInc/compound-engineering-plugin` then install
- Gemini: `gemini extensions install <repo-url-or-local-path>`
- Pi: `pi install npm:<package>` or local package path
- Cleanup smoke test with seeded temp homes for `~/.agents`, `~/.codex`, `~/.copilot`, `~/.gemini`, `~/.pi`, `~/.config/opencode`, and `~/.factory`.

View File

@@ -0,0 +1,686 @@
---
title: "Native plugin install strategy for supported harnesses"
date: 2026-04-19
category: integrations
module: installer
problem_type: integration_decision
component: installer
symptoms:
- "Multiple harnesses can discover the same CE skills from shared roots and create duplicates or shadowing"
- "Some harnesses now support native Claude-compatible plugin installs, making custom Bun installs redundant"
- "Old manual installs can leave stale skills and agents after CE renames or deprecations"
root_cause: evolving_platform_install_surfaces
resolution_type: install_strategy
severity: medium
tags:
- install-strategy
- native-plugins
- legacy-cleanup
- cursor
- codex
- copilot
- droid
- qwen
- gemini
- opencode
- kiro
---
# Native Plugin Install Strategy
Last verified: 2026-04-19
This document records the intended install model by harness. The current priority is separating native marketplace installs from custom Bun installs so CE does not create duplicate or shadowing skills across tools.
## Summary
| Harness | Intended install path | Custom Bun install? | Legacy cleanup needed? | Notes |
| --- | --- | --- | --- | --- |
| Claude Code | Native plugin marketplace using existing `.claude-plugin/marketplace.json` and `plugins/compound-engineering/.claude-plugin/plugin.json` | No | Only for old/manual non-native installs, if any | Current repo shape already satisfies Claude Code. |
| Cursor | Native Cursor Plugin Marketplace using existing `.cursor-plugin/marketplace.json` and `plugins/compound-engineering/.cursor-plugin/plugin.json` | No, CE plugin install/convert target removed | No for marketplace installs; add targeted cleanup only if historical custom Cursor artifacts are confirmed | Users install from Cursor Agent chat with `/add-plugin compound-engineering` or by searching the plugin marketplace. |
| GitHub Copilot CLI | Native plugin marketplace using the same existing `.claude-plugin` metadata | No, CE plugin install/convert target removed | Yes, before or during migration from previous `.github/` custom installs | Tested manually: Copilot can install from the existing CE marketplace and load agents. |
| Factory Droid | Native plugin marketplace pointed at the CE GitHub repository | No, CE plugin install/convert target removed | Yes, before or during migration from previous `~/.factory` custom installs | Droid docs say Claude Code plugins install directly and are translated automatically; `ce-doc-review` was manually tested in Droid. |
| Qwen Code | Native extension install from the CE GitHub repository and existing Claude plugin metadata | No, CE plugin install/convert target removed | Yes, before or during migration from previous `~/.qwen` custom installs | Qwen docs say Claude Code extensions install directly from GitHub and are converted automatically; native install was manually tested on 2026-04-19. |
| OpenCode | Custom CE install to `~/.config/opencode/{skills,agents,plugins}` plus merged `opencode.json`; source commands are written only if present | Yes | Yes, every install | OpenCode plugins are JS/TS or npm hooks/tools, not a Claude-compatible marketplace install path for CE's full plugin payload. |
| Pi | Custom CE install to `~/.pi/agent/{skills,prompts,extensions}` plus MCPorter config; source commands are written only if present | Yes, until CE ships and tests a Pi package | Yes, every install | Pi has package install support, but CE has not yet packaged the compat extension, generated skills, prompts, and MCPorter config into a tested Pi package. |
| Codex | Custom CE install to `~/.codex/skills/compound-engineering/<skill>` and `~/.codex/agents/compound-engineering/<agent>.toml` | Yes, because native Codex plugins do not currently register bundled custom agents | Yes, every install | Avoid `~/.agents/skills` so Codex installs do not shadow Copilot's native plugin skills. Claude agents are converted to Codex TOML custom agents. |
| Gemini CLI | Custom CE install to `~/.gemini/{skills,agents}` for now; source commands are written only if present; native extension packaging exists but does not fit CE's current repo/package layout | Yes, until CE ships a Gemini extension root, release artifact, or dedicated distribution branch/repo | Yes, every install | Avoid `~/.agents/skills`; write normalized Gemini agents to `~/.gemini/agents`. |
| Kiro CLI | Custom CE install to project `.kiro/{skills,agents,steering,settings}` | Yes | Yes, every install; manual `cleanup --target kiro` also exists | Kiro has its own JSON agent format and project-local install root. |
Deprecated targets:
- Windsurf is no longer an active CE install, convert, or sync target. `cleanup --target windsurf` remains available only to back up old CE-owned files from previous Bun installs under `~/.codeium/windsurf/` or workspace `.windsurf/`.
Removed capabilities:
- Personal Claude Code home sync (`bunx @every-env/compound-plugin sync`) has been removed. Syncing arbitrary `~/.claude` skills, commands, agents, and MCP config across unrelated harnesses is not a bounded compatibility surface; CE only supports installing the CE plugin and cleaning up old CE-owned artifacts.
Current CE command posture:
- The `compound-engineering` plugin currently ships no Claude `commands/` files. Its workflow entry points are skills invoked with slash syntax, such as `/ce-plan`, `/ce-work`, and `/ce-doc-review`.
- The CLI still understands source plugin commands for legacy cleanup and for converting non-CE Claude plugins that still ship commands. CE install docs should not describe commands as part of the current CE payload except as legacy/source-plugin compatibility.
## Global Decision: Avoid `~/.agents` For CE-Owned Installs
Do not install CE-owned skills or agents into `~/.agents` for normal target installs.
Several harnesses read `~/.agents/skills`, but Copilot CLI gives personal/project skill roots precedence over plugin skills. A CE skill written for Codex, Gemini, Pi, or another target into `~/.agents/skills` can silently shadow the same skill from Copilot's native plugin install. That makes `~/.agents` unsafe as a shared CE-managed install root.
Use target-owned roots instead:
```text
OpenCode: ~/.config/opencode/skills/<skill>/SKILL.md
~/.config/opencode/agents/<agent>.md
~/.config/opencode/commands/*.md # source commands only, if present
~/.config/opencode/opencode.json
Pi: ~/.pi/agent/skills/<skill>/SKILL.md
~/.pi/agent/prompts/*.md # source commands only, if present
~/.pi/agent/extensions/*.ts
~/.pi/agent/compound-engineering/mcporter.json
Codex: ~/.codex/skills/compound-engineering/<skill>/SKILL.md
~/.codex/agents/compound-engineering/<agent>.toml
Gemini: ~/.gemini/skills/<skill>/SKILL.md
~/.gemini/agents/<agent>.md
~/.gemini/commands/*.toml # source commands only, if present
Copilot: managed by native plugin install under ~/.copilot
Cursor: managed by native Cursor Plugin Marketplace install
Droid: managed by native plugin install under ~/.factory for user scope
Qwen: managed by native extension install under ~/.qwen
```
`~/.agents/skills` remains a cleanup target only, because prior CE installs or experiments may have left shadowing skills there.
## Claude Code
### Decision
Claude Code is already satisfied by the current repo layout:
- Root marketplace: `.claude-plugin/marketplace.json`
- Plugin root: `plugins/compound-engineering/`
- Plugin manifest: `plugins/compound-engineering/.claude-plugin/plugin.json`
- Plugin components: `agents/`, `skills/`, and related files under the plugin root. Claude `commands/` would be supported if reintroduced, but CE does not currently ship them.
Users install with:
```text
/plugin marketplace add EveryInc/compound-engineering-plugin
/plugin install compound-engineering
```
No custom Bun install or conversion should be used for Claude Code.
### Cleanup
Native Claude plugin installs are owned by Claude Code. The CE cleanup command should not delete Claude Code's plugin cache. It should only handle explicitly known old/manual CE artifacts if we discover any historical non-native Claude install path.
## Cursor
### Decision
Cursor should use the native Cursor Plugin Marketplace, not `bunx @every-env/compound-plugin install compound-engineering --to cursor`.
The custom Cursor plugin install/convert target has been removed from the CLI target registry.
The repo publishes Cursor marketplace metadata separately from the Claude marketplace:
- Root marketplace: `.cursor-plugin/marketplace.json`
- Plugin manifest: `plugins/compound-engineering/.cursor-plugin/plugin.json`
Users install from Cursor Agent chat with:
```text
/add-plugin compound-engineering
```
They can also search for "compound engineering" in the plugin marketplace.
No custom Bun install or conversion should be used for Cursor.
### Cleanup
Cursor marketplace installs are owned by Cursor. CE should not delete Cursor's plugin marketplace cache.
If we discover historical CE-owned Cursor artifacts from the old custom writer that can shadow marketplace installs, add a targeted cleanup path for those known artifacts. Do not reintroduce Cursor as an active `convert` or `install` target.
## GitHub Copilot CLI
### Decision
Copilot should use native plugin install, not `bunx @every-env/compound-plugin install compound-engineering --to copilot`.
The custom Copilot plugin install/convert target has been removed from the CLI target registry.
Copilot CLI can read:
- Marketplace manifests from `.claude-plugin/marketplace.json`
- Plugin manifests from `.claude-plugin/plugin.json`
- Plugin agents from the plugin `agents/` directory
- Plugin skills from the plugin `skills/` directory
Users install inside Copilot CLI with:
```text
/plugin marketplace add EveryInc/compound-engineering-plugin
/plugin install compound-engineering@compound-engineering-plugin
```
Shell equivalents:
```bash
copilot plugin marketplace add EveryInc/compound-engineering-plugin
copilot plugin install compound-engineering@compound-engineering-plugin
```
Do not add a parallel `.github/plugin/marketplace.json`, `.github/plugin/plugin.json`, or generated `agents-copilot/` directory unless a real compatibility failure appears. Manual testing showed Copilot can install from the existing CE marketplace and load CE agents.
Copilot skill conflicts are not displayed like Codex duplicate skills. Copilot deduplicates skills by the `name` field in `SKILL.md` using first-found-wins precedence. Project and personal skill locations, including `~/.agents/skills`, load before plugin skills. Therefore a stale `~/.agents/skills/ce-plan/SKILL.md` with `name: ce-plan` would shadow the plugin's `ce-plan` and the plugin skill would be silently ignored.
### Cleanup
The old custom Copilot target wrote generated files under `.github/`-style output. Users who installed that way should run CE legacy cleanup before or during migration so they do not have duplicate agents or skills from both the old Bun output and the native plugin.
For Copilot, "duplicate" often means silent shadowing rather than two visible entries. Cleanup must remove CE-owned stale skills from project and personal skill roots before switching to native plugin install, otherwise users can appear to have the native plugin installed while actually running an old flat skill.
Run:
```bash
bunx @every-env/compound-plugin cleanup --target copilot
```
The cleanup command backs up known CE-owned Copilot artifacts such as:
- Generated `.github/agents/*.agent.md` files from old installs
- Generated `.github/skills/*/SKILL.md` directories from old installs
- Generated `~/.copilot/{agents,skills}` files from personal old installs
- Shared `~/.agents/skills/*` CE skills that would shadow native Copilot plugin skills
- Any tracked install-manifest entries from the old writer
It must not delete user-authored `.github/agents` or `.github/skills` content unless manifest/history proves CE ownership.
## Factory Droid
### Decision
Droid should use native plugin marketplace install, not `bunx @every-env/compound-plugin install compound-engineering --to droid`.
The custom Droid plugin install/convert target has been removed from the CLI target registry.
Users install with:
```bash
droid plugin marketplace add https://github.com/EveryInc/compound-engineering-plugin
droid plugin install compound-engineering@compound-engineering-plugin
```
Factory's docs describe GitHub marketplace installation, user/project/org plugin scopes, and direct Claude Code plugin compatibility. They explicitly say Droid can install a Claude Code plugin directly and automatically translate the format. Manual testing on 2026-04-19 confirmed Droid could run `ce-doc-review` from the CE plugin and load both the skill and agents.
This means Droid is now in the same category as Claude Code and Copilot for CE distribution: use the native marketplace/plugin install path, not a generated custom Bun install.
### Cleanup
The old custom Droid target wrote CE-owned artifacts under `~/.factory`, especially:
- `~/.factory/skills/*`
- `~/.factory/droids/*.md`
- `~/.factory/commands/*.md`
- any CE install manifest or managed backup directory created by the old writer
Before users migrate from the old Bun install to the native Droid plugin, legacy cleanup should remove or back up CE-owned generated files so the native plugin is not shadowed by stale local artifacts.
Run:
```bash
bunx @every-env/compound-plugin cleanup --target droid
```
The cleanup command must not delete Droid's native plugin cache or user-authored Droid files. It should only remove artifacts proven to be CE-owned by an install manifest, known historical CE names, or generated CE metadata.
## Qwen Code
### Decision
Qwen should use native extension install, not `bunx @every-env/compound-plugin install compound-engineering --to qwen`.
The custom Qwen plugin install/convert target has been removed from the CLI target registry.
Users install with:
```bash
qwen extensions install EveryInc/compound-engineering-plugin:compound-engineering
```
Qwen Code's extension docs say it can install Claude Code extensions directly from GitHub and convert Claude plugin metadata to Qwen extension metadata automatically. Manual testing on 2026-04-19 confirmed the CE plugin installed successfully through Qwen's native path.
This is a better fit than the old custom writer because Qwen now owns the Claude-plugin compatibility layer. The old writer duplicated that logic and did not fully rewrite CE's agent-heavy skill content into Qwen subagent invocation syntax.
### Cleanup
The old custom Qwen target wrote CE-owned artifacts under `~/.qwen`, especially:
- `~/.qwen/extensions/compound-engineering/` with CE-managed tracking keys in `qwen-extension.json`
- `~/.qwen/skills/*`
- `~/.qwen/agents/*.yaml`
- `~/.qwen/agents/*.md`
- `~/.qwen/commands/*.md`
Before users migrate from the old Bun install to the native Qwen extension, legacy cleanup should remove or back up CE-owned generated files so the native extension is not shadowed by stale local artifacts.
Run:
```bash
bunx @every-env/compound-plugin cleanup --target qwen
```
Cleanup only backs up the old extension root when it finds the CE-managed tracking keys written by the legacy writer. This avoids deleting Qwen's current native extension cache after a successful native install.
## OpenCode
### Current Platform Facts
OpenCode's current install/discovery model is file-based:
- Skills are direct child directories with `SKILL.md` under `.opencode/skills/<name>/`, `~/.config/opencode/skills/<name>/`, `.claude/skills/<name>/`, `~/.claude/skills/<name>/`, `.agents/skills/<name>/`, or `~/.agents/skills/<name>/`.
- Agents can be configured in `opencode.json` or as Markdown files under `~/.config/opencode/agents/` or `.opencode/agents/`.
- Commands can be configured in `opencode.json` or as Markdown files under `~/.config/opencode/commands/` or `.opencode/commands/`.
- Plugins are JavaScript/TypeScript modules loaded from `.opencode/plugins/` or `~/.config/opencode/plugins/`, or npm packages listed in the `plugin` option in `opencode.json`.
OpenCode has a plugin system, but it is not equivalent to Claude/Copilot/Droid plugin marketplaces. The official docs describe JS/TS hooks, custom tools, local plugin files, and npm package loading. They do not document a native marketplace command that can point at the CE GitHub repository, read `.claude-plugin/marketplace.json`, and install CE skills and agents as a complete plugin.
### Decision
Keep the custom CE OpenCode writer for now:
```text
~/.config/opencode/opencode.json
~/.config/opencode/skills/<skill>/SKILL.md
~/.config/opencode/agents/<agent>.md
~/.config/opencode/commands/*.md # source commands only, if present
~/.config/opencode/plugins/*.ts
~/.config/opencode/compound-engineering/install-manifest.json
```
This matches OpenCode's documented global config root and lets CE convert the full Claude-authored payload: skills, agents, hooks/plugins, MCP config, and source commands if a plugin ships them. An npm OpenCode plugin could be useful later for hooks/tools, but it would not replace the need to place CE skills and agents into OpenCode's discovery roots unless OpenCode adds a richer package/install surface.
Avoid `~/.agents/skills` for CE-managed OpenCode installs for the same reason as Codex and Gemini: OpenCode can read that shared root, but Copilot can also read it and shadow native plugin skills.
### Cleanup
The OpenCode custom writer should continue to track and clean CE-owned files on every install:
- Old CE-owned `~/.config/opencode/skills/*`
- Old CE-owned `~/.config/opencode/agents/*`
- Old CE-owned `~/.config/opencode/commands/*`
- Old CE-owned `~/.config/opencode/plugins/*`
- Old CE-owned shared skills under `~/.agents/skills/*` from previous experiments or installs
- Manifest-tracked files that disappeared because a skill, agent, or command was renamed or removed
## Pi
### Current Platform Facts
Pi supports file-based skills and package installs. Its package surface can bundle skills, prompts, extensions, and related package metadata, and `pi install` can install from package sources such as npm, git, URLs, or local paths.
Pi also has shared skill discovery through `~/.agents/skills` and `.agents/skills`, but CE should not use those shared roots for the same reason as OpenCode, Codex, and Gemini: Copilot can read shared personal/project skills before plugin skills, so a CE skill installed there for Pi could shadow Copilot's native plugin install.
CE's current Pi compatibility is not a raw Claude-compatible plugin install. The converter currently:
- Copies platform-compatible CE skills.
- Converts Claude agents into generated Pi skills, because Pi does not provide a Claude-style plugin `agents/` runtime equivalent for this payload today.
- Writes a `compound-engineering-compat.ts` extension that provides compatibility tools such as subagent invocation and MCPorter access.
- Converts Claude MCP server config into `compound-engineering/mcporter.json` for MCPorter.
- Writes source commands as prompts only if a source plugin ships commands.
### Decision
Keep the custom CE Pi writer for now:
```text
~/.pi/agent/skills/<skill-name>/SKILL.md
~/.pi/agent/prompts/*.md
~/.pi/agent/extensions/compound-engineering-compat.ts
~/.pi/agent/compound-engineering/mcporter.json
~/.pi/agent/compound-engineering/install-manifest.json
~/.pi/agent/AGENTS.md # CE-managed compatibility block
```
This is a pragmatic install target, not the desired long-term distribution shape. The long-term direction should be a real Pi package that can be installed with `pi install`, but CE should not promote that as the primary path until we package and test the full payload: copied skills, generated agent skills, prompts, the compatibility extension, MCPorter config, and cleanup behavior.
Do not install CE Pi artifacts into `~/.agents/skills`.
### Cleanup
The Pi custom writer should continue to track and clean CE-owned files on every install:
- Old CE-owned `~/.pi/agent/skills/*`
- Old CE-owned `~/.pi/agent/prompts/*`
- Old CE-owned `~/.pi/agent/extensions/*`
- Old generated agent-as-skill artifacts from prior CE installs
- Manifest-tracked files that disappeared because a skill, prompt, generated agent skill, or extension was renamed or removed
Manual cleanup is also available:
```bash
bunx @every-env/compound-plugin cleanup --target pi
```
Future Pi package work should preserve the same cleanup semantics before switching users from the current custom writer to a native `pi install` package.
## Codex
### Current Platform Facts
Current Codex docs describe user skills under `~/.agents/skills` and repo skills under `.agents/skills`. Codex also reads admin skills from `/etc/codex/skills` and system skills bundled by OpenAI. Codex supports symlinked skill folders and follows symlink targets.
Empirical note: Codex also still discovers legacy `~/.codex/skills` entries. On 2026-04-18, we created the same skill name in both `~/.agents/skills/ce-duplicate-discovery-smoke` and `~/.codex/skills/ce-duplicate-discovery-smoke`; the Codex skill picker showed both entries.
Despite current Codex docs favoring `~/.agents/skills`, CE should not write there because those files can shadow Copilot's native plugin skills. CE should use the Codex-specific compatibility root:
```text
~/.codex/skills/compound-engineering/<skill-name>/SKILL.md
```
This shape keeps CE Codex skills isolated from Copilot/Gemini shared discovery roots while still giving Codex a namespaced skill pack.
Codex also has custom agents and a plugin model:
- Custom agents are standalone TOML files under `~/.codex/agents/` or `.codex/agents/`.
- Each custom agent requires `name`, `description`, and `developer_instructions`.
- Codex only spawns subagents when explicitly asked.
Codex plugins exist, but current public distribution is still local/personal:
- Repo marketplace: `$REPO_ROOT/.agents/plugins/marketplace.json`
- Personal marketplace: `~/.agents/plugins/marketplace.json`
- Typical personal plugin storage: `~/.codex/plugins/<plugin-name>`
- Installed plugin cache: `~/.codex/plugins/cache/<marketplace>/<plugin>/<version>/`
- Official public plugin publishing is still marked as coming soon.
This means Codex has a plugin model, but not yet a Copilot-style "point at GitHub marketplace repo and install globally" distribution path that is good enough to replace our CE custom install for normal users.
### What Superpowers Does
Superpowers' Codex install guide is a skill-discovery install, not a Codex plugin install:
```bash
git clone https://github.com/obra/superpowers.git ~/.codex/superpowers
mkdir -p ~/.agents/skills
ln -s ~/.codex/superpowers/skills ~/.agents/skills/superpowers
```
The real content lives under:
```text
~/.codex/superpowers
```
The discovery entry lives under:
```text
~/.agents/skills/superpowers -> ~/.codex/superpowers/skills
```
So `~/.codex/superpowers` is the backing store, and `~/.agents/skills/superpowers` is a symlink used to make Codex discover the skills. Their migration instructions also remove an old bootstrap block from `~/.codex/AGENTS.md`, which implies an earlier non-skill-discovery install path.
This is useful, but it has tradeoffs we should not copy blindly:
- It requires users to clone and update a Git repo manually.
- It uses a namespaced subfolder under `~/.agents/skills`.
- It is optimized for Codex, but `~/.agents/skills` can shadow Copilot native plugin skills.
- It works for pass-through source skills, but CE's Codex target also generates target-specific artifacts from agents/commands, transforms content, writes prompt wrappers, and manages cleanup. A raw clone plus symlink would still need a generation/cleanup step unless we intentionally drop those converted artifacts.
The useful part to emulate is the idea of isolating a plugin's files under a named folder. The part to avoid is writing CE-owned files into `~/.agents/skills` or requiring a manual clone/update workflow for normal users.
### Subfolder Decision
Do not use `~/.agents/skills` for CE Codex installs. Even if Codex discovers it, Copilot also reads it and will let those skills shadow native plugin skills.
For CE's Codex target, use a Codex-specific namespaced folder:
```text
~/.codex/skills/compound-engineering/<skill-name>/SKILL.md
```
This is not the documented modern Codex skill path, so the implementation should keep a smoke test for current Codex discovery behavior. The tradeoff is intentional: we prefer a Codex-only compatibility path over writing to a shared root that breaks Copilot plugin isolation.
### Source-of-Truth Decision
For Codex, `~/.codex` is the durable source of truth for CE-owned Codex artifacts. Keep all generated Codex artifacts under Codex-owned roots and track them with a manifest:
```text
~/.codex/skills/compound-engineering/<skill-name>/SKILL.md
~/.codex/agents/compound-engineering/<agent-name>.toml
~/.codex/compound-engineering/install-manifest.json
```
Do not create symlinks from `~/.agents/skills` to these Codex-owned files.
### Intended CE Codex Plan
For now:
- Keep a custom CE Codex install path.
- Run legacy cleanup on every custom Codex install.
- Install generated/converted skills under `~/.codex/skills/compound-engineering/<skill-name>/SKILL.md`.
- Convert Claude Markdown agents to Codex TOML custom agents under `~/.codex/agents/compound-engineering/<agent-name>.toml`.
- Name converted agents with a `ce-` prefix plus the source category and agent name, for example `ce-review-correctness-reviewer` or `ce-research-repo-research-analyst`, and rewrite skill orchestration text to spawn those names.
- Track generated skills, prompts, and agents in `~/.codex/compound-engineering/install-manifest.json`.
- Keep Codex-only artifacts under `~/.codex`, such as prompt wrappers, `config.toml` MCP entries, and Codex TOML custom agents.
- Rewrite `Task`/agent references to spawn generated Codex custom agents when the referenced agent is known.
- Track an install manifest so removed skills and renamed skills can be cleaned later.
- Track historical CE artifacts from git history so old flat installs, prompt files, and converted-agent skills can be cleaned safely.
Do not require users to clone the CE repo for Codex. The CLI should continue to fetch/install from the package or branch source, then write the local Codex-compatible output.
### Smoke Test Result
On 2026-04-18, we verified the proposed Codex split with a local smoke test:
```text
~/.agents/skills/ce-codex-agent-smoke/SKILL.md
~/.codex/agents/ce-codex-agent-smoke.toml
```
The skill explicitly asked Codex to spawn the `ce_codex_agent_smoke` custom agent. Codex discovered the skill, spawned the TOML custom agent, waited for completion, and returned the expected marker:
```text
CODEX_TOML_AGENT_SMOKE_OK
```
This confirms the intended CE Codex architecture is viable: workflow skills can invoke Claude-authored agents converted to Codex TOML custom agents in `~/.codex/agents`. The skill root should now be moved from the tested `~/.agents/skills` path to the isolated CE path under `~/.codex/skills/compound-engineering`.
On 2026-04-19, we also verified that Codex discovers nested TOML custom agents under:
```text
~/.codex/agents/compound-engineering/<agent-name>.toml
```
and accepts hyphenated TOML `name` values such as `ce-codex-hyphen-toml-smoke`. CE should therefore use the nested `compound-engineering` agent root for cleanup parity with `~/.codex/skills/compound-engineering/`.
We also tested Codex native plugin-bundled agents in three shapes:
```text
plugins/<plugin>/agents/<agent>.toml
plugins/<plugin>/.codex/agents/<agent>.toml
plugins/<plugin>/.codex-plugin/plugin.json with "agents": "./agents/"
```
All installed plugin skills loaded, but spawning the bundled custom agents failed with `unknown agent_type`. Codex native plugins are therefore not a sufficient CE install path for agent-heavy workflows yet.
On the same day, we verified duplicate discovery behavior by installing two skills with the same `name`:
```text
~/.agents/skills/ce-duplicate-discovery-smoke/SKILL.md
~/.codex/skills/ce-duplicate-discovery-smoke/SKILL.md
```
Codex displayed both skill entries in the picker, one from `~/.agents/skills` and one from `~/.codex/skills`. This confirms that any old CE skills left in either root can cause visible duplicates. Cleanup must remove CE-owned stale skills from both `~/.agents/skills` and legacy flat `~/.codex/skills` before writing the namespaced `~/.codex/skills/compound-engineering` install.
Also on 2026-04-18, we tested nested skill discovery across Codex, Copilot, and Gemini with three shapes:
```text
~/.agents/skills/ce-flat-discovery-smoke/SKILL.md
~/.agents/skills/ce-nested-pack/ce-nested-discovery-smoke/SKILL.md
~/.agents/skills/ce-symlink-pack -> ~/.agents/ce-discovery-packs/ce-symlink-pack/skills
```
Results:
| Harness | Flat direct skill | Regular nested skill | Superpowers-style symlink pack |
| --- | --- | --- | --- |
| Codex | Worked | Worked | Worked |
| Copilot CLI | Worked | Not found | Not found |
| Gemini CLI | Worked | Not found | Not found |
Conclusion for shared skill roots: cross-harness `~/.agents/skills` installs only work portably when skills are direct children:
```text
~/.agents/skills/<skill-name>/SKILL.md
```
But CE should no longer install there because Copilot plugin skills can be shadowed by `~/.agents/skills`. Treat these results as cleanup/discovery context, not the target install shape.
### Future Codex Plugin Option
Codex now has a documented marketplace/plugin install path, including `codex marketplace add <source>`, but CE should not use it as the primary Codex install path yet because plugin-bundled custom agents did not register in testing.
Revisit Codex native plugins when Codex documents and supports plugin-bundled custom agents, or when the plugin installer can declare files that should be installed into the user's custom-agent roots.
Until then, Codex native plugins are useful for local development and testing skill-only packages, but not for CE's agent-heavy workflows.
## Gemini CLI
### Current Platform Facts
Gemini has two relevant install surfaces:
1. Shared/user skills:
- Workspace skills: `.gemini/skills/` or `.agents/skills/`
- User skills: `~/.gemini/skills/` or `~/.agents/skills/`
- Extension skills bundled inside installed extensions
2. Extensions:
- Installed with `gemini extensions install <source>`
- `<source>` can be a GitHub repository URL or a local path
- Gemini copies the extension during installation
- Installed extensions live under `~/.gemini/extensions`
- `gemini extensions link <path>` symlinks a local development extension for immediate iteration
Gemini extension roots require `gemini-extension.json`. An extension can bundle:
- `skills/<skill-name>/SKILL.md`
- `commands/*.toml`
- `agents/*.md` for preview subagents
- `GEMINI.md` context via `contextFileName`
- MCP server config
- hooks
- policies
- themes
For remote distribution and public gallery discovery, Gemini requires `gemini-extension.json` at the absolute root of the GitHub repository or release archive. `gemini extensions install <source>` accepts a GitHub repository URL or local path, but the documented and locally verified command does not include a monorepo `--path` option for extension installs.
Gemini subagents are Markdown files with YAML frontmatter. Local user/project agents are documented under:
```text
~/.gemini/agents/*.md
.gemini/agents/*.md
```
Extension subagents are documented under:
```text
<extension-root>/agents/*.md
```
The shared `.agents/*` alias is documented for skills, not subagents.
Gemini CLI 0.38.2 implementation confirms this: user agents resolve to `~/.gemini/agents`, project agents resolve to `.gemini/agents`, while shared aliases exist only for skill directories (`~/.agents/skills` and `.agents/skills`). Do not use `~/.agents/agents` as a shared CE agent install root for Gemini.
### Discovery Test Result
On 2026-04-18, we tested Gemini shared skill discovery with three shapes:
```text
~/.agents/skills/ce-flat-discovery-smoke/SKILL.md
~/.agents/skills/ce-nested-pack/ce-nested-discovery-smoke/SKILL.md
~/.agents/skills/ce-symlink-pack -> ~/.agents/ce-discovery-packs/ce-symlink-pack/skills
```
Gemini discovered only the flat direct skill. It did not discover the regular nested skill or the Superpowers-style symlink pack.
If `~/.agents/skills` is used manually, Gemini-compatible skills must be direct children:
```text
~/.agents/skills/<skill-name>/SKILL.md
```
CE should not use that path for managed Gemini installs because it can shadow Copilot plugin skills.
### Intended CE Gemini Plan
For now, keep a custom CE Gemini install path and write directly to Gemini-owned roots:
```text
~/.gemini/skills/<skill-name>/SKILL.md
~/.gemini/agents/<agent-name>.md
~/.gemini/commands/*.toml # source commands only, if present
~/.gemini/compound-engineering/install-manifest.json
```
The Gemini writer should copy pass-through skills to `~/.gemini/skills`, generate normalized flat Gemini subagents in `~/.gemini/agents`, and write command TOML files under `~/.gemini/commands` if CE ships commands again.
Gemini extension distribution is already supported. The CE blocker is packaging shape: our source repo is a multi-plugin repo and the CE plugin root is `plugins/compound-engineering/`, while Gemini extension installs expect `gemini-extension.json` at the extension source root. Current Gemini extension install does not support a documented monorepo `--path` flow.
Native Gemini extension packaging should become the preferred Gemini distribution path once CE ships one of these shapes:
- a generated extension root published as the repository or release archive root
- a dedicated Gemini extension repository
- a distribution branch whose root is the Gemini extension root
That extension root should be generated/normalized, not just the Claude plugin directory with `gemini-extension.json` added, because Gemini loads direct `agents/*.md` files and validates Gemini-shaped agent frontmatter.
Open questions to validate in implementation:
- Whether Gemini supports any undocumented repository subdirectory syntax for extensions. Current docs and local help only show whole GitHub repository URLs or local paths.
- Whether Gemini preview subagents are enabled by default for all users or require settings in some versions/environments.
- How Gemini extension subagent invocation names map from nested Claude agent paths.
### Cleanup
The Gemini custom writer must clean old CE-owned artifacts so users do not see duplicates or stale converted-agent skills.
Cleanup should cover:
- Old CE-owned `.gemini/skills/*`
- Old CE-owned `.gemini/agents/*`
- Old CE-owned `.gemini/commands/*`
- Old CE-owned `~/.gemini/skills/*`
- Old CE-owned `~/.gemini/agents/*`
- Old CE-owned `~/.gemini/commands/*`
- Any CE-owned flat shared skills under `~/.agents/skills/*` from older experiments or installs
- Any future CE-owned extension install if we need to uninstall/reinstall a broken pre-release
## Sources
- Claude/Copilot marketplace metadata: `.claude-plugin/marketplace.json`
- Cursor marketplace metadata: `.cursor-plugin/marketplace.json`
- Claude plugin manifest: `plugins/compound-engineering/.claude-plugin/plugin.json`
- Cursor plugin manifest: `plugins/compound-engineering/.cursor-plugin/plugin.json`
- Copilot plugin reference: `https://docs.github.com/en/copilot/reference/copilot-cli-reference/cli-plugin-reference`
- Copilot CLI plugins overview: `https://docs.github.com/en/copilot/concepts/agents/copilot-cli/about-cli-plugins`
- Factory Droid plugin configuration: `https://docs.factory.ai/cli/configuration/plugins`
- Factory Droid plugin build guide: `https://docs.factory.ai/guides/building/building-plugins`
- OpenCode config: `https://opencode.ai/docs/config/`
- OpenCode skills: `https://opencode.ai/docs/skills`
- OpenCode agents: `https://opencode.ai/docs/agents/`
- OpenCode commands: `https://opencode.ai/docs/commands/`
- OpenCode plugins: `https://opencode.ai/docs/plugins/`
- Pi overview: `https://buildwithpi.ai/README.md`
- Pi skills/packages: `https://github.com/badlogic/pi-mono/blob/main/packages/coding-agent/docs/skills.md`, `https://github.com/badlogic/pi-mono/blob/main/packages/coding-agent/docs/packages.md`
- Codex skills: `https://developers.openai.com/codex/skills`
- Codex plugin build/distribution docs: `https://developers.openai.com/codex/plugins/build`
- Superpowers Codex install guide: `https://github.com/obra/superpowers/blob/main/.codex/INSTALL.md`
- Gemini extension reference: `https://geminicli.com/docs/extensions/reference/`
- Gemini extension build guide: `https://geminicli.com/docs/extensions/writing-extensions/`
- Gemini skills: `https://geminicli.com/docs/cli/skills/`
- Gemini subagents: `https://geminicli.com/docs/core/subagents/`
- Gemini subagents announcement: `https://developers.googleblog.com/subagents-have-arrived-in-gemini-cli/`

View File

@@ -1,6 +1,6 @@
# Codex Spec (Config, Prompts, Skills, MCP)
# Codex Spec (Config, Prompts, Skills, Subagents, MCP)
Last verified: 2026-01-21
Last verified: 2026-04-19
## Primary sources
@@ -10,6 +10,7 @@ https://developers.openai.com/codex/config-advanced
https://developers.openai.com/codex/custom-prompts
https://developers.openai.com/codex/skills
https://developers.openai.com/codex/skills/create-skill
https://developers.openai.com/codex/subagents
https://developers.openai.com/codex/guides/agents-md
https://developers.openai.com/codex/mcp
```
@@ -49,10 +50,28 @@ https://developers.openai.com/codex/mcp
- Required fields are single-line with length limits (name ≤ 100 chars, description ≤ 500 chars).
- At startup, Codex loads only each skill's name/description; full content is injected when invoked.
- Skills can be repo-scoped in `.agents/skills/` and are discovered from the current working directory up to the repository root. User-scoped skills live in `~/.agents/skills/`.
- Inference: some existing tooling and user setups still use `.codex/skills/` and `~/.codex/skills/` as legacy compatibility paths, but those locations are not documented in the current OpenAI Codex skills docs linked above.
- Inference: some existing tooling and user setups still use `.codex/skills/` and `~/.codex/skills/` as compatibility paths, but those locations are not documented in the current OpenAI Codex skills docs linked above.
- Compound Engineering should avoid `~/.agents/skills` for managed installs because that shared root can shadow Copilot's native plugin skills. Use the Codex-specific compatibility root `~/.codex/skills/compound-engineering/<skill-name>/SKILL.md` for CE Codex skills, and track generated files with a CE manifest.
- Codex also supports admin-scoped skills in `/etc/codex/skills` plus built-in system skills bundled with Codex.
- Skills can be invoked explicitly using `/skills` or `$skill-name`.
## Subagents and custom agents
- Codex subagent workflows are enabled by default in current releases.
- Codex only spawns subagents when explicitly asked.
- Custom agent files are standalone TOML files under `~/.codex/agents/` for personal agents or `.codex/agents/` for project-scoped agents.
- Each TOML file defines one custom agent. Required fields:
- `name`
- `description`
- `developer_instructions`
- Optional fields can include `nickname_candidates`, `model`, `model_reasoning_effort`, `sandbox_mode`, `mcp_servers`, and `skills.config`.
- The TOML `name` field is the source of truth; matching the filename to the agent name is only a convention.
- CE converts Claude Markdown agents into Codex custom-agent TOML files under `~/.codex/agents/compound-engineering/`.
- CE keeps generated agents under `~/.codex/agents`, not `~/.agents/skills`, because `~/.agents` is shared across harnesses and can shadow native plugin installs.
- Generated TOML agent names preserve CE's hyphenated naming and include the source category, such as `review-ce-correctness-reviewer` and `research-ce-repo-research-analyst`.
- Empirical test on 2026-04-19 confirmed Codex discovers nested custom-agent TOML files under `~/.codex/agents/compound-engineering/` and accepts hyphenated TOML `name` values.
- Empirical plugin test on 2026-04-19 found Codex native plugins did not register custom agents bundled under plugin-local `agents/`, plugin-local `.codex/agents/`, or an undocumented plugin manifest `agents` field. Therefore CE still needs the custom Bun Codex installer for agent-heavy workflows.
## MCP (Model Context Protocol)
- MCP configuration lives in `~/.codex/config.toml` and is shared by the CLI and IDE extension. citeturn3view2turn3view5

View File

@@ -1,10 +1,14 @@
# GitHub Copilot Spec (Agents, Skills, MCP)
Last verified: 2026-02-14
Last verified: 2026-04-18
## Primary sources
```
https://docs.github.com/en/copilot/how-tos/copilot-cli/customize-copilot/create-custom-agents-for-cli
https://docs.github.com/en/copilot/reference/copilot-cli-reference/cli-command-reference
https://docs.github.com/en/copilot/reference/copilot-cli-reference/cli-plugin-reference
https://docs.github.com/en/copilot/concepts/agents/copilot-cli/about-cli-plugins
https://docs.github.com/en/copilot/reference/custom-agents-configuration
https://docs.github.com/en/copilot/concepts/agents/about-agent-skills
https://docs.github.com/en/copilot/concepts/agents/coding-agent/mcp-and-coding-agent
@@ -15,19 +19,50 @@ https://docs.github.com/en/copilot/concepts/agents/coding-agent/mcp-and-coding-a
| Scope | Path |
|-------|------|
| Project agents | `.github/agents/*.agent.md` |
| Project agents (Claude-compatible) | `.claude/agents/*.md` |
| Personal agents | `~/.copilot/agents/*.agent.md` |
| Personal agents (Claude-compatible) | `~/.claude/agents/*.md` |
| Plugin agents | `agents/` by default, overridable in plugin manifest |
| Project skills | `.github/skills/*/SKILL.md` |
| Project skills (auto-discovery) | `.agents/skills/*/SKILL.md` |
| Project instructions | `.github/copilot-instructions.md` |
| Path-specific instructions | `.github/instructions/*.instructions.md` |
| Project prompts | `.github/prompts/*.prompt.md` |
| Org/enterprise agents | `.github-private/agents/*.agent.md` |
| Personal skills | `~/.copilot/skills/*/SKILL.md` |
| Personal skills (auto-discovery) | `~/.agents/skills/*/SKILL.md` |
| Directory instructions | `AGENTS.md` (nearest ancestor wins) |
## Agents (.agent.md files)
- Custom agents are Markdown files with YAML frontmatter stored in `.github/agents/`.
- File extension is `.agent.md` (or `.md`). Filenames may only contain: `.`, `-`, `_`, `a-z`, `A-Z`, `0-9`.
- The documented custom-agent extension is singular `.agent.md`, not `.agents.md`.
- `description` is the only required frontmatter field.
- Current Copilot CLI docs do not list `.agents/agents` or `~/.agents/agents` as custom-agent discovery paths. The `.agents/*` convention is documented for skills (`.agents/skills`, `~/.agents/skills`), not agents.
- Copilot CLI also loads Claude-compatible agent directories (`.claude/agents`, `~/.claude/agents`) after native Copilot agent directories and before plugin agents.
- `AGENTS.md` files are supported as custom instruction/context files, not as custom-agent profile files.
## Plugins
- Copilot CLI plugins bundle reusable agents, skills, hooks, MCP servers, and related configuration.
- Install from a registered marketplace with:
```text
/plugin marketplace add EveryInc/compound-engineering-plugin
/plugin install compound-engineering@compound-engineering-plugin
```
- The terminal equivalents are:
```bash
copilot plugin marketplace add EveryInc/compound-engineering-plugin
copilot plugin install compound-engineering@compound-engineering-plugin
```
- Copilot CLI looks for plugin manifests at `.plugin/plugin.json`, `plugin.json`, `.github/plugin/plugin.json`, or `.claude-plugin/plugin.json`.
- Copilot CLI looks for marketplace manifests at `marketplace.json`, `.plugin/marketplace.json`, `.github/plugin/marketplace.json`, or `.claude-plugin/marketplace.json`.
- Therefore the existing repository-level `.claude-plugin/marketplace.json` and plugin-level `plugins/compound-engineering/.claude-plugin/plugin.json` are expected to be sufficient for Copilot native plugin install. Do not add a parallel `.github/plugin` surface unless Copilot requires a Copilot-only manifest field in the future.
### Frontmatter fields
@@ -72,6 +107,7 @@ Agent body content is limited to **30,000 characters**.
| Project (Claude-compatible) | `.claude/skills/*/SKILL.md` |
| Project (auto-discovery) | `.agents/skills/*/SKILL.md` |
| Personal | `~/.copilot/skills/*/SKILL.md` |
| Personal (auto-discovery) | `~/.agents/skills/*/SKILL.md` |
## MCP (Model Context Protocol)
@@ -115,8 +151,20 @@ Agent body content is limited to **30,000 characters**.
## Precedence
1. Repository-level agents
2. Organization-level agents (`.github-private`)
3. Enterprise-level agents (`.github-private`)
1. Built-in agents
2. `~/.copilot/agents`
3. `<project>/.github/agents`
4. `<parents>/.github/agents`
5. `~/.claude/agents`
6. `<project>/.claude/agents`
7. `<parents>/.claude/agents`
8. Plugin `agents/` directories
9. Remote organization/enterprise agents
Within a repo, `AGENTS.md` files in directories provide nearest-ancestor-wins instructions.
Skills use separate first-found-wins precedence. Current docs list project `.github/skills`, `.agents/skills`, `.claude/skills`, inherited project skills, personal `~/.copilot/skills`, personal `~/.agents/skills`, personal `~/.claude/skills`, then plugin skill directories.
Skills are deduplicated by the `name` field inside `SKILL.md`, not by directory name. If a personal or project skill has the same `name` as a plugin skill, Copilot uses the first-loaded personal/project skill and silently ignores the plugin skill. For example, a stale `~/.agents/skills/ce-plan/SKILL.md` with `name: ce-plan` would shadow the native plugin's `ce-plan`; it should not show as two separate skills in Copilot CLI. Use `/skills info ce-plan` to confirm which location won.
This makes Copilot cleanup different from Codex duplicate cleanup: stale CE skills in `~/.agents/skills`, `~/.copilot/skills`, `.agents/skills`, or `.github/skills` may not create visible duplicates, but they can silently override newer plugin-provided CE skills.

View File

@@ -1,4 +1,4 @@
# Cursor Spec (Rules, Commands, Skills, MCP)
# Cursor Spec (Plugin Marketplace, Rules, Commands, Skills, MCP)
Last verified: 2026-02-12
@@ -10,6 +10,27 @@ https://docs.cursor.com/context/rules-for-ai
https://docs.cursor.com/customize/model-context-protocol
```
## Plugin Marketplace
Compound Engineering is published through the Cursor Plugin Marketplace.
In Cursor Agent chat, install with:
```text
/add-plugin compound-engineering
```
Users can also search for "compound engineering" in the plugin marketplace.
The repo-owned marketplace files are:
```text
.cursor-plugin/marketplace.json
plugins/compound-engineering/.cursor-plugin/plugin.json
```
Do not use the old custom Bun converter/install path for Cursor.
## Config locations
| Scope | Path |

View File

@@ -1,6 +1,6 @@
# Gemini CLI Spec (GEMINI.md, Commands, Skills, MCP, Settings)
# Gemini CLI Spec (GEMINI.md, Commands, Skills, Subagents, Extensions)
Last verified: 2026-02-14
Last verified: 2026-04-18
## Primary sources
@@ -10,7 +10,9 @@ https://geminicli.com/docs/get-started/configuration/
https://geminicli.com/docs/cli/custom-commands/
https://geminicli.com/docs/cli/skills/
https://geminicli.com/docs/cli/creating-skills/
https://geminicli.com/docs/extensions/writing-extensions/
https://geminicli.com/docs/core/subagents/
https://geminicli.com/docs/extensions/reference/
https://developers.googleblog.com/subagents-have-arrived-in-gemini-cli/
https://google-gemini.github.io/gemini-cli/docs/tools/mcp-server.html
```
@@ -53,7 +55,10 @@ User request: {{args}}
## Skills (SKILL.md standard)
- A skill is a folder containing `SKILL.md` plus optional supporting files.
- Skills live in `.gemini/skills/`.
- Workspace skills live in `.gemini/skills/` or the `.agents/skills/` alias.
- User skills live in `~/.gemini/skills/` or the `~/.agents/skills/` alias.
- Extension skills live in an installed extension's `skills/` directory.
- Compound Engineering managed Gemini installs should use Gemini-owned roots (`~/.gemini/skills`, `~/.gemini/agents`, `~/.gemini/commands`) rather than `~/.agents/skills`, because `~/.agents/skills` can shadow Copilot plugin skills.
- `SKILL.md` uses YAML frontmatter with `name` and `description` fields.
- Gemini activates skills on demand via `activate_skill` tool based on description matching.
- The `description` field is critical — Gemini uses it to decide when to activate the skill.
@@ -71,6 +76,34 @@ description: Review code for security vulnerabilities and OWASP compliance
Detailed instructions for security review...
```
## Subagents
- Gemini CLI supports custom subagents as Markdown files with YAML frontmatter.
- Project subagents live in `.gemini/agents/*.md`.
- User subagents live in `~/.gemini/agents/*.md`.
- Extension subagents live in an installed extension's `agents/*.md` directory.
- Current Gemini docs, `/agents reload` command text, and Gemini CLI 0.38.2 implementation name only `.gemini/agents` and `~/.gemini/agents` for local subagent discovery. The `.agents/skills` and `~/.agents/skills` aliases apply to skills; Gemini does not currently read `~/.agents/agents` or `.agents/agents` as subagent discovery paths.
- Subagents can be invoked explicitly with `@agent-name` or selected automatically by description.
- Subagents run in isolated context loops and can have restricted tool access.
- Subagents cannot call other subagents, even if granted wildcard tool access.
Example:
```yaml
---
name: security-auditor
description: Specialized in finding security vulnerabilities in code.
kind: local
tools:
- read_file
- grep_search
model: inherit
max_turns: 10
---
You are a ruthless Security Auditor.
```
## MCP server configuration
- MCP servers are configured in `settings.json` under the `mcpServers` key.
@@ -103,8 +136,147 @@ Detailed instructions for security review...
## Extensions
- Extensions are distributable packages for Gemini CLI.
- They extend functionality with custom tools, hooks, and commands.
- Not used for plugin conversion (different purpose from Claude Code plugins).
- Install with `gemini extensions install <github-url-or-local-path>`.
- Unlike `gemini skills install`, current Gemini extension docs and local `gemini extensions install --help` output do not list a `--path` flag for installing an extension from a monorepo subdirectory.
- Remote extension installs are not local-only. Gemini supports Git repository distribution and GitHub Releases.
- For public gallery discovery and normal remote install, `gemini-extension.json` must be at the absolute root of the GitHub repository or release archive.
- Gemini CLI copies installed extensions under `~/.gemini/extensions`.
- `gemini extensions link <path>` creates a symlink for local development instead of copying the extension.
- Extension management commands run from the shell, not inside Gemini's interactive mode. Restart the Gemini session after install/update for commands and extension changes to take effect.
- Extensions can bundle commands, skills, subagents, hooks, MCP servers, context files, policies, settings, and themes.
- Every extension root must contain `gemini-extension.json`.
- Extension commands live in `commands/*.toml`.
- Extension skills live in `skills/<name>/SKILL.md`.
- Extension subagents live in `agents/*.md`.
- For Compound Engineering, native extension packaging is now the likely primary Gemini distribution path because it can preserve commands, skills, and subagents. Direct `.gemini/` writes should be treated as a legacy/custom install path unless retained for local development.
- Because this repo is a monorepo with the plugin under `plugins/compound-engineering/`, public Gemini extension distribution likely needs a generated extension-root source, a dedicated extension repo, or a distribution branch whose root is the Gemini extension root.
- Interim CE distribution should keep using the Bun installer, but change the writer to install into `~/.gemini/{skills,agents,commands}` with a manifest under `~/.gemini/compound-engineering`.
### Extension root shape
A distributable Gemini extension source should look like:
```text
gemini-extension.json
GEMINI.md # optional context file
skills/<skill-name>/SKILL.md
commands/<command>.toml
agents/<agent-name>.md
hooks/hooks.json # optional
policies/*.toml # optional
package.json # optional, if the extension has runtime code
```
Minimal manifest:
```json
{
"name": "compound-engineering",
"version": "1.0.0",
"description": "Compound Engineering workflows for Gemini CLI",
"contextFileName": "GEMINI.md"
}
```
Relevant manifest fields:
- `name`: Required. Local CLI validation allows letters, numbers, and dashes; docs recommend lowercase numbers/dashes and expect the extension directory name to match.
- `version`: Required. Validation warns if it is not standard semver.
- `description`: Optional but used by the public gallery.
- `contextFileName`: Optional. Defaults to `GEMINI.md` when present.
- `mcpServers`: Optional. Loaded like user `settings.json` MCP servers, except `trust` is ignored for extension MCP config.
- `settings`: Optional install-time/user configuration prompts; values are stored in extension `.env` or keychain for sensitive values.
- `excludeTools`, `migratedTo`, `plan`, `themes`: Optional target-specific behavior.
### Install commands
Install from a GitHub repository whose root is the extension root:
```bash
gemini extensions install https://github.com/EveryInc/compound-engineering-gemini
```
Install from a branch, tag, or commit:
```bash
gemini extensions install https://github.com/EveryInc/compound-engineering-gemini --ref stable
```
Install from a local extension root:
```bash
gemini extensions install ./dist/gemini-extension
```
Link a local extension root for development:
```bash
gemini extensions link ./dist/gemini-extension
```
Validate a local extension root:
```bash
gemini extensions validate ./dist/gemini-extension
```
Uninstall:
```bash
gemini extensions uninstall compound-engineering
```
### Release options
Gemini supports two remote release shapes:
1. **Git repository:** Users install the repository URL. The repository root must contain `gemini-extension.json`.
2. **GitHub Releases:** Users still install the repository URL. Gemini can use the latest release archive or a release tag via `--ref`; custom archives must be self-contained with `gemini-extension.json` at the archive root.
The public Gemini extension gallery indexes public GitHub repositories with the `gemini-cli-extension` topic when `gemini-extension.json` is at the repository or release archive root.
### Compound Engineering packaging implications
The current `plugins/compound-engineering/` source root is not currently a valid Gemini extension root because it lacks `gemini-extension.json`:
```bash
gemini extensions validate plugins/compound-engineering
# Configuration file not found at .../plugins/compound-engineering/gemini-extension.json
```
Adding only that manifest would make the root validate, but it would not be enough for correct agent packaging:
- CE agents currently live in nested category directories such as `agents/review/correctness-reviewer.md`.
- Gemini's local loader in `@google/gemini-cli` 0.38.2 reads only direct `*.md` files under the extension `agents/` directory.
- Gemini agent frontmatter is strict. CE's Claude-authored agent frontmatter can include Claude-only fields such as `color`, and some files use Claude string-form `tools: Read, Grep, Glob, Bash`; Gemini expects `tools` to be an array of valid Gemini tool names.
Therefore a proper CE Gemini extension should be generated or normalized, not just the Claude plugin root plus a manifest. This does not mean rewriting agent prompts into bespoke Gemini-only instructions. The agent bodies and most `name`/`description`/`model` frontmatter can usually pass through. The generated extension should:
- Copy pass-through `skills/<skill>/SKILL.md` directories that are not excluded for Gemini.
- Convert Claude agents into flat Gemini-compatible subagents under `agents/<agent-name>.md`.
- Strip or translate Claude-only frontmatter fields.
- Convert Claude tool names to Gemini tool names, or omit tools when there is no reliable mapping.
- Generate Gemini `commands/*.toml` only if CE ships source commands again.
- Include a `gemini-extension.json` at the generated extension root.
- Use `gemini extensions validate <generated-root>` in tests.
The same normalization is needed for the interim Bun installer, except the output root is `~/.gemini` instead of an extension root:
```text
~/.gemini/skills/<skill-name>/SKILL.md
~/.gemini/agents/<agent-name>.md
~/.gemini/commands/*.toml
~/.gemini/compound-engineering/install-manifest.json
```
Local smoke test on 2026-04-18 with Gemini CLI 0.38.2:
- A direct extension agent using CE/Claude-style `tools: Read, Grep, Glob, Bash` plus `color: blue` failed to load with Gemini validation errors: `tools: Expected array, received string` and `Unrecognized key(s) in object: 'color'`.
- A nested extension agent under `agents/review/nested-agent.md` produced no validation error because the loader only scans direct files under `agents/`; it was not discovered.
Do not place CE agents in `~/.agents/agents` as a shared cross-harness agent root. Gemini does not currently read it, and if Gemini adds that alias later, Claude/Copilot-shaped frontmatter could become a compatibility problem. For Gemini, use either a native extension with normalized `agents/*.md` files or a legacy/custom install under `~/.gemini/agents` with cleanup.
If the same Gemini agent name exists in multiple Gemini-read locations, Gemini registers user agents first, project agents next, and extension agents last. Later registrations override earlier ones by name. This avoids duplicate visible agent tools, but stale CE files in `~/.gemini/agents` can still emit validation errors or mask behavior when an extension is disabled, so cleanup remains necessary.
## Settings.json structure

View File

@@ -1,57 +1,92 @@
# OpenCode Spec (Config, Agents, Plugins)
Last verified: 2026-01-21
Last verified: 2026-04-19
## Primary sources
```
https://opencode.ai/docs/config
https://opencode.ai/docs/config/
https://opencode.ai/docs/tools
https://opencode.ai/docs/permissions
https://opencode.ai/docs/plugins/
https://opencode.ai/docs/agents/
https://opencode.ai/docs/commands/
https://opencode.ai/docs/skills
https://opencode.ai/config.json
```
## Config files and precedence
- OpenCode supports JSON and JSONC configs.
- Config sources are merged (not replaced), with a defined precedence order from remote → global → custom → project → `.opencode` directories → inline overrides.
- Global config is stored at `~/.config/opencode/opencode.json`, and project config is `opencode.json` in the project root.
- Custom config file and directory can be provided via `OPENCODE_CONFIG` and `OPENCODE_CONFIG_DIR`.
- The `.opencode` and `~/.config/opencode` directories use plural subdirectory names (`agents/`, `commands/`, `modes/`, `plugins/`, `skills/`, `tools/`, `themes/`), but singular names are also supported for backwards compatibility.
- OpenCode supports JSON and JSONC configs.
- Config sources are merged rather than replaced, with global and project config both participating in the final config.
- Global config is stored at `~/.config/opencode/opencode.json`, and project config is `opencode.json` in the project root.
- Custom config file and directory can be provided via `OPENCODE_CONFIG` and `OPENCODE_CONFIG_DIR`.
- The `.opencode` and `~/.config/opencode` directories use plural subdirectory names (`agents/`, `commands/`, `modes/`, `plugins/`, `skills/`, `tools/`, `themes/`).
## Core config keys
- `model` and `small_model` set the primary and lightweight models; `provider` configures provider options.
- `tools` is still supported but deprecated; permissions are now the canonical control surface.
- `permission` controls tool approvals and can be configured globally or per tool, including pattern-based rules.
- `mcp`, `instructions`, and `disabled_providers` are supported config sections.
- `plugin` can list npm packages to load at startup.
- `model` and `small_model` set the primary and lightweight models; `provider` configures provider options.
- `tools` is still supported but deprecated as of OpenCode v1.1.1; permissions are now the canonical control surface.
- `permission` controls tool approvals and can be configured globally or per tool, including pattern-based rules.
- `mcp`, `instructions`, `disabled_providers`, `enabled_providers`, and `plugin` are supported config sections.
- `plugin` can list npm packages to load at startup.
- `skills.paths` and `skills.urls` can add extra skill discovery locations, but CE should not depend on them until the layout is smoke-tested locally with OpenCode.
## Tools
- OpenCode ships with built-in tools, and permissions determine whether each tool runs automatically, requires approval, or is denied.
- Tools are enabled by default; permissions provide the gating mechanism.
- OpenCode ships with built-in tools, and permissions determine whether each tool runs automatically, requires approval, or is denied.
- Tools are enabled by default; permissions provide the gating mechanism.
## Permissions
- Permissions resolve to `allow`, `ask`, or `deny` and can be configured globally or per tool, with pattern-based rules.
- Defaults are permissive, with special cases such as `.env` file reads.
- Agent-level permissions override the global permission block.
- Permissions resolve to `allow`, `ask`, or `deny` and can be configured globally or per tool, with pattern-based rules.
- Defaults are permissive, with special cases such as `.env` file reads.
- Agent-level permissions override the global permission block.
## Agents
- Agents can be configured in `opencode.json` or as markdown files in `~/.config/opencode/agents/` or `.opencode/agents/`.
- Agent config supports `mode`, `model`, `temperature`, `tools`, and `permission`, and agent configs override global settings.
- Model IDs use the `provider/model-id` format.
- Agents can be configured in `opencode.json` or as markdown files in `~/.config/opencode/agents/` or `.opencode/agents/`.
- Agent config supports `mode`, `model`, `variant`, `temperature`, `top_p`, `hidden`, `steps`, `options`, `permission`, and other schema fields. `tools` still exists but is deprecated.
- `mode` can be `primary`, `subagent`, or `all`; omitted mode defaults to `all`.
- `hidden: true` hides subagents from the `@` autocomplete menu.
- `permission.task` controls which subagents an agent may invoke.
- Model IDs use the `provider/model-id` format.
## Skills
- Skills are reusable `SKILL.md` definitions loaded on demand through OpenCode's native `skill` tool.
- OpenCode searches direct child skill directories in its built-in roots:
- `.opencode/skills/<name>/SKILL.md`
- `~/.config/opencode/skills/<name>/SKILL.md`
- `.claude/skills/<name>/SKILL.md`
- `~/.claude/skills/<name>/SKILL.md`
- `.agents/skills/<name>/SKILL.md`
- `~/.agents/skills/<name>/SKILL.md`
- The config schema also exposes `skills.paths` and `skills.urls` for extra skill sources. Do not switch CE to those until tested against a local OpenCode install; direct `~/.config/opencode/skills/<name>/SKILL.md` remains the stable writer shape.
- Skill frontmatter recognizes `name`, `description`, `license`, `compatibility`, and `metadata`; unknown fields are ignored.
- Skill names must be lowercase alphanumeric with single hyphen separators and must match the directory name.
## Commands
- Commands can be configured in `opencode.json` or as Markdown files in `~/.config/opencode/commands/` or `.opencode/commands/`.
- Markdown command frontmatter can include fields such as `description`, `agent`, `model`, and `subtask`; the body becomes the prompt template.
- If a command targets an agent whose mode is `subagent`, OpenCode invokes it as a subagent by default. `subtask: true` can force subagent invocation.
## Plugins and events
- Local plugins are loaded from `.opencode/plugin/` (project) and `~/.config/opencode/plugin/` (global). npm plugins can be listed in `plugin` in `opencode.json`.
- Plugins are loaded in a defined order across config and plugin directories.
- Plugins export a function that returns a map of event handlers; the plugins doc lists supported event categories.
- Local plugins are loaded from `.opencode/plugins/` and `~/.config/opencode/plugins/`. npm plugins can be listed in `plugin` in `opencode.json`.
- Plugins are JavaScript/TypeScript modules. Each exported plugin function receives OpenCode context and returns hooks/event handlers.
- Local plugins and custom tools can use npm dependencies declared in a `package.json` in the OpenCode config directory; OpenCode runs `bun install` at startup.
## Notes for this repository
- Config docs describe plural subdirectory names, while the plugins doc uses `.opencode/plugin/`. This implies singular paths remain accepted for backwards compatibility, but plural paths are the canonical structure.
- The current documented global CE install root should stay `~/.config/opencode`, not `~/.agents`, to avoid conflicts with harnesses that also read `~/.agents`.
- The current CE writer shape is still appropriate in April 2026:
- `~/.config/opencode/opencode.json`
- `~/.config/opencode/agents/*.md`
- `~/.config/opencode/commands/*.md` only when a source plugin ships commands
- `~/.config/opencode/plugins/*.ts`
- `~/.config/opencode/skills/*/SKILL.md`
- OpenCode's plugin system is useful for JS/TS hooks and custom tools, but current docs do not describe a native marketplace command that consumes CE's `.claude-plugin/marketplace.json` and installs the full skills/agents/commands payload.
- Keep the custom Bun writer until OpenCode documents a native distribution path for packaged skills and agents.
- The `compound-engineering` plugin currently emits skills and subagent Markdown files for OpenCode. It should not emit deprecated `tools` config; permission config is enough for non-default permission modes.

View File

@@ -1,477 +0,0 @@
# Windsurf Editor Global Configuration Guide
> **Purpose**: Technical reference for programmatically creating and managing Windsurf's global Skills, Workflows, and Rules.
>
> **Source**: Official Windsurf documentation at [docs.windsurf.com](https://docs.windsurf.com) + local file analysis.
>
> **Last Updated**: February 2026
---
## Table of Contents
1. [Overview](#overview)
2. [Base Directory Structure](#base-directory-structure)
3. [Skills](#skills)
4. [Workflows](#workflows)
5. [Rules](#rules)
6. [Memories](#memories)
7. [System-Level Configuration (Enterprise)](#system-level-configuration-enterprise)
8. [Programmatic Creation Reference](#programmatic-creation-reference)
9. [Best Practices](#best-practices)
---
## Overview
Windsurf provides three main customization mechanisms:
| Feature | Purpose | Invocation |
|---------|---------|------------|
| **Skills** | Complex multi-step tasks with supporting resources | Automatic (progressive disclosure) or `@skill-name` |
| **Workflows** | Reusable step-by-step procedures | Slash command `/workflow-name` |
| **Rules** | Behavioral guidelines and preferences | Trigger-based (always-on, glob, manual, or model decision) |
All three support both **workspace-level** (project-specific) and **global** (user-wide) scopes.
---
## Base Directory Structure
### Global Configuration Root
| OS | Path |
|----|------|
| **Windows** | `C:\Users\{USERNAME}\.codeium\windsurf\` |
| **macOS** | `~/.codeium/windsurf/` |
| **Linux** | `~/.codeium/windsurf/` |
### Directory Layout
```
~/.codeium/windsurf/
├── skills/ # Global skills (directories)
│ └── {skill-name}/
│ └── SKILL.md
├── global_workflows/ # Global workflows (flat .md files)
│ └── {workflow-name}.md
├── rules/ # Global rules (flat .md files)
│ └── {rule-name}.md
├── memories/
│ ├── global_rules.md # Always-on global rules (plain text)
│ └── *.pb # Auto-generated memories (protobuf)
├── mcp_config.json # MCP server configuration
└── user_settings.pb # User settings (protobuf)
```
---
## Skills
Skills bundle instructions with supporting resources for complex, multi-step tasks. Cascade uses **progressive disclosure** to automatically invoke skills when relevant.
### Storage Locations
| Scope | Location |
|-------|----------|
| **Global** | `~/.codeium/windsurf/skills/{skill-name}/SKILL.md` |
| **Workspace** | `.windsurf/skills/{skill-name}/SKILL.md` |
### Directory Structure
Each skill is a **directory** (not a single file) containing:
```
{skill-name}/
├── SKILL.md # Required: Main skill definition
├── references/ # Optional: Reference documentation
├── assets/ # Optional: Images, diagrams, etc.
├── scripts/ # Optional: Helper scripts
└── {any-other-files} # Optional: Templates, configs, etc.
```
### SKILL.md Format
```markdown
---
name: skill-name
description: Brief description shown to model to help it decide when to invoke the skill
---
# Skill Title
Instructions for the skill go here in markdown format.
## Section 1
Step-by-step guidance...
## Section 2
Reference supporting files using relative paths:
- See [deployment-checklist.md](./deployment-checklist.md)
- Run script: [deploy.sh](./scripts/deploy.sh)
```
### Required YAML Frontmatter Fields
| Field | Required | Description |
|-------|----------|-------------|
| `name` | **Yes** | Unique identifier (lowercase letters, numbers, hyphens only). Must match directory name. |
| `description` | **Yes** | Explains what the skill does and when to use it. Critical for automatic invocation. |
### Naming Convention
- Use **lowercase-kebab-case**: `deploy-to-staging`, `code-review`, `setup-dev-environment`
- Name must match the directory name exactly
### Invocation Methods
1. **Automatic**: Cascade automatically invokes when request matches skill description
2. **Manual**: Type `@skill-name` in Cascade input
### Example: Complete Skill
```
~/.codeium/windsurf/skills/deploy-to-production/
├── SKILL.md
├── deployment-checklist.md
├── rollback-procedure.md
└── config-template.yaml
```
**SKILL.md:**
```markdown
---
name: deploy-to-production
description: Guides the deployment process to production with safety checks. Use when deploying to prod, releasing, or pushing to production environment.
---
## Pre-deployment Checklist
1. Run all tests
2. Check for uncommitted changes
3. Verify environment variables
## Deployment Steps
Follow these steps to deploy safely...
See [deployment-checklist.md](./deployment-checklist.md) for full checklist.
See [rollback-procedure.md](./rollback-procedure.md) if issues occur.
```
---
## Workflows
Workflows define step-by-step procedures invoked via slash commands. They guide Cascade through repetitive tasks.
### Storage Locations
| Scope | Location |
|-------|----------|
| **Global** | `~/.codeium/windsurf/global_workflows/{workflow-name}.md` |
| **Workspace** | `.windsurf/workflows/{workflow-name}.md` |
### File Format
Workflows are **single markdown files** (not directories):
```markdown
---
description: Short description of what the workflow does
---
# Workflow Title
> Arguments: [optional arguments description]
Step-by-step instructions in markdown.
1. First step
2. Second step
3. Third step
```
### Required YAML Frontmatter Fields
| Field | Required | Description |
|-------|----------|-------------|
| `description` | **Yes** | Short title/description shown in UI |
### Invocation
- Slash command: `/workflow-name`
- Filename becomes the command (e.g., `deploy.md` → `/deploy`)
### Constraints
- **Character limit**: 12,000 characters per workflow file
- Workflows can call other workflows: Include instructions like "Call `/other-workflow`"
### Example: Complete Workflow
**File**: `~/.codeium/windsurf/global_workflows/address-pr-comments.md`
```markdown
---
description: Address all PR review comments systematically
---
# Address PR Comments
> Arguments: [PR number]
1. Check out the PR branch: `gh pr checkout [id]`
2. Get comments on PR:
```bash
gh api --paginate repos/[owner]/[repo]/pulls/[id]/comments | jq '.[] | {user: .user.login, body, path, line}'
```
3. For EACH comment:
a. Print: "(index). From [user] on [file]:[lines] — [body]"
b. Analyze the file and line range
c. If unclear, ask for clarification
d. Make the change before moving to next comment
4. Summarize what was done and which comments need attention
```
---
## Rules
Rules provide persistent behavioral guidelines that influence how Cascade responds.
### Storage Locations
| Scope | Location |
|-------|----------|
| **Global** | `~/.codeium/windsurf/rules/{rule-name}.md` |
| **Workspace** | `.windsurf/rules/{rule-name}.md` |
### File Format
Rules are **single markdown files**:
```markdown
---
description: When to use this rule
trigger: activation_mode
globs: ["*.py", "src/**/*.ts"]
---
Rule instructions in markdown format.
- Guideline 1
- Guideline 2
- Guideline 3
```
### YAML Frontmatter Fields
| Field | Required | Description |
|-------|----------|-------------|
| `description` | **Yes** | Describes when to use the rule |
| `trigger` | Optional | Activation mode (see below) |
| `globs` | Optional | File patterns for glob trigger |
### Activation Modes (trigger field)
| Mode | Value | Description |
|------|-------|-------------|
| **Manual** | `manual` | Activated via `@mention` in Cascade input |
| **Always On** | `always` | Always applied to every conversation |
| **Model Decision** | `model_decision` | Model decides based on description |
| **Glob** | `glob` | Applied when working with files matching pattern |
### Constraints
- **Character limit**: 12,000 characters per rule file
### Example: Complete Rule
**File**: `~/.codeium/windsurf/rules/python-style.md`
```markdown
---
description: Python coding standards and style guidelines. Use when writing or reviewing Python code.
trigger: glob
globs: ["*.py", "**/*.py"]
---
# Python Coding Guidelines
- Use type hints for all function parameters and return values
- Follow PEP 8 style guide
- Use early returns when possible
- Always add docstrings to public functions and classes
- Prefer f-strings over .format() or % formatting
- Use pathlib instead of os.path for file operations
```
---
## Memories
### Global Rules (Always-On)
**Location**: `~/.codeium/windsurf/memories/global_rules.md`
This is a special file for rules that **always apply** to all conversations. Unlike rules in the `rules/` directory, this file:
- Does **not** require YAML frontmatter
- Is plain text/markdown
- Is always active (no trigger configuration)
**Format:**
```markdown
Plain text rules that always apply to all conversations.
- Rule 1
- Rule 2
- Rule 3
```
### Auto-Generated Memories
Cascade automatically creates memories during conversations, stored as `.pb` (protobuf) files in `~/.codeium/windsurf/memories/`. These are managed by Windsurf and should not be manually edited.
---
## System-Level Configuration (Enterprise)
Enterprise organizations can deploy system-level configurations that apply globally and cannot be modified by end users.
### System-Level Paths
| Type | Windows | macOS | Linux/WSL |
|------|---------|-------|-----------|
| **Rules** | `C:\ProgramData\Windsurf\rules\*.md` | `/Library/Application Support/Windsurf/rules/*.md` | `/etc/windsurf/rules/*.md` |
| **Workflows** | `C:\ProgramData\Windsurf\workflows\*.md` | `/Library/Application Support/Windsurf/workflows/*.md` | `/etc/windsurf/workflows/*.md` |
### Precedence Order
When items with the same name exist at multiple levels:
1. **System** (highest priority) - Organization-wide, deployed by IT
2. **Workspace** - Project-specific in `.windsurf/`
3. **Global** - User-defined in `~/.codeium/windsurf/`
4. **Built-in** - Default items provided by Windsurf
---
## Programmatic Creation Reference
### Quick Reference Table
| Type | Path Pattern | Format | Key Fields |
|------|--------------|--------|------------|
| **Skill** | `skills/{name}/SKILL.md` | YAML frontmatter + markdown | `name`, `description` |
| **Workflow** | `global_workflows/{name}.md` (global) or `workflows/{name}.md` (workspace) | YAML frontmatter + markdown | `description` |
| **Rule** | `rules/{name}.md` | YAML frontmatter + markdown | `description`, `trigger`, `globs` |
| **Global Rules** | `memories/global_rules.md` | Plain text/markdown | None |
### Minimal Templates
#### Skill (SKILL.md)
```markdown
---
name: my-skill
description: What this skill does and when to use it
---
Instructions here.
```
#### Workflow
```markdown
---
description: What this workflow does
---
1. Step one
2. Step two
```
#### Rule
```markdown
---
description: When this rule applies
trigger: model_decision
---
- Guideline one
- Guideline two
```
### Validation Checklist
When programmatically creating items:
- [ ] **Skills**: Directory exists with `SKILL.md` inside
- [ ] **Skills**: `name` field matches directory name exactly
- [ ] **Skills**: Name uses only lowercase letters, numbers, hyphens
- [ ] **Workflows/Rules**: File is `.md` extension
- [ ] **All**: YAML frontmatter uses `---` delimiters
- [ ] **All**: `description` field is present and meaningful
- [ ] **All**: File size under 12,000 characters (workflows/rules)
---
## Best Practices
### Writing Effective Descriptions
The `description` field is critical for automatic invocation. Be specific:
**Good:**
```yaml
description: Guides deployment to staging environment with pre-flight checks. Use when deploying to staging, testing releases, or preparing for production.
```
**Bad:**
```yaml
description: Deployment stuff
```
### Formatting Guidelines
- Use bullet points and numbered lists (easier for Cascade to follow)
- Use markdown headers to organize sections
- Keep rules concise and specific
- Avoid generic rules like "write good code" (already built-in)
### XML Tags for Grouping
XML tags can effectively group related rules:
```markdown
<coding_guidelines>
- Use early returns when possible
- Always add documentation for new functions
- Prefer composition over inheritance
</coding_guidelines>
<testing_requirements>
- Write unit tests for all public methods
- Maintain 80% code coverage
</testing_requirements>
```
### Skills vs Rules vs Workflows
| Use Case | Recommended |
|----------|-------------|
| Multi-step procedure with supporting files | **Skill** |
| Repeatable CLI/automation sequence | **Workflow** |
| Coding style preferences | **Rule** |
| Project conventions | **Rule** |
| Deployment procedure | **Skill** or **Workflow** |
| Code review checklist | **Skill** |
---
## Additional Resources
- **Official Documentation**: [docs.windsurf.com](https://docs.windsurf.com)
- **Skills Specification**: [agentskills.io](https://agentskills.io/home)
- **Rule Templates**: [windsurf.com/editor/directory](https://windsurf.com/editor/directory)

View File

@@ -17,7 +17,7 @@ After installing, run `/ce-setup` in any project. It diagnoses your environment,
### Core Workflow
The primary entry points for engineering work, invoked as slash commands:
The primary entry points for engineering work are skills invoked with slash syntax:
| Skill | Description |
|-------|-------------|

708
src/commands/cleanup.ts Normal file
View File

@@ -0,0 +1,708 @@
import { defineCommand } from "citty"
import fs from "fs/promises"
import os from "os"
import path from "path"
import { fileURLToPath } from "url"
import { loadClaudePlugin } from "../parsers/claude"
import { convertClaudeToCodex } from "../converters/claude-to-codex"
import { convertClaudeToCopilot } from "../converters/claude-to-copilot"
import { convertClaudeToDroid } from "../converters/claude-to-droid"
import { convertClaudeToGemini } from "../converters/claude-to-gemini"
import { convertClaudeToKiro } from "../converters/claude-to-kiro"
import { convertClaudeToOpenCode } from "../converters/claude-to-opencode"
import { convertClaudeToPi } from "../converters/claude-to-pi"
import {
getLegacyCodexArtifacts,
getLegacyCopilotArtifacts,
getLegacyDroidArtifacts,
getLegacyGeminiArtifacts,
getLegacyKiroArtifacts,
getLegacyOpenCodeArtifacts,
getLegacyPiArtifacts,
getLegacyPluginArtifacts,
getLegacyWindsurfArtifacts,
} from "../data/plugin-legacy-artifacts"
import { moveLegacyArtifactToBackup } from "../targets/managed-artifacts"
import { isManagedCodexAgentsSymlink, readCodexInstallManifest, resolveCodexManagedRoots } from "../targets/codex"
import { isSafeManagedPath, pathExists, readJson, sanitizePathName } from "../utils/files"
import { resolveOpenCodeGlobalRoot } from "../utils/opencode-config"
import { expandHome, resolveTargetHome } from "../utils/resolve-home"
// Every harness target the cleanup command knows how to scan. The CLI's
// "all" default expands to this list, and cleanup runs in this order.
const cleanupTargets = ["codex", "opencode", "pi", "gemini", "kiro", "copilot", "droid", "qwen", "windsurf"] as const
type CleanupTarget = typeof cleanupTargets[number]

// Outcome of cleaning a single root for a single target: `moved` counts how
// many stale artifacts were relocated into the backup area under `root`.
type CleanupResult = {
  target: CleanupTarget
  root: string
  moved: number
}
// CLI entry point for `cleanup`: scans the selected harness roots for
// artifacts left behind by previous compound-engineering installs and backs
// them up (files are moved aside, not deleted).
export default defineCommand({
  meta: {
    name: "cleanup",
    description: "Back up stale compound-engineering artifacts from previous installs",
  },
  args: {
    plugin: {
      type: "positional",
      required: false,
      description: "Plugin name or local plugin path (default: compound-engineering)",
    },
    target: {
      type: "string",
      default: "all",
      description: "Target to clean: codex | opencode | pi | gemini | kiro | copilot | droid | qwen | windsurf | all",
    },
    output: {
      type: "string",
      alias: "o",
      description: "Workspace/project root for workspace-scoped legacy installs",
    },
    // Per-target root overrides below; each falls back to that harness's
    // conventional install location when omitted.
    codexHome: {
      type: "string",
      alias: "codex-home",
      description: "Codex root to clean (default: ~/.codex)",
    },
    piHome: {
      type: "string",
      alias: "pi-home",
      description: "Pi root to clean (default: ~/.pi/agent)",
    },
    opencodeHome: {
      type: "string",
      alias: "opencode-home",
      description: "OpenCode root to clean (default: $OPENCODE_CONFIG_DIR or ~/.config/opencode)",
    },
    geminiHome: {
      type: "string",
      alias: "gemini-home",
      description: "Gemini root to clean (default: ~/.gemini)",
    },
    kiroHome: {
      type: "string",
      alias: "kiro-home",
      description: "Kiro root to clean (default: ./.kiro)",
    },
    copilotHome: {
      type: "string",
      alias: "copilot-home",
      description: "Copilot root to clean (default: ~/.copilot)",
    },
    droidHome: {
      type: "string",
      alias: "droid-home",
      description: "Droid root to clean (default: ~/.factory)",
    },
    qwenHome: {
      type: "string",
      alias: "qwen-home",
      description: "Qwen root to clean for legacy Bun installs (default: ~/.qwen)",
    },
    windsurfHome: {
      type: "string",
      alias: "windsurf-home",
      description: "Deprecated Windsurf root to clean (default: ~/.codeium/windsurf)",
    },
    agentsHome: {
      type: "string",
      alias: "agents-home",
      description: "Shared .agents root to clean for shadowing skills (default: ~/.agents)",
    },
  },
  // Loads the plugin, resolves which targets and roots to scan, performs the
  // cleanup per target, then prints a per-root summary plus a grand total.
  async run({ args }) {
    const pluginPath = await resolveCleanupPluginPath(args.plugin ? String(args.plugin) : "compound-engineering")
    const plugin = await loadClaudePlugin(pluginPath)
    // Guard: the legacy-artifact knowledge used below is CE-specific, so
    // refuse any other plugin rather than move files we know nothing about.
    if (plugin.manifest.name !== "compound-engineering") {
      throw new Error("Cleanup currently supports only the compound-engineering plugin.")
    }
    const targetNames = resolveCleanupTargets(String(args.target))
    const outputRoot = resolveWorkspaceRoot(args.output)
    // Record which roots the user set explicitly; several targets branch on
    // this to choose between workspace-scoped and home-scoped scanning.
    const hasExplicitGeminiHome = hasExplicitValue(args.geminiHome)
    const hasExplicitOpenCodeHome = hasExplicitValue(args.opencodeHome)
    const roots = {
      codexHome: resolveTargetHome(args.codexHome, path.join(os.homedir(), ".codex")),
      piHome: resolveTargetHome(args.piHome, path.join(os.homedir(), ".pi", "agent")),
      // Mirror install: respect OPENCODE_CONFIG_DIR before falling back to the
      // XDG default so cleanup scans the same directory install wrote to.
      opencodeHome: resolveTargetHome(args.opencodeHome, resolveOpenCodeGlobalRoot()),
      geminiHome: resolveTargetHome(args.geminiHome, path.join(os.homedir(), ".gemini")),
      // Kiro defaults to the workspace, not $HOME, matching its install shape.
      kiroHome: resolveTargetHome(args.kiroHome, path.join(outputRoot, ".kiro")),
      copilotHome: resolveTargetHome(args.copilotHome, path.join(os.homedir(), ".copilot")),
      droidHome: resolveTargetHome(args.droidHome, path.join(os.homedir(), ".factory")),
      qwenHome: resolveTargetHome(args.qwenHome, path.join(os.homedir(), ".qwen")),
      windsurfHome: resolveTargetHome(args.windsurfHome, path.join(os.homedir(), ".codeium", "windsurf")),
      agentsHome: resolveTargetHome(args.agentsHome, path.join(os.homedir(), ".agents")),
      workspaceRoot: outputRoot,
      hasExplicitOutput: hasExplicitValue(args.output),
      hasExplicitGeminiHome,
      hasExplicitOpenCodeHome,
    }
    // Run targets sequentially; each may itself fan out over multiple roots.
    const results: CleanupResult[] = []
    for (const target of targetNames) {
      results.push(...await cleanupTarget(target, plugin, roots))
    }
    const total = results.reduce((sum, result) => sum + result.moved, 0)
    for (const result of results) {
      console.log(`Cleaned ${result.target} at ${result.root}: backed up ${result.moved} artifact(s)`)
    }
    console.log(`Cleanup complete for ${plugin.manifest.name}: backed up ${total} artifact(s).`)
  },
})
// Dispatch cleanup for one target. Each branch resolves which root(s) to
// scan — honoring explicit per-target overrides first, then `--output`, then
// the harness default — and returns one CleanupResult per scanned root.
async function cleanupTarget(
  target: CleanupTarget,
  plugin: Awaited<ReturnType<typeof loadClaudePlugin>>,
  roots: {
    codexHome: string
    piHome: string
    opencodeHome: string
    geminiHome: string
    kiroHome: string
    copilotHome: string
    droidHome: string
    qwenHome: string
    windsurfHome: string
    agentsHome: string
    workspaceRoot: string
    hasExplicitOutput: boolean
    hasExplicitGeminiHome: boolean
    hasExplicitOpenCodeHome: boolean
  },
): Promise<CleanupResult[]> {
  switch (target) {
    case "codex": {
      // Codex has two scan areas: its own root plus the shared .agents root.
      const codexResult = await cleanupCodex(plugin, roots.codexHome)
      const sharedResult = await cleanupCodexSharedAgents(plugin, roots.agentsHome, roots.codexHome)
      return [codexResult, sharedResult]
    }
    case "opencode": {
      // Mirror install-time root resolution: an explicit `--opencode-home` is
      // always authoritative; otherwise `--output <workspace>` points cleanup
      // at the workspace-scoped `.opencode` directory install wrote to; with
      // neither flag, fall back to the OpenCode global root
      // (OPENCODE_CONFIG_DIR / XDG default).
      const openCodeRoot = roots.hasExplicitOpenCodeHome
        ? roots.opencodeHome
        : roots.hasExplicitOutput
          ? resolveOpenCodeWorkspaceRoot(roots.workspaceRoot)
          : roots.opencodeHome
      return [await cleanupOpenCode(plugin, openCodeRoot)]
    }
    case "pi":
      return [await cleanupPi(plugin, roots.piHome)]
    case "gemini": {
      // Default install/convert output lands in `<cwd>/.gemini`, so that is
      // the primary scan root. An explicit `--gemini-home` overrides it, and
      // when neither flag is set we also sweep `~/.gemini` as a safety net
      // for installs made by older CLIs.
      if (roots.hasExplicitGeminiHome) {
        return [await cleanupGemini(plugin, roots.geminiHome)]
      }
      const workspaceGeminiRoot = resolveGeminiWorkspaceRoot(roots.workspaceRoot)
      if (roots.hasExplicitOutput) {
        return [await cleanupGemini(plugin, workspaceGeminiRoot)]
      }
      // Deduplicate before fanning out: when cwd === $HOME the workspace and
      // home roots name the same directory, and two concurrent passes would
      // race on renames into legacy-backup (intermittent ENOENT). Both paths
      // are absolute and normalized by this point, so string equality is a
      // safe dedup key.
      const geminiRoots = await dedupeRoots([workspaceGeminiRoot, roots.geminiHome])
      return await Promise.all(geminiRoots.map((geminiRoot) => cleanupGemini(plugin, geminiRoot)))
    }
    case "kiro":
      return [await cleanupKiro(plugin, roots.kiroHome)]
    case "copilot": {
      // Same rename-race guard as Gemini: overlapping `--copilot-home`,
      // `--output`, or `--agents-home` values must not drive concurrent
      // passes over one directory. Defaults never collide, so the dedup is
      // mostly defensive, but it keeps Promise.all fan-outs consistent.
      let copilotRoots: string[]
      if (roots.hasExplicitOutput) {
        copilotRoots = [resolveCopilotWorkspaceRoot(roots.workspaceRoot)]
      } else {
        copilotRoots = await dedupeRoots([roots.copilotHome, resolveCopilotWorkspaceRoot(roots.workspaceRoot), roots.agentsHome])
      }
      return await Promise.all(copilotRoots.map((copilotRoot) => cleanupCopilot(plugin, copilotRoot)))
    }
    case "droid": {
      const droidRoot = roots.hasExplicitOutput ? resolveDroidWorkspaceRoot(roots.workspaceRoot) : roots.droidHome
      return [await cleanupDroid(plugin, droidRoot)]
    }
    case "qwen":
      return [await cleanupQwen(plugin, roots.qwenHome)]
    case "windsurf": {
      // Dedup after path resolution so overlapping overrides cannot produce
      // concurrent renames out of the same directory.
      let windsurfRoots: string[]
      if (roots.hasExplicitOutput) {
        windsurfRoots = [resolveWindsurfWorkspaceRoot(roots.workspaceRoot)]
      } else {
        windsurfRoots = await dedupeRoots([roots.windsurfHome, resolveWindsurfWorkspaceRoot(roots.workspaceRoot)])
      }
      return await Promise.all(windsurfRoots.map((windsurfRoot) => cleanupWindsurf(plugin, windsurfRoot)))
    }
  }
}
// Back up stale Codex artifacts for `plugin` under `codexRoot`. Nothing is
// deleted: `moveIfExists` relocates each stale file into the managed backup
// area and reports how many moves actually happened.
async function cleanupCodex(plugin: Awaited<ReturnType<typeof loadClaudePlugin>>, codexRoot: string): Promise<CleanupResult> {
  // Re-run the converter so we know exactly what a *current* install would
  // emit; anything on disk outside that set is a backup candidate.
  const bundle = convertClaudeToCodex(plugin, {
    agentMode: "subagent",
    inferTemperature: true,
    permissions: "none",
  })
  const artifacts = getLegacyCodexArtifacts(bundle)

  // Names/files a current install still produces — these must not be moved.
  const liveSkillNames = new Set<string>()
  for (const skill of bundle.skillDirs) liveSkillNames.add(sanitizePathName(skill.name))
  for (const skill of bundle.generatedSkills) liveSkillNames.add(sanitizePathName(skill.name))
  const livePromptFiles = new Set(bundle.prompts.map((prompt) => `${sanitizePathName(prompt.name)}.md`))
  const liveAgentFiles = new Set((bundle.agents ?? []).map((agent) => `${sanitizePathName(agent.name)}.toml`))

  const managedDir = path.join(codexRoot, plugin.manifest.name)
  let backedUp = 0

  // Known-legacy skills: always sweep the flat skills dir; sweep the
  // plugin-namespaced dir too unless the name is still emitted today.
  for (const skillName of artifacts.skills) {
    backedUp += await moveIfExists(managedDir, "skills", path.join(codexRoot, "skills"), skillName, "Codex")
    if (liveSkillNames.has(skillName)) continue
    backedUp += await moveIfExists(
      managedDir,
      "skills",
      path.join(codexRoot, "skills", plugin.manifest.name),
      skillName,
      "Codex",
    )
  }
  for (const promptFile of artifacts.prompts) {
    backedUp += await moveIfExists(managedDir, "prompts", path.join(codexRoot, "prompts"), promptFile, "Codex")
  }

  // Manifest-driven migration: the previous install's manifest records what
  // it wrote. Any entry absent from the current bundle gets backed up. This
  // catches artifacts whose *type or emission format* changed between CE
  // versions (e.g. agents once emitted as generated skills under
  // `skills/<plugin>/<agent-name>/` that are now TOML custom agents under
  // `agents/<plugin>/<name>.toml`). The historical allow-list above only
  // covers renamed/removed names, not same-named artifacts that moved.
  const installedManifest = await readCodexInstallManifest(codexRoot, plugin.manifest.name)
  if (installedManifest) {
    for (const skillName of installedManifest.skills) {
      if (liveSkillNames.has(skillName)) continue
      backedUp += await moveIfExists(
        managedDir,
        "skills",
        path.join(codexRoot, "skills", plugin.manifest.name),
        skillName,
        "Codex",
      )
    }
    for (const promptFile of installedManifest.prompts) {
      if (livePromptFiles.has(promptFile)) continue
      backedUp += await moveIfExists(managedDir, "prompts", path.join(codexRoot, "prompts"), promptFile, "Codex")
    }
    for (const agentFile of installedManifest.agents) {
      if (liveAgentFiles.has(agentFile)) continue
      backedUp += await moveIfExists(
        managedDir,
        "agents",
        path.join(codexRoot, "agents", plugin.manifest.name),
        agentFile,
        "Codex",
      )
    }
  }

  return { target: "codex", root: codexRoot, moved: backedUp }
}
/**
 * Back up CE-owned legacy symlinks from the shared `~/.agents/skills/` tree.
 * Returns how many entries were moved to the backup directory.
 */
async function cleanupCodexSharedAgents(
  plugin: Awaited<ReturnType<typeof loadClaudePlugin>>,
  agentsRoot: string,
  codexRoot: string,
): Promise<CleanupResult> {
  // Ownership gate: `~/.agents/skills/` is shared across plugins, so a name
  // collision alone does not prove CE ownership. CE only ever wrote
  // *symlinks* into this tree, pointing at skill directories inside its own
  // Codex install root — so cleanup is restricted to symlinks whose resolved
  // target lives inside a CE-managed Codex root. Plain files or directories
  // at colliding names are user-authored by definition and left alone;
  // symlinks pointing elsewhere (another plugin, a user's own skill
  // checkout) are likewise skipped. Mirrors the install-time gate in
  // `cleanupLegacyAgentsSkillSymlinks` (src/targets/codex.ts).
  const bundle = convertClaudeToCodex(plugin, {
    agentMode: "subagent",
    inferTemperature: true,
    permissions: "none",
  })
  const legacySkills = getLegacyCodexArtifacts(bundle).skills
  const backupDir = path.join(agentsRoot, "compound-engineering")
  const sharedSkillsDir = path.join(agentsRoot, "skills")
  const ceManagedRoots = await resolveCodexManagedRoots(codexRoot, plugin.manifest.name)
  let movedCount = 0
  for (const skillName of legacySkills) {
    movedCount += await moveIfSymlinkManaged(backupDir, "skills", sharedSkillsDir, skillName, ".agents", ceManagedRoots)
  }
  return { target: "codex", root: agentsRoot, moved: movedCount }
}
/**
 * Move `relativePath` under `artifactRoot` into the backup dir, but only if
 * it is a symlink that resolves into one of the CE-managed roots.
 * Returns 1 when moved, 0 otherwise.
 */
async function moveIfSymlinkManaged(
  managedDir: string,
  kind: string,
  artifactRoot: string,
  relativePath: string,
  label: string,
  managedRoots: string[],
): Promise<number> {
  // Defense in depth — same guard as `moveIfExists`: legacy allow-list names
  // are safe by construction, but re-validate the join so a future caller
  // cannot reach outside `artifactRoot` via `moveLegacyArtifactToBackup`.
  if (!isSafeManagedPath(artifactRoot, relativePath)) return 0
  const segments = relativePath.split("/")
  const candidate = path.join(artifactRoot, ...segments)
  // Ownership gate: only symlinks resolving into a CE-managed Codex root are
  // ours to move; everything else is presumed user-authored.
  const ownedByCe = await isManagedCodexAgentsSymlink(candidate, managedRoots)
  if (!ownedByCe) return 0
  await moveLegacyArtifactToBackup(managedDir, kind, artifactRoot, relativePath, label)
  return 1
}
/**
 * Back up legacy OpenCode artifacts (skills, agents, commands) that an older
 * CE install wrote under `opencodeRoot`. Candidates come from the historical
 * allow-list derived from a fresh conversion of the plugin.
 */
async function cleanupOpenCode(plugin: Awaited<ReturnType<typeof loadClaudePlugin>>, opencodeRoot: string): Promise<CleanupResult> {
  const legacy = getLegacyOpenCodeArtifacts(
    convertClaudeToOpenCode(plugin, { agentMode: "subagent", inferTemperature: true, permissions: "none" }),
  )
  const backupDir = path.join(opencodeRoot, "compound-engineering")
  // Each artifact kind lives in a same-named directory under the OpenCode root.
  const groups: Array<[kind: string, entries: Iterable<string>]> = [
    ["skills", legacy.skills],
    ["agents", legacy.agents],
    ["commands", legacy.commands],
  ]
  let movedCount = 0
  for (const [kind, entries] of groups) {
    for (const entry of entries) {
      movedCount += await moveIfExists(backupDir, kind, path.join(opencodeRoot, kind), entry, "OpenCode")
    }
  }
  return { target: "opencode", root: opencodeRoot, moved: movedCount }
}
/**
 * Back up legacy Pi artifacts (skills, prompts) from an older CE install
 * under `piRoot`, using the historical allow-list as the candidate source.
 */
async function cleanupPi(plugin: Awaited<ReturnType<typeof loadClaudePlugin>>, piRoot: string): Promise<CleanupResult> {
  const legacy = getLegacyPiArtifacts(
    convertClaudeToPi(plugin, { agentMode: "subagent", inferTemperature: true, permissions: "none" }),
  )
  const backupDir = path.join(piRoot, "compound-engineering")
  let movedCount = 0
  for (const legacySkill of legacy.skills) {
    movedCount += await moveIfExists(backupDir, "skills", path.join(piRoot, "skills"), legacySkill, "Pi")
  }
  for (const legacyPrompt of legacy.prompts) {
    movedCount += await moveIfExists(backupDir, "prompts", path.join(piRoot, "prompts"), legacyPrompt, "Pi")
  }
  return { target: "pi", root: piRoot, moved: movedCount }
}
/**
 * Back up legacy Gemini artifacts (skills, agents, commands) from an older
 * CE install under `geminiRoot`, driven by the historical allow-list.
 */
async function cleanupGemini(plugin: Awaited<ReturnType<typeof loadClaudePlugin>>, geminiRoot: string): Promise<CleanupResult> {
  const legacy = getLegacyGeminiArtifacts(
    convertClaudeToGemini(plugin, { agentMode: "subagent", inferTemperature: true, permissions: "none" }),
  )
  const backupDir = path.join(geminiRoot, "compound-engineering")
  // Each artifact kind lives in a same-named directory under the Gemini root.
  const groups: Array<[kind: string, entries: Iterable<string>]> = [
    ["skills", legacy.skills],
    ["agents", legacy.agents],
    ["commands", legacy.commands],
  ]
  let movedCount = 0
  for (const [kind, entries] of groups) {
    for (const entry of entries) {
      movedCount += await moveIfExists(backupDir, kind, path.join(geminiRoot, kind), entry, "Gemini")
    }
  }
  return { target: "gemini", root: geminiRoot, moved: movedCount }
}
/**
 * Back up legacy Kiro artifacts (skills, agents) from an older CE install
 * under `kiroRoot`.
 *
 * NOTE(review): unlike the Copilot/Droid/Qwen cleanups, candidates here also
 * include names from the *current* bundle, not just the historical
 * allow-list — presumably intentional because the Kiro writer is still
 * active; confirm against the Kiro install path.
 */
async function cleanupKiro(plugin: Awaited<ReturnType<typeof loadClaudePlugin>>, kiroRoot: string): Promise<CleanupResult> {
  const bundle = convertClaudeToKiro(plugin, {
    agentMode: "subagent",
    inferTemperature: true,
    permissions: "none",
  })
  const legacy = getLegacyKiroArtifacts(bundle)
  // Union of the allow-list and current-bundle names; the Set removes overlap.
  const candidateSkills = new Set([
    ...legacy.skills,
    ...bundle.skillDirs.map((skill) => sanitizePathName(skill.name)),
    ...bundle.generatedSkills.map((skill) => sanitizePathName(skill.name)),
  ])
  const candidateAgents = new Set([
    ...legacy.agents,
    ...bundle.agents.map((agent) => sanitizePathName(agent.name)),
  ])
  const backupDir = path.join(kiroRoot, "compound-engineering")
  let movedCount = 0
  for (const skill of candidateSkills) {
    movedCount += await moveIfExists(backupDir, "skills", path.join(kiroRoot, "skills"), skill, "Kiro")
  }
  // Each Kiro agent has a JSON config plus a companion markdown prompt; probe
  // both locations per agent name.
  for (const agent of candidateAgents) {
    movedCount += await moveIfExists(backupDir, "agents", path.join(kiroRoot, "agents"), `${agent}.json`, "Kiro")
    movedCount += await moveIfExists(backupDir, "agents", path.join(kiroRoot, "agents", "prompts"), `${agent}.md`, "Kiro")
  }
  return { target: "kiro", root: kiroRoot, moved: movedCount }
}
/**
 * Back up stale Copilot files left by past manual installs under `copilotRoot`.
 */
async function cleanupCopilot(plugin: Awaited<ReturnType<typeof loadClaudePlugin>>, copilotRoot: string): Promise<CleanupResult> {
  // IMPORTANT: Copilot candidates must come ONLY from the historical
  // allow-list (`getLegacyCopilotArtifacts`, backed by
  // EXTRA_LEGACY_ARTIFACTS_BY_PLUGIN) — never from the current plugin
  // bundle. Seeding from the bundle would back up user-authored workspace
  // files such as `.github/skills/ce-plan/SKILL.md` or
  // `.github/agents/<name>.agent.md` that merely share a name with a current
  // CE artifact but were never installed by this plugin. The Copilot writer
  // has been removed (users install via `copilot plugin install`), so this
  // cleanup exists solely to back up stale files from past manual installs —
  // the current bundle was never a valid candidate source. Mirrors the
  // Codex/Droid/Windsurf cleanup fixes.
  const legacy = getLegacyCopilotArtifacts(
    convertClaudeToCopilot(plugin, { agentMode: "subagent", inferTemperature: true, permissions: "none" }),
  )
  const backupDir = path.join(copilotRoot, "compound-engineering")
  let movedCount = 0
  for (const legacySkill of legacy.skills) {
    movedCount += await moveIfExists(backupDir, "skills", path.join(copilotRoot, "skills"), legacySkill, "Copilot")
  }
  for (const legacyAgent of legacy.agents) {
    movedCount += await moveIfExists(backupDir, "agents", path.join(copilotRoot, "agents"), legacyAgent, "Copilot")
  }
  return { target: "copilot", root: copilotRoot, moved: movedCount }
}
/**
 * Back up stale Droid files left by past CE installs under `droidRoot`.
 */
async function cleanupDroid(plugin: Awaited<ReturnType<typeof loadClaudePlugin>>, droidRoot: string): Promise<CleanupResult> {
  // IMPORTANT: legacy detection for `~/.factory/{skills,droids,commands}`
  // must be driven exclusively by the historical allow-list from
  // `getLegacyDroidArtifacts` (backed by EXTRA_LEGACY_ARTIFACTS_BY_PLUGIN).
  // Mirrors the Codex cleanup fix: seeding candidates from the current
  // plugin bundle would sweep up user-authored files at
  // `~/.factory/commands/<name>.md` (or the skills/droids equivalents) that
  // merely share a name with a current CE artifact but were never installed
  // by this plugin.
  const legacy = getLegacyDroidArtifacts(
    convertClaudeToDroid(plugin, { agentMode: "subagent", inferTemperature: true, permissions: "none" }),
  )
  const backupDir = path.join(droidRoot, "compound-engineering")
  let movedCount = 0
  for (const legacySkill of legacy.skills) {
    movedCount += await moveIfExists(backupDir, "skills", path.join(droidRoot, "skills"), legacySkill, "Droid")
  }
  for (const legacyDroid of legacy.droids) {
    movedCount += await moveIfExists(backupDir, "droids", path.join(droidRoot, "droids"), legacyDroid, "Droid")
  }
  for (const legacyCommand of legacy.commands) {
    movedCount += await moveIfExists(backupDir, "commands", path.join(droidRoot, "commands"), legacyCommand, "Droid")
  }
  return { target: "droid", root: droidRoot, moved: movedCount }
}
/**
 * Back up stale Qwen artifacts from legacy manual installs under `qwenRoot`.
 *
 * Probes four legacy locations: the managed extension directory (only when
 * marker keys identify it as CE-written — see `isLegacyQwenExtensionInstall`),
 * `skills/`, `agents/` (both `.yaml` and `.md` emissions), and `commands/`
 * (both nested and flat name shapes). Matches are moved into the plugin's
 * managed backup directory. Returns the number of entries moved.
 */
async function cleanupQwen(plugin: Awaited<ReturnType<typeof loadClaudePlugin>>, qwenRoot: string): Promise<CleanupResult> {
  // IMPORTANT: legacy detection for `~/.qwen/{skills,agents,commands}` must be
  // driven exclusively by the historical allow-list in
  // `EXTRA_LEGACY_ARTIFACTS_BY_PLUGIN`. Mirrors the Codex/Droid/Windsurf/
  // Copilot cleanup fixes: the Bun-based Qwen writer was replaced by native
  // `qwen extensions install`, so this cleanup exists solely to back up stale
  // files from legacy manual installs. Seeding from the current plugin bundle
  // (`plugin.skills`, `plugin.agents`, `plugin.commands`) would sweep up
  // user-authored files at paths like `~/.qwen/skills/ce-debug/SKILL.md` or
  // `~/.qwen/agents/ce-correctness-reviewer.md` that happen to share a name
  // with a current CE artifact but were never installed by this plugin.
  const managedDir = path.join(qwenRoot, plugin.manifest.name)
  const extras = getLegacyPluginArtifacts(plugin.manifest.name)
  // Allow-list entries are sanitized the same way the legacy writer sanitized
  // names at install time, so the probes match what was actually written.
  const skillNames = new Set((extras.skills ?? []).map(sanitizePathName))
  const agentNames = new Set((extras.agents ?? []).map(sanitizePathName))
  // The old Bun-based Qwen writer wrote commands via `resolveCommandPath`,
  // which split colon-namespaced names into nested directories (e.g.
  // `compound:plan` -> `commands/compound/plan.md`). We also probe the flat
  // sanitized form (`commands/compound-plan.md`) in case a historical install
  // landed commands there. Both shapes need cleanup so stale files can't
  // shadow native plugin commands after migration. Candidates come exclusively
  // from the historical allow-list, not from the current plugin bundle.
  const commandPaths = new Set<string>()
  for (const name of extras.commands ?? []) {
    commandPaths.add(`${sanitizePathName(name)}.md`)
    if (name.includes(":")) {
      commandPaths.add(`${name.split(":").join("/")}.md`)
    }
  }
  let moved = 0
  // Only move the whole extension directory when the marker keys prove it was
  // written by the legacy CE writer, never a natively installed extension.
  if (await isLegacyQwenExtensionInstall(qwenRoot, plugin.manifest.name)) {
    moved += await moveIfExists(
      managedDir,
      "extensions",
      path.join(qwenRoot, "extensions"),
      plugin.manifest.name,
      "Qwen",
    )
  }
  for (const skillName of skillNames) {
    moved += await moveIfExists(managedDir, "skills", path.join(qwenRoot, "skills"), skillName, "Qwen")
  }
  // Agents may exist in either emission format; probe both per name.
  for (const agentName of agentNames) {
    moved += await moveIfExists(managedDir, "agents", path.join(qwenRoot, "agents"), `${agentName}.yaml`, "Qwen")
    moved += await moveIfExists(managedDir, "agents", path.join(qwenRoot, "agents"), `${agentName}.md`, "Qwen")
  }
  for (const commandPath of commandPaths) {
    moved += await moveIfExists(managedDir, "commands", path.join(qwenRoot, "commands"), commandPath, "Qwen")
  }
  return { target: "qwen", root: qwenRoot, moved }
}
/**
 * True when the installed Qwen extension for `pluginName` was written by the
 * legacy CE writer, identified by marker keys the old writer injected into
 * `qwen-extension.json`. Natively installed extensions lack those keys.
 */
async function isLegacyQwenExtensionInstall(qwenRoot: string, pluginName: string): Promise<boolean> {
  const manifestFile = path.join(qwenRoot, "extensions", pluginName, "qwen-extension.json")
  if (!(await pathExists(manifestFile))) return false
  try {
    const parsed = await readJson<Record<string, unknown>>(manifestFile)
    return ["_compound_managed_mcp", "_compound_managed_keys"].some((marker) => marker in parsed)
  } catch {
    // Unreadable or invalid JSON: treat as not a legacy install rather than fail.
    return false
  }
}
/**
 * Back up legacy Windsurf artifacts (skills, workflows) from an older CE
 * install under `windsurfRoot`. Note: Windsurf legacy detection takes the
 * plugin directly — there is no bundle-conversion step like other targets.
 */
async function cleanupWindsurf(plugin: Awaited<ReturnType<typeof loadClaudePlugin>>, windsurfRoot: string): Promise<CleanupResult> {
  const legacy = getLegacyWindsurfArtifacts(plugin)
  const backupDir = path.join(windsurfRoot, "compound-engineering")
  let movedCount = 0
  for (const legacySkill of legacy.skills) {
    movedCount += await moveIfExists(backupDir, "skills", path.join(windsurfRoot, "skills"), legacySkill, "Windsurf")
  }
  // Probe both `global_workflows/` and `workflows/` per workflow entry.
  for (const workflow of legacy.workflows) {
    movedCount += await moveIfExists(backupDir, "global_workflows", path.join(windsurfRoot, "global_workflows"), workflow, "Windsurf")
    movedCount += await moveIfExists(backupDir, "workflows", path.join(windsurfRoot, "workflows"), workflow, "Windsurf")
  }
  return { target: "windsurf", root: windsurfRoot, moved: movedCount }
}
/**
 * Move `relativePath` under `artifactRoot` into the managed backup directory
 * if it exists on disk. Returns 1 when moved, 0 otherwise.
 */
async function moveIfExists(
  managedDir: string,
  kind: string,
  artifactRoot: string,
  relativePath: string,
  label: string,
): Promise<number> {
  // Defense in depth: relativePath comes from either the historical legacy
  // allow-list (safe by construction) or an install-manifest entry already
  // filtered by `readManagedInstallManifest` / `readInstallManifest`.
  // Re-validate the join here so a future caller that skips that read layer
  // cannot issue an out-of-tree rename via `moveLegacyArtifactToBackup`.
  if (!isSafeManagedPath(artifactRoot, relativePath)) return 0
  const segments = relativePath.split("/")
  const candidate = path.join(artifactRoot, ...segments)
  const present = await pathExists(candidate)
  if (!present) return 0
  await moveLegacyArtifactToBackup(managedDir, kind, artifactRoot, relativePath, label)
  return 1
}
/**
 * Parse a comma-separated `--target` argument into a validated list of
 * cleanup targets. `"all"` expands to every known target.
 *
 * @throws Error when an entry is not a known cleanup target.
 */
function resolveCleanupTargets(targetArg: string): CleanupTarget[] {
  // Copy so callers cannot mutate the module-level list.
  if (targetArg === "all") return [...cleanupTargets]
  const requested = targetArg
    .split(",")
    .map((entry) => entry.trim())
    .filter(Boolean)
  const resolved: CleanupTarget[] = []
  for (const target of requested) {
    if (!cleanupTargets.includes(target as CleanupTarget)) {
      throw new Error(`Unknown cleanup target: ${target}. Use one of: ${cleanupTargets.join(", ")}, all`)
    }
    // De-duplicate (order-preserving) so e.g. `--target codex,codex` doesn't
    // run the same cleanup — and log its output — twice.
    if (!resolved.includes(target as CleanupTarget)) {
      resolved.push(target as CleanupTarget)
    }
  }
  return resolved
}
async function resolveCleanupPluginPath(input: string): Promise<string> {
if (input.startsWith(".") || input.startsWith("/") || input.startsWith("~")) {
const expanded = expandHome(input)
const directPath = path.resolve(expanded)
if (await pathExists(directPath)) return directPath
throw new Error(`Local plugin path not found: ${directPath}`)
}
const bundledRoot = fileURLToPath(new URL("../../plugins/", import.meta.url))
const pluginPath = path.join(bundledRoot, input)
const manifestPath = path.join(pluginPath, ".claude-plugin", "plugin.json")
if (await pathExists(manifestPath)) return pluginPath
throw new Error(`Unknown bundled plugin: ${input}`)
}
/**
 * Resolve a workspace-root CLI value: an explicit non-blank value wins
 * (home-expanded and made absolute); otherwise fall back to the process cwd.
 */
function resolveWorkspaceRoot(value: unknown): string {
  const text = value ? String(value).trim() : ""
  if (text) {
    return path.resolve(expandHome(text))
  }
  return process.cwd()
}
// Already a `.github` directory? Use it as-is; otherwise nest one inside.
function resolveCopilotWorkspaceRoot(outputRoot: string): string {
  if (path.basename(outputRoot) === ".github") return outputRoot
  return path.join(outputRoot, ".github")
}
// Already a `.gemini` directory? Use it as-is; otherwise nest one inside.
function resolveGeminiWorkspaceRoot(outputRoot: string): string {
  if (path.basename(outputRoot) === ".gemini") return outputRoot
  return path.join(outputRoot, ".gemini")
}
// Already an `.opencode` directory? Use it as-is; otherwise nest one inside.
function resolveOpenCodeWorkspaceRoot(outputRoot: string): string {
  if (path.basename(outputRoot) === ".opencode") return outputRoot
  return path.join(outputRoot, ".opencode")
}
// Truthy AND non-blank after trimming; `0`, `""`, and `undefined` all count
// as unset (matching how the CLI treats omitted flags).
function hasExplicitValue(value: unknown): boolean {
  if (!value) return false
  return String(value).trim().length > 0
}
/**
 * De-duplicate a list of root directories, preserving first-seen order.
 *
 * Comparison uses the canonical (symlink-resolved) path, not the raw string:
 * on macOS a cwd under `/var/folders` canonicalizes to `/private/var/folders`,
 * so two different strings can name the same directory. When a directory does
 * not exist yet (e.g. the very first install), `resolveCanonicalPath` falls
 * back to `path.normalize`, preserving the pre-realpath behavior as a net.
 */
async function dedupeRoots(roots: string[]): Promise<string[]> {
  const canonicalSeen = new Set<string>()
  const unique: string[] = []
  for (const candidate of roots) {
    const canonical = await resolveCanonicalPath(candidate)
    if (canonicalSeen.has(canonical)) continue
    canonicalSeen.add(canonical)
    unique.push(candidate)
  }
  return unique
}
/**
 * Canonicalize a path for identity comparison: resolve symlinks so aliases of
 * the same directory compare equal. A path that does not exist yet has no
 * filesystem aliases to race against, so the normalized string is a safe
 * stand-in.
 */
async function resolveCanonicalPath(target: string): Promise<string> {
  const normalized = path.normalize(target)
  return fs.realpath(normalized).catch(() => normalized)
}
// Already a `.factory` directory? Use it as-is; otherwise nest one inside.
function resolveDroidWorkspaceRoot(outputRoot: string): string {
  if (path.basename(outputRoot) === ".factory") return outputRoot
  return path.join(outputRoot, ".factory")
}
// Already a `.windsurf` directory? Use it as-is; otherwise nest one inside.
function resolveWindsurfWorkspaceRoot(outputRoot: string): string {
  if (path.basename(outputRoot) === ".windsurf") return outputRoot
  return path.join(outputRoot, ".windsurf")
}

View File

@@ -6,7 +6,7 @@ import { targets, validateScope } from "../targets"
import type { ClaudeToOpenCodeOptions, PermissionMode } from "../converters/claude-to-opencode"
import { ensureCodexAgentsFile } from "../utils/codex-agents"
import { expandHome, resolveTargetHome } from "../utils/resolve-home"
import { resolveTargetOutputRoot } from "../utils/resolve-output"
import { resolveOpenCodeWriteScope, resolveTargetOutputRoot } from "../utils/resolve-output"
import { detectInstalledTools } from "../utils/detect-tools"
const permissionModes: PermissionMode[] = ["none", "broad", "from-commands"]
@@ -25,7 +25,7 @@ export default defineCommand({
to: {
type: "string",
default: "opencode",
description: "Target format (opencode | codex | droid | cursor | pi | copilot | gemini | kiro | windsurf | openclaw | qwen | all)",
description: "Target format (opencode | codex | pi | gemini | kiro | all)",
},
output: {
type: "string",
@@ -42,16 +42,6 @@ export default defineCommand({
alias: "pi-home",
description: "Write Pi output to this Pi root (ex: ~/.pi/agent or ./.pi)",
},
openclawHome: {
type: "string",
alias: "openclaw-home",
description: "Write OpenClaw output to this extensions root (ex: ~/.openclaw/extensions)",
},
qwenHome: {
type: "string",
alias: "qwen-home",
description: "Write Qwen output to this Qwen extensions root (ex: ~/.qwen/extensions)",
},
scope: {
type: "string",
description: "Scope level: global | workspace (default varies by target)",
@@ -89,8 +79,6 @@ export default defineCommand({
const hasExplicitOutput = Boolean(args.output && String(args.output).trim())
const codexHome = resolveTargetHome(args.codexHome, path.join(os.homedir(), ".codex"))
const piHome = resolveTargetHome(args.piHome, path.join(os.homedir(), ".pi", "agent"))
const openclawHome = resolveTargetHome(args.openclawHome, path.join(os.homedir(), ".openclaw", "extensions"))
const qwenHome = resolveTargetHome(args.qwenHome, path.join(os.homedir(), ".qwen", "extensions"))
const options: ClaudeToOpenCodeOptions = {
agentMode: String(args.agentMode) === "primary" ? "primary" : "subagent",
@@ -100,15 +88,19 @@ export default defineCommand({
if (targetName === "all") {
const detected = await detectInstalledTools()
const activeTargets = detected.filter((t) => t.detected)
const activeTargets = detected.filter((t) => t.detected && targets[t.name]?.implemented)
if (activeTargets.length === 0) {
console.log("No AI coding tools detected. Install at least one tool first.")
console.log("No installable AI coding tools detected. Use native plugin install for Claude Code, Copilot, Droid, and Qwen.")
return
}
console.log(`Detected ${activeTargets.length} tool(s):`)
console.log(`Detected ${activeTargets.length} installable tool(s):`)
for (const tool of detected) {
if (tool.detected && !targets[tool.name]?.implemented) {
console.log(` - ${tool.name} — native plugin install; skipped`)
continue
}
console.log(` ${tool.detected ? "✓" : "✗"} ${tool.name}${tool.reason}`)
}
@@ -128,12 +120,12 @@ export default defineCommand({
outputRoot,
codexHome,
piHome,
openclawHome,
qwenHome,
pluginName: plugin.manifest.name,
hasExplicitOutput,
})
await handler.write(root, bundle)
const writeScope =
tool.name === "opencode" ? resolveOpenCodeWriteScope(hasExplicitOutput, undefined) : undefined
await handler.write(root, bundle, writeScope)
console.log(`Converted ${plugin.manifest.name} to ${tool.name} at ${root}`)
}
@@ -159,8 +151,6 @@ export default defineCommand({
outputRoot,
codexHome,
piHome,
openclawHome,
qwenHome,
pluginName: plugin.manifest.name,
hasExplicitOutput,
scope: resolvedScope,
@@ -170,7 +160,9 @@ export default defineCommand({
throw new Error(`Target ${targetName} did not return a bundle.`)
}
await target.write(primaryOutputRoot, bundle, resolvedScope)
const effectiveScope =
targetName === "opencode" ? resolveOpenCodeWriteScope(hasExplicitOutput, resolvedScope) : resolvedScope
await target.write(primaryOutputRoot, bundle, effectiveScope)
console.log(`Converted ${plugin.manifest.name} to ${targetName} at ${primaryOutputRoot}`)
const extraTargets = parseExtraTargets(args.also)
@@ -192,16 +184,18 @@ export default defineCommand({
}
const extraRoot = resolveTargetOutputRoot({
targetName: extra,
outputRoot: path.join(outputRoot, extra),
outputRoot,
codexHome,
piHome,
openclawHome,
qwenHome,
pluginName: plugin.manifest.name,
hasExplicitOutput,
scope: handler.defaultScope,
})
await handler.write(extraRoot, extraBundle, handler.defaultScope)
const extraScope =
extra === "opencode"
? resolveOpenCodeWriteScope(hasExplicitOutput, handler.defaultScope)
: handler.defaultScope
await handler.write(extraRoot, extraBundle, extraScope)
console.log(`Converted ${plugin.manifest.name} to ${extra} at ${extraRoot}`)
}

View File

@@ -9,7 +9,7 @@ import { pathExists } from "../utils/files"
import type { ClaudeToOpenCodeOptions, PermissionMode } from "../converters/claude-to-opencode"
import { ensureCodexAgentsFile } from "../utils/codex-agents"
import { expandHome, resolveTargetHome } from "../utils/resolve-home"
import { resolveTargetOutputRoot } from "../utils/resolve-output"
import { resolveOpenCodeWriteScope, resolveTargetOutputRoot } from "../utils/resolve-output"
import { detectInstalledTools } from "../utils/detect-tools"
const permissionModes: PermissionMode[] = ["none", "broad", "from-commands"]
@@ -28,7 +28,7 @@ export default defineCommand({
to: {
type: "string",
default: "opencode",
description: "Target format (opencode | codex | droid | cursor | pi | copilot | gemini | kiro | windsurf | openclaw | qwen | all)",
description: "Target format (opencode | codex | pi | gemini | kiro | all)",
},
output: {
type: "string",
@@ -45,16 +45,6 @@ export default defineCommand({
alias: "pi-home",
description: "Write Pi output to this Pi root (ex: ~/.pi/agent or ./.pi)",
},
openclawHome: {
type: "string",
alias: "openclaw-home",
description: "Write OpenClaw output to this extensions root (ex: ~/.openclaw/extensions)",
},
qwenHome: {
type: "string",
alias: "qwen-home",
description: "Write Qwen output to this Qwen extensions root (ex: ~/.qwen/extensions)",
},
scope: {
type: "string",
description: "Scope level: global | workspace (default varies by target)",
@@ -100,8 +90,6 @@ export default defineCommand({
const codexHome = resolveTargetHome(args.codexHome, path.join(os.homedir(), ".codex"))
const piHome = resolveTargetHome(args.piHome, path.join(os.homedir(), ".pi", "agent"))
const hasExplicitOutput = Boolean(args.output && String(args.output).trim())
const openclawHome = resolveTargetHome(args.openclawHome, path.join(os.homedir(), ".openclaw", "extensions"))
const qwenHome = resolveTargetHome(args.qwenHome, path.join(os.homedir(), ".qwen", "extensions"))
const options: ClaudeToOpenCodeOptions = {
agentMode: String(args.agentMode) === "primary" ? "primary" : "subagent",
@@ -111,15 +99,19 @@ export default defineCommand({
if (targetName === "all") {
const detected = await detectInstalledTools()
const activeTargets = detected.filter((t) => t.detected)
const activeTargets = detected.filter((t) => t.detected && targets[t.name]?.implemented)
if (activeTargets.length === 0) {
console.log("No AI coding tools detected. Install at least one tool first.")
console.log("No installable AI coding tools detected. Use native plugin install for Claude Code, Copilot, Droid, and Qwen.")
return
}
console.log(`Detected ${activeTargets.length} tool(s):`)
console.log(`Detected ${activeTargets.length} installable tool(s):`)
for (const tool of detected) {
if (tool.detected && !targets[tool.name]?.implemented) {
console.log(` - ${tool.name} — native plugin install; skipped`)
continue
}
console.log(` ${tool.detected ? "✓" : "✗"} ${tool.name}${tool.reason}`)
}
@@ -139,12 +131,12 @@ export default defineCommand({
outputRoot,
codexHome,
piHome,
openclawHome,
qwenHome,
pluginName: plugin.manifest.name,
hasExplicitOutput,
})
await handler.write(root, bundle)
const writeScope =
tool.name === "opencode" ? resolveOpenCodeWriteScope(hasExplicitOutput, undefined) : undefined
await handler.write(root, bundle, writeScope)
console.log(`Installed ${plugin.manifest.name} to ${tool.name} at ${root}`)
}
@@ -173,13 +165,13 @@ export default defineCommand({
outputRoot,
codexHome,
piHome,
openclawHome,
qwenHome,
pluginName: plugin.manifest.name,
hasExplicitOutput,
scope: resolvedScope,
})
await target.write(primaryOutputRoot, bundle, resolvedScope)
const effectiveScope =
targetName === "opencode" ? resolveOpenCodeWriteScope(hasExplicitOutput, resolvedScope) : resolvedScope
await target.write(primaryOutputRoot, bundle, effectiveScope)
console.log(`Installed ${plugin.manifest.name} to ${primaryOutputRoot}`)
const extraTargets = parseExtraTargets(args.also)
@@ -201,16 +193,18 @@ export default defineCommand({
}
const extraRoot = resolveTargetOutputRoot({
targetName: extra,
outputRoot: path.join(outputRoot, extra),
outputRoot,
codexHome,
piHome,
openclawHome,
qwenHome,
pluginName: plugin.manifest.name,
hasExplicitOutput,
scope: handler.defaultScope,
})
await handler.write(extraRoot, extraBundle, handler.defaultScope)
const extraScope =
extra === "opencode"
? resolveOpenCodeWriteScope(hasExplicitOutput, handler.defaultScope)
: handler.defaultScope
await handler.write(extraRoot, extraBundle, extraScope)
console.log(`Installed ${plugin.manifest.name} to ${extraRoot}`)
}
@@ -264,9 +258,12 @@ function resolveOutputRoot(value: unknown): string {
const expanded = expandHome(String(value).trim())
return path.resolve(expanded)
}
// OpenCode global config lives at ~/.config/opencode per XDG spec
// See: https://opencode.ai/docs/config/
return path.join(os.homedir(), ".config", "opencode")
// Per-target defaults are applied in `resolveTargetOutputRoot` -- e.g.,
// OpenCode falls back to `OPENCODE_CONFIG_DIR` / `~/.config/opencode`,
// Codex falls back to `~/.codex`. Falling through to `process.cwd()` keeps
// workspace-rooted targets (gemini, kiro) using the user's project root
// when neither `--output` nor a target-specific home flag was supplied.
return process.cwd()
}
async function resolveBundledPluginPath(pluginName: string): Promise<string | null> {

View File

@@ -1,88 +0,0 @@
import { defineCommand } from "citty"
import path from "path"
import { loadClaudeHome } from "../parsers/claude-home"
import {
getDefaultSyncRegistryContext,
getSyncTarget,
isSyncTargetName,
syncTargetNames,
type SyncTargetName,
} from "../sync/registry"
import { expandHome } from "../utils/resolve-home"
import { hasPotentialSecrets } from "../utils/secrets"
import { detectInstalledTools } from "../utils/detect-tools"
const validTargets = [...syncTargetNames, "all"] as const
type SyncTarget = SyncTargetName | "all"
function isValidTarget(value: string): value is SyncTarget {
return value === "all" || isSyncTargetName(value)
}
export default defineCommand({
meta: {
name: "sync",
description: "Sync Claude Code config (~/.claude/) to supported provider configs and skills",
},
args: {
target: {
type: "string",
default: "all",
description: `Target: ${syncTargetNames.join(" | ")} | all (default: all)`,
},
claudeHome: {
type: "string",
alias: "claude-home",
description: "Path to Claude home (default: ~/.claude)",
},
},
async run({ args }) {
if (!isValidTarget(args.target)) {
throw new Error(`Unknown target: ${args.target}. Use one of: ${validTargets.join(", ")}`)
}
const { home, cwd } = getDefaultSyncRegistryContext()
const claudeHome = expandHome(args.claudeHome ?? path.join(home, ".claude"))
const config = await loadClaudeHome(claudeHome)
// Warn about potential secrets in MCP env vars
if (hasPotentialSecrets(config.mcpServers)) {
console.warn(
"⚠️ Warning: MCP servers contain env vars that may include secrets (API keys, tokens).\n" +
" These will be copied to the target config. Review before sharing the config file.",
)
}
if (args.target === "all") {
const detected = await detectInstalledTools()
const activeTargets = detected.filter((t) => t.detected).map((t) => t.name)
if (activeTargets.length === 0) {
console.log("No AI coding tools detected.")
return
}
console.log(`Syncing to ${activeTargets.length} detected tool(s)...`)
for (const tool of detected) {
console.log(` ${tool.detected ? "✓" : "✗"} ${tool.name}${tool.reason}`)
}
for (const name of activeTargets) {
const target = getSyncTarget(name as SyncTargetName)
const outputRoot = target.resolveOutputRoot(home, cwd)
await target.sync(config, outputRoot)
console.log(`✓ Synced to ${name}: ${outputRoot}`)
}
return
}
console.log(
`Syncing ${config.skills.length} skills, ${config.commands?.length ?? 0} commands, ${Object.keys(config.mcpServers).length} MCP servers...`,
)
const target = getSyncTarget(args.target as SyncTargetName)
const outputRoot = target.resolveOutputRoot(home, cwd)
await target.sync(config, outputRoot)
console.log(`✓ Synced to ${args.target}: ${outputRoot}`)
},
})

View File

@@ -2,7 +2,7 @@ import fs, { type Dirent } from "fs"
import path from "path"
import { formatFrontmatter } from "../utils/frontmatter"
import { type ClaudeAgent, type ClaudeCommand, type ClaudePlugin, type ClaudeSkill, filterSkillsByPlatform } from "../types/claude"
import type { CodexBundle, CodexGeneratedSkill, CodexGeneratedSkillSidecarDir } from "../types/codex"
import type { CodexAgent, CodexBundle, CodexGeneratedSkill, CodexGeneratedSkillSidecarDir } from "../types/codex"
import type { ClaudeToOpenCodeOptions } from "./claude-to-opencode"
import {
normalizeCodexName,
@@ -57,7 +57,9 @@ export function convertClaudeToCodex(
}
}
const invocationTargets: CodexInvocationTargets = { promptTargets, skillTargets }
const agents = plugin.agents.map(convertAgent)
const agentTargets = buildAgentTargets(plugin, agents)
const invocationTargets: CodexInvocationTargets = { promptTargets, skillTargets, agentTargets }
const commandSkills: CodexGeneratedSkill[] = []
const prompts = invocableCommands.map((command) => {
@@ -68,42 +70,34 @@ export function convertClaudeToCodex(
return { name: promptName, content }
})
const agentSkills = plugin.agents.map((agent) =>
convertAgent(agent, usedSkillNames, invocationTargets),
)
const generatedSkills = [...commandSkills, ...agentSkills]
const generatedSkills = [...commandSkills]
return {
pluginName: plugin.manifest.name,
prompts,
skillDirs,
generatedSkills,
agents,
invocationTargets,
mcpServers: plugin.mcpServers,
}
}
function convertAgent(
agent: ClaudeAgent,
usedNames: Set<string>,
invocationTargets: CodexInvocationTargets,
): CodexGeneratedSkill {
const name = uniqueName(normalizeCodexName(agent.name), usedNames)
function convertAgent(agent: ClaudeAgent): CodexAgent {
const name = buildCodexAgentName(agent)
const description = sanitizeDescription(
agent.description ?? `Converted from Claude agent ${agent.name}`,
)
const frontmatter: Record<string, unknown> = { name, description }
let body = transformContentForCodex(agent.body.trim(), invocationTargets)
let instructions = agent.body.trim()
if (agent.capabilities && agent.capabilities.length > 0) {
const capabilities = agent.capabilities.map((capability) => `- ${capability}`).join("\n")
body = `## Capabilities\n${capabilities}\n\n${body}`.trim()
instructions = `## Capabilities\n${capabilities}\n\n${instructions}`.trim()
}
if (body.length === 0) {
body = `Instructions converted from the ${agent.name} agent.`
if (instructions.length === 0) {
instructions = `Instructions converted from the ${agent.name} agent.`
}
const content = formatFrontmatter(frontmatter, body)
return { name, content, sidecarDirs: collectReferencedSidecarDirs(agent) }
return { name, description, instructions, sidecarDirs: collectReferencedSidecarDirs(agent) }
}
function convertCommandSkill(
@@ -164,6 +158,44 @@ function shouldApplyCompoundWorkflowModel(plugin: ClaudePlugin): boolean {
return plugin.manifest.name === "compound-engineering"
}
// Map every alias a Claude agent might be invoked by (raw name, normalized name,
// "ce-"-stripped name, and category-/plugin-qualified variants) to the generated
// Codex agent name, so agent references in converted content can be resolved.
function buildAgentTargets(plugin: ClaudePlugin, agents: CodexAgent[]): Record<string, string> {
  const targets: Record<string, string> = {}
  // `plugin.agents` and `agents` are index-aligned: `agents` was produced by
  // mapping `plugin.agents` through convertAgent.
  plugin.agents.forEach((agent, index) => {
    const targetName = agents[index]?.name
    if (!targetName) return
    const category = getAgentCategory(agent)
    const aliases = [
      agent.name,
      normalizeCodexName(agent.name),
      agent.name.startsWith("ce-") ? agent.name.slice("ce-".length) : "",
      category ? `${category}:${agent.name}` : "",
      category && agent.name.startsWith("ce-") ? `${category}:${agent.name.slice("ce-".length)}` : "",
      category ? `${plugin.manifest.name}:${category}:${agent.name}` : "",
      category && agent.name.startsWith("ce-") ? `${plugin.manifest.name}:${category}:${agent.name.slice("ce-".length)}` : "",
    ].filter(Boolean) // drop "" placeholders for variants that do not apply
    for (const alias of aliases) {
      targets[normalizeCodexName(alias)] = targetName
    }
  })
  return targets
}
// Codex agent name: "<category>-<agent>" when the agent lives in a category
// subdirectory under agents/, otherwise just the normalized agent name.
function buildCodexAgentName(agent: ClaudeAgent): string {
  const category = getAgentCategory(agent)
  const agentName = normalizeCodexName(agent.name)
  return category ? `${normalizeCodexName(category)}-${agentName}` : agentName
}
// Derive an agent's category from its source path: the path segment immediately
// after the last "agents" directory (e.g. agents/review/foo.md -> "review").
// Returns null when the agent sits directly in agents/ (next segment is the .md file)
// or when no "agents" segment exists in the path.
function getAgentCategory(agent: ClaudeAgent): string | null {
  const parts = agent.sourcePath.split(path.sep)
  const agentsIndex = parts.lastIndexOf("agents")
  if (agentsIndex === -1) return null
  const next = parts[agentsIndex + 1]
  if (!next || next.endsWith(".md")) return null
  return next
}
function sanitizeDescription(value: string, maxLength = CODEX_DESCRIPTION_MAX_LENGTH): string {
const normalized = value.replace(/\s+/g, " ").trim()
if (normalized.length <= maxLength) return normalized

View File

@@ -41,7 +41,7 @@ export function convertClaudeToCopilot(
console.warn("Warning: Copilot does not support hooks. Hooks were skipped during conversion.")
}
return { agents, generatedSkills, skillDirs, mcpConfig }
return { pluginName: plugin.manifest.name, agents, generatedSkills, skillDirs, mcpConfig }
}
function convertAgent(agent: ClaudeAgent, usedNames: Set<string>): CopilotAgent {

View File

@@ -50,7 +50,7 @@ export function convertClaudeToDroid(
sourceDir: skill.sourceDir,
}))
return { commands, droids, skillDirs }
return { pluginName: plugin.manifest.name, commands, droids, skillDirs }
}
function convertCommand(command: ClaudeCommand): DroidCommandFile {

View File

@@ -1,6 +1,6 @@
import { formatFrontmatter } from "../utils/frontmatter"
import { type ClaudeAgent, type ClaudeCommand, type ClaudeMcpServer, type ClaudePlugin, filterSkillsByPlatform } from "../types/claude"
import type { GeminiBundle, GeminiCommand, GeminiMcpServer, GeminiSkill } from "../types/gemini"
import type { GeminiAgent, GeminiBundle, GeminiCommand, GeminiMcpServer } from "../types/gemini"
import type { ClaudeToOpenCodeOptions } from "./claude-to-opencode"
export type ClaudeToGeminiOptions = ClaudeToOpenCodeOptions
@@ -11,7 +11,6 @@ export function convertClaudeToGemini(
plugin: ClaudePlugin,
_options: ClaudeToGeminiOptions,
): GeminiBundle {
const usedSkillNames = new Set<string>()
const usedCommandNames = new Set<string>()
const platformSkills = filterSkillsByPlatform(plugin.skills, "gemini")
@@ -20,12 +19,8 @@ export function convertClaudeToGemini(
sourceDir: skill.sourceDir,
}))
// Reserve skill names from pass-through skills
for (const skill of skillDirs) {
usedSkillNames.add(normalizeName(skill.name))
}
const generatedSkills = plugin.agents.map((agent) => convertAgentToSkill(agent, usedSkillNames))
const usedAgentNames = new Set<string>()
const agents = plugin.agents.map((agent) => convertAgent(agent, usedAgentNames))
const commands = plugin.commands.map((command) => convertCommand(command, usedCommandNames))
@@ -35,16 +30,16 @@ export function convertClaudeToGemini(
console.warn("Warning: Gemini CLI hooks use a different format (BeforeTool/AfterTool with matchers). Hooks were skipped during conversion.")
}
return { generatedSkills, skillDirs, commands, mcpServers }
return { pluginName: plugin.manifest.name, generatedSkills: [], skillDirs, agents, commands, mcpServers }
}
function convertAgentToSkill(agent: ClaudeAgent, usedNames: Set<string>): GeminiSkill {
function convertAgent(agent: ClaudeAgent, usedNames: Set<string>): GeminiAgent {
const name = uniqueName(normalizeName(agent.name), usedNames)
const description = sanitizeDescription(
agent.description ?? `Use this skill for ${agent.name} tasks`,
agent.description ?? `Use this agent for ${agent.name} tasks`,
)
const frontmatter: Record<string, unknown> = { name, description }
const frontmatter: Record<string, unknown> = { name, description, kind: "local" }
let body = transformContentForGemini(agent.body.trim())
if (agent.capabilities && agent.capabilities.length > 0) {
@@ -80,9 +75,9 @@ function convertCommand(command: ClaudeCommand, usedNames: Set<string>): GeminiC
/**
* Transform Claude Code content to Gemini-compatible content.
*
* 1. Task agent calls: Task agent-name(args) -> Use the agent-name skill to: args
* 1. Task agent calls: Task agent-name(args) -> Use the @agent-name subagent to: args
* 2. Path rewriting: .claude/ -> .gemini/, ~/.claude/ -> ~/.gemini/
* 3. Agent references: @agent-name -> the agent-name skill
* 3. Agent references: @agent-name -> @agent-name subagent
*/
export function transformContentForGemini(body: string): string {
let result = body
@@ -91,11 +86,11 @@ export function transformContentForGemini(body: string): string {
const taskPattern = /^(\s*-?\s*)Task\s+([a-z][a-z0-9:-]*)\(([^)]*)\)/gm
result = result.replace(taskPattern, (_match, prefix: string, agentName: string, args: string) => {
const finalSegment = agentName.includes(":") ? agentName.split(":").pop()! : agentName
const skillName = normalizeName(finalSegment)
const geminiAgentName = normalizeName(finalSegment)
const trimmedArgs = args.trim()
return trimmedArgs
? `${prefix}Use the ${skillName} skill to: ${trimmedArgs}`
: `${prefix}Use the ${skillName} skill`
? `${prefix}Use the @${geminiAgentName} subagent to: ${trimmedArgs}`
: `${prefix}Use the @${geminiAgentName} subagent`
})
// 2. Rewrite .claude/ paths to .gemini/
@@ -104,9 +99,9 @@ export function transformContentForGemini(body: string): string {
.replace(/\.claude\//g, ".gemini/")
// 3. Transform @agent-name references
const agentRefPattern = /@([a-z][a-z0-9-]*-(?:agent|reviewer|researcher|analyst|specialist|oracle|sentinel|guardian|strategist))/gi
const agentRefPattern = /@([a-z][a-z0-9-]*-(?:agent|reviewer|researcher|analyst|specialist|oracle|sentinel|guardian|strategist))(?!\s+subagent\b)/gi
result = result.replace(agentRefPattern, (_match, agentName: string) => {
return `the ${normalizeName(agentName)} skill`
return `@${normalizeName(agentName)} subagent`
})
return result

View File

@@ -66,7 +66,7 @@ export function convertClaudeToKiro(
)
}
return { agents, generatedSkills, skillDirs, steeringFiles, mcpServers }
return { pluginName: plugin.manifest.name, agents, generatedSkills, skillDirs, steeringFiles, mcpServers }
}
function convertAgentToKiroAgent(agent: ClaudeAgent, knownAgentNames: string[]): KiroAgent {

View File

@@ -1,215 +0,0 @@
import { formatFrontmatter } from "../utils/frontmatter"
import { normalizeModelWithProvider } from "../utils/model"
import { sanitizePathName } from "../utils/files"
import {
type ClaudeAgent,
type ClaudeCommand,
type ClaudePlugin,
type ClaudeMcpServer,
filterSkillsByPlatform,
} from "../types/claude"
import type {
OpenClawBundle,
OpenClawCommandRegistration,
OpenClawPluginManifest,
OpenClawSkillFile,
} from "../types/openclaw"
import type { ClaudeToOpenCodeOptions } from "./claude-to-opencode"
export type ClaudeToOpenClawOptions = ClaudeToOpenCodeOptions
/**
 * Convert a parsed Claude Code plugin into an OpenClaw bundle.
 *
 * Agents and model-invocable commands both become OpenClaw skills; commands are
 * additionally registered as slash commands in the generated entry point.
 * Plugin-authored skills are copied through unchanged (filtered to "openclaw").
 */
export function convertClaudeToOpenClaw(
  plugin: ClaudePlugin,
  _options: ClaudeToOpenClawOptions,
): OpenClawBundle {
  // Commands flagged disableModelInvocation are excluded from skills AND commands.
  const enabledCommands = plugin.commands.filter((cmd) => !cmd.disableModelInvocation)
  const agentSkills = plugin.agents.map(convertAgentToSkill)
  const commandSkills = enabledCommands.map(convertCommandToSkill)
  const commands = enabledCommands.map(convertCommand)
  const skills: OpenClawSkillFile[] = [...agentSkills, ...commandSkills]
  const platformSkills = filterSkillsByPlatform(plugin.skills, "openclaw")
  const skillDirCopies = platformSkills.map((skill) => ({
    sourceDir: skill.sourceDir,
    name: skill.name,
  }))
  // Manifest lists every skill directory (generated + pass-through), sanitized
  // so the names are safe on disk.
  const allSkillDirs = [
    ...agentSkills.map((s) => sanitizePathName(s.dir)),
    ...commandSkills.map((s) => sanitizePathName(s.dir)),
    ...platformSkills.map((s) => sanitizePathName(s.name)),
  ]
  const manifest = buildManifest(plugin, allSkillDirs)
  const packageJson = buildPackageJson(plugin)
  // MCP config is only produced when the source plugin declares MCP servers.
  const openclawConfig = plugin.mcpServers
    ? buildOpenClawConfig(plugin.mcpServers)
    : undefined
  const entryPoint = generateEntryPoint(commands)
  return {
    manifest,
    packageJson,
    entryPoint,
    skills,
    skillDirCopies,
    commands,
    openclawConfig,
  }
}
/** Build the OpenClaw plugin manifest: id and display name from the Claude manifest, plus skill paths. */
function buildManifest(plugin: ClaudePlugin, skillDirs: string[]): OpenClawPluginManifest {
  const pluginId = plugin.manifest.name
  const skills = skillDirs.map((dir) => `skills/${dir}`)
  return {
    id: pluginId,
    name: formatDisplayName(pluginId),
    kind: "tool",
    // Empty schema: the converted plugin exposes no user-configurable options.
    configSchema: {
      type: "object",
      properties: {},
    },
    skills,
  }
}
/** Build a package.json for the generated OpenClaw plugin, registering index.ts as the extension entry. */
function buildPackageJson(plugin: ClaudePlugin): Record<string, unknown> {
  const { name, version, description, keywords } = plugin.manifest
  return {
    name: `openclaw-${name}`,
    version,
    type: "module",
    private: true,
    description,
    main: "index.ts",
    openclaw: {
      extensions: [
        {
          id: name,
          entry: "./index.ts",
        },
      ],
    },
    // Marker keywords first, then whatever the source plugin declared.
    keywords: ["openclaw", "openclaw-plugin", ...(keywords ?? [])],
  }
}
/** Turn a Claude agent into an OpenClaw skill file stored under skills/agent-<name>/. */
function convertAgentToSkill(agent: ClaudeAgent): OpenClawSkillFile {
  const frontmatter: Record<string, unknown> = {
    name: agent.name,
    description: agent.description,
  }
  const model = agent.model
  // "inherit" means "use the caller's model"; omit it so OpenClaw picks its default.
  if (model && model !== "inherit") {
    frontmatter.model = normalizeModelWithProvider(model)
  }
  return {
    name: agent.name,
    content: formatFrontmatter(frontmatter, rewritePaths(agent.body)),
    dir: `agent-${agent.name}`,
  }
}
/** Turn a Claude command into an OpenClaw skill file stored under skills/cmd-<name>/. */
function convertCommandToSkill(command: ClaudeCommand): OpenClawSkillFile {
  const frontmatter: Record<string, unknown> = {
    name: `cmd-${command.name}`,
    description: command.description,
  }
  const model = command.model
  // "inherit" has no OpenClaw equivalent; leave the model unset in that case.
  if (model && model !== "inherit") {
    frontmatter.model = normalizeModelWithProvider(model)
  }
  return {
    name: command.name,
    content: formatFrontmatter(frontmatter, rewritePaths(command.body)),
    dir: `cmd-${command.name}`,
  }
}
/** Build the OpenClaw command registration; colons in names become dashes. */
function convertCommand(command: ClaudeCommand): OpenClawCommandRegistration {
  const description = command.description ?? `Run ${command.name}`
  return {
    name: command.name.replace(/:/g, "-"),
    description,
    // A declared argument hint is the only signal that the command takes args.
    acceptsArgs: Boolean(command.argumentHint),
    body: rewritePaths(command.body),
  }
}
/**
 * Build the OpenClaw MCP server config from Claude MCP server declarations.
 * Command-based servers map to stdio transport, URL-based servers to HTTP.
 * Servers with neither a command nor a URL are silently dropped.
 */
function buildOpenClawConfig(
  servers: Record<string, ClaudeMcpServer>,
): Record<string, unknown> {
  const mcpServers: Record<string, unknown> = {}
  for (const [name, server] of Object.entries(servers)) {
    if (server.command) {
      mcpServers[name] = {
        type: "stdio",
        command: server.command,
        args: server.args ?? [],
        env: server.env,
      }
      continue
    }
    if (server.url) {
      mcpServers[name] = {
        type: "http",
        url: server.url,
        headers: server.headers,
      }
    }
  }
  return { mcpServers }
}
/**
 * Generate the plugin's index.ts entry point, which registers every converted
 * command with the OpenClaw API at load time.
 *
 * Command name/description/body are embedded via JSON.stringify so that quotes,
 * backslashes, and newlines inside command bodies cannot break the emitted
 * JavaScript source.
 */
function generateEntryPoint(commands: OpenClawCommandRegistration[]): string {
  const commandRegistrations = commands
    .map((cmd) => {
      const safeName = JSON.stringify(cmd.name)
      const safeDesc = JSON.stringify(cmd.description ?? "")
      const safeBody = JSON.stringify(cmd.body)
      // NOTE: the template below is emitted verbatim into the generated file,
      // so its internal whitespace is intentional.
      return ` api.registerCommand({
name: ${safeName},
description: ${safeDesc},
acceptsArgs: ${cmd.acceptsArgs},
requireAuth: false,
handler: () => ({
text: ${safeBody},
}),
});`
    })
    .join("\n\n")
  return `// Auto-generated OpenClaw plugin entry point
// Converted from Claude Code plugin format by compound-plugin CLI
export default function register(api) {
${commandRegistrations}
}
`
}
/** Rewrite Claude-specific paths in content to their OpenClaw equivalents. */
function rewritePaths(body: string): string {
  // Lookbehind keeps matches anchored to a path start (line start, whitespace, or quote).
  const homeRewritten = body.replace(/(?<=^|\s|["'`])~\/\.claude\//gm, "~/.openclaw/")
  const relativeRewritten = homeRewritten.replace(/(?<=^|\s|["'`])\.claude\//gm, ".openclaw/")
  return relativeRewritten.replace(/\.claude-plugin\//g, "openclaw-plugin/")
}
/** "compound-engineering" -> "Compound Engineering": title-case a kebab-case name. */
function formatDisplayName(name: string): string {
  const capitalize = (word: string) => word.charAt(0).toUpperCase() + word.slice(1)
  return name.split("-").map(capitalize).join(" ")
}

View File

@@ -80,6 +80,7 @@ export function convertClaudeToOpenCode(
applyPermissions(config, plugin.commands, options.permissions)
return {
pluginName: plugin.manifest.name,
config,
agents: agentFiles,
commandFiles: cmdFiles,
@@ -361,11 +362,6 @@ function applyPermissions(
}
const permission: Record<string, "allow" | "deny" | Record<string, "allow" | "deny">> = {}
const tools: Record<string, boolean> = {}
for (const tool of sourceTools) {
tools[tool] = mode === "broad" ? true : enabled.has(tool)
}
if (mode === "broad") {
for (const tool of sourceTools) {
@@ -414,7 +410,6 @@ function applyPermissions(
}
config.permission = permission
config.tools = tools
}
function normalizeTool(raw: string): string | null {

View File

@@ -35,6 +35,7 @@ export function convertClaudeToPi(
]
return {
pluginName: plugin.manifest.name,
prompts,
skillDirs: platformSkills.map((skill) => ({
name: skill.name,

View File

@@ -1,219 +0,0 @@
import { formatFrontmatter } from "../utils/frontmatter"
import { normalizeModelWithProvider } from "../utils/model"
import { type ClaudeAgent, type ClaudeCommand, type ClaudeMcpServer, type ClaudePlugin, filterSkillsByPlatform } from "../types/claude"
import type {
QwenAgentFile,
QwenBundle,
QwenCommandFile,
QwenExtensionConfig,
QwenMcpServer,
QwenSetting,
} from "../types/qwen"
// Options for the Qwen converter.
export type ClaudeToQwenOptions = {
  // NOTE(review): agentMode is not read anywhere in this converter's visible code —
  // confirm whether callers rely on it before removing.
  agentMode: "primary" | "subagent"
  // When true, a heuristic temperature is added to agent frontmatter (see inferTemperature).
  inferTemperature: boolean
}
/**
 * Convert a parsed Claude Code plugin into a Qwen Code extension bundle:
 * agents/commands become Qwen files, stdio MCP servers are carried over,
 * placeholder env vars become extension settings, and a context file
 * summarizing the plugin is generated.
 */
export function convertClaudeToQwen(plugin: ClaudePlugin, options: ClaudeToQwenOptions): QwenBundle {
  const platformSkills = filterSkillsByPlatform(plugin.skills, "qwen")
  const agentFiles = plugin.agents.map((agent) => convertAgent(agent, options))
  const cmdFiles = convertCommands(plugin.commands)
  const mcp = plugin.mcpServers ? convertMcp(plugin.mcpServers) : undefined
  const settings = extractSettings(plugin.mcpServers)
  // The commands/skills/agents directory names are fixed by the extension layout.
  const config: QwenExtensionConfig = {
    name: plugin.manifest.name,
    version: plugin.manifest.version || "1.0.0",
    commands: "commands",
    skills: "skills",
    agents: "agents",
  }
  // Only emit optional config sections when they have content.
  if (mcp && Object.keys(mcp).length > 0) {
    config.mcpServers = mcp
  }
  if (settings && settings.length > 0) {
    config.settings = settings
  }
  const contextFile = generateContextFile(plugin)
  return {
    config,
    agents: agentFiles,
    commandFiles: cmdFiles,
    skillDirs: platformSkills.map((skill) => ({ sourceDir: skill.sourceDir, name: skill.name })),
    contextFile,
  }
}
function convertAgent(agent: ClaudeAgent, options: ClaudeToQwenOptions): QwenAgentFile {
const frontmatter: Record<string, unknown> = {
name: agent.name,
description: agent.description,
}
if (agent.model && agent.model !== "inherit") {
frontmatter.model = normalizeModelWithProvider(agent.model)
}
if (options.inferTemperature) {
const temperature = inferTemperature(agent)
if (temperature !== undefined) {
frontmatter.temperature = temperature
}
}
// Qwen supports both YAML and Markdown for agents
// Using YAML format for structured config
const content = formatFrontmatter(frontmatter, rewriteQwenPaths(agent.body))
return {
name: agent.name,
content,
format: "yaml",
}
}
/** Convert model-invocable Claude commands into Qwen command files; disabled commands are dropped. */
function convertCommands(commands: ClaudeCommand[]): QwenCommandFile[] {
  return commands
    .filter((command) => !command.disableModelInvocation)
    .map((command) => {
      const frontmatter: Record<string, unknown> = {
        description: command.description,
      }
      if (command.model && command.model !== "inherit") {
        frontmatter.model = normalizeModelWithProvider(command.model)
      }
      if (command.allowedTools && command.allowedTools.length > 0) {
        frontmatter.allowedTools = command.allowedTools
      }
      const content = formatFrontmatter(frontmatter, rewriteQwenPaths(command.body))
      return { name: command.name, content }
    })
}
/** Map Claude MCP servers to Qwen's config, warning on (and skipping) remote servers. */
function convertMcp(servers: Record<string, ClaudeMcpServer>): Record<string, QwenMcpServer> {
  const result: Record<string, QwenMcpServer> = {}
  for (const [name, server] of Object.entries(servers)) {
    if (server.command) {
      result[name] = {
        command: server.command,
        args: server.args,
        env: server.env,
      }
    } else if (server.url) {
      // Qwen only supports stdio (command-based) MCP servers — skip remote servers
      console.warn(
        `Warning: Remote MCP server '${name}' (URL: ${server.url}) is not supported in Qwen format. Qwen only supports stdio MCP servers. Skipping.`,
      )
    }
  }
  return result
}
function extractSettings(mcpServers?: Record<string, ClaudeMcpServer>): QwenSetting[] {
const settings: QwenSetting[] = []
if (!mcpServers) return settings
for (const [name, server] of Object.entries(mcpServers)) {
if (server.env) {
for (const [envVar, value] of Object.entries(server.env)) {
// Only add settings for environment variables that look like placeholders
if (value.startsWith("${") || value.includes("YOUR_") || value.includes("XXX")) {
settings.push({
name: formatSettingName(envVar),
description: `Environment variable for ${name} MCP server`,
envVar,
sensitive: envVar.toLowerCase().includes("key") || envVar.toLowerCase().includes("token") || envVar.toLowerCase().includes("secret"),
})
}
}
}
}
return settings
}
/** "API_KEY" -> "Api Key": turn an environment variable name into a title-cased label. */
function formatSettingName(envVar: string): string {
  const spaced = envVar.replace(/_/g, " ").toLowerCase()
  return spaced.replace(/\b\w/g, (c) => c.toUpperCase())
}
/** Build the plugin context file: overview plus agent/command/skill listings in Markdown. */
function generateContextFile(plugin: ClaudePlugin): string {
  const lines: string[] = []
  const emit = (line: string) => lines.push(line)

  // Plugin header and optional description.
  emit(`# ${plugin.manifest.name}`)
  emit("")
  if (plugin.manifest.description) {
    emit(plugin.manifest.description)
    emit("")
  }
  // Agents section (only when the plugin has agents).
  if (plugin.agents.length > 0) {
    emit("## Agents")
    emit("")
    for (const agent of plugin.agents) {
      emit(`- **${agent.name}**: ${agent.description || "No description"}`)
    }
    emit("")
  }
  // Commands section — disabled (non-model-invocable) commands are omitted.
  if (plugin.commands.length > 0) {
    emit("## Commands")
    emit("")
    for (const command of plugin.commands) {
      if (!command.disableModelInvocation) {
        emit(`- **/${command.name}**: ${command.description || "No description"}`)
      }
    }
    emit("")
  }
  // Skills section, filtered to skills that apply to the Qwen platform.
  const qwenSkills = filterSkillsByPlatform(plugin.skills, "qwen")
  if (qwenSkills.length > 0) {
    emit("## Skills")
    emit("")
    for (const skill of qwenSkills) {
      emit(`- ${skill.name}`)
    }
    emit("")
  }
  return lines.join("\n")
}
/** Rewrite ~/.claude/ and .claude/ path references to their Qwen equivalents. */
function rewriteQwenPaths(body: string): string {
  const homeRewritten = body.replace(/(?<=^|\s|["'`])~\/\.claude\//gm, "~/.qwen/")
  return homeRewritten.replace(/(?<=^|\s|["'`])\.claude\//gm, ".qwen/")
}
/**
 * Heuristic temperature from the agent's name/description: review/audit-style
 * agents run cold, creative agents run warm. Returns undefined when no keyword
 * matches so the platform default applies. Tiers are checked in order; the
 * first match wins.
 */
function inferTemperature(agent: ClaudeAgent): number | undefined {
  const sample = `${agent.name} ${agent.description ?? ""}`.toLowerCase()
  const tiers: Array<[RegExp, number]> = [
    [/(review|audit|security|sentinel|oracle|lint|verification|guardian)/, 0.1],
    [/(plan|planning|architecture|strategist|analysis|research)/, 0.2],
    [/(doc|readme|changelog|editor|writer)/, 0.3],
    [/(brainstorm|creative|ideate|design|concept)/, 0.6],
  ]
  for (const [pattern, temperature] of tiers) {
    if (pattern.test(sample)) return temperature
  }
  return undefined
}

View File

@@ -1,212 +0,0 @@
import { formatFrontmatter } from "../utils/frontmatter"
import { sanitizePathName } from "../utils/files"
import { findServersWithPotentialSecrets } from "../utils/secrets"
import { type ClaudeAgent, type ClaudeCommand, type ClaudeMcpServer, type ClaudePlugin, filterSkillsByPlatform } from "../types/claude"
import type { WindsurfBundle, WindsurfGeneratedSkill, WindsurfMcpConfig, WindsurfMcpServerEntry, WindsurfWorkflow } from "../types/windsurf"
import type { ClaudeToOpenCodeOptions } from "./claude-to-opencode"
// Windsurf conversion takes the same options as the OpenCode converter.
export type ClaudeToOpenClawOptions = ClaudeToOpenCodeOptions
/**
 * Convert a parsed Claude Code plugin into a Windsurf bundle:
 * agents -> generated skills, commands -> workflows, MCP servers -> mcp config.
 * Hooks have no Windsurf equivalent and are dropped with a warning.
 */
export function convertClaudeToWindsurf(
  plugin: ClaudePlugin,
  _options: ClaudeToWindsurfOptions,
): WindsurfBundle {
  const knownAgentNames = plugin.agents.map((a) => normalizeName(a.name))
  // Pass-through skills (collected first so agent skill names can deduplicate against them)
  const skillDirs = filterSkillsByPlatform(plugin.skills, "windsurf").map((skill) => ({
    name: skill.name,
    sourceDir: skill.sourceDir,
  }))
  // Convert agents to skills (seed usedNames with sanitized pass-through skill names
  // so generated agent skills detect collisions that would occur on disk)
  const usedSkillNames = new Set<string>(skillDirs.map((s) => sanitizePathName(s.name)))
  const agentSkills = plugin.agents.map((agent) =>
    convertAgentToSkill(agent, knownAgentNames, usedSkillNames),
  )
  // Convert commands to workflows (workflows use their own name pool, separate
  // from skills, so a fresh Set is used here)
  const usedCommandNames = new Set<string>()
  const commandWorkflows = plugin.commands.map((command) =>
    convertCommandToWorkflow(command, knownAgentNames, usedCommandNames),
  )
  // Build MCP config
  const mcpConfig = buildMcpConfig(plugin.mcpServers)
  // Warn about hooks
  if (plugin.hooks && Object.keys(plugin.hooks.hooks).length > 0) {
    console.warn(
      "Warning: Windsurf has no hooks equivalent. Hooks were skipped during conversion.",
    )
  }
  return { agentSkills, commandWorkflows, skillDirs, mcpConfig }
}
/** Convert a Claude agent into a Windsurf skill file (Windsurf has no agent concept). */
function convertAgentToSkill(
  agent: ClaudeAgent,
  knownAgentNames: string[],
  usedNames: Set<string>,
): WindsurfGeneratedSkill {
  const skillName = uniqueName(normalizeName(agent.name), usedNames)
  const description = sanitizeDescription(
    agent.description ?? `Converted from Claude agent ${agent.name}`,
  )
  let skillBody = transformContentForWindsurf(agent.body.trim(), knownAgentNames)
  const capabilities = agent.capabilities ?? []
  if (capabilities.length > 0) {
    // Surface declared capabilities as a bullet list ahead of the instructions.
    const bulleted = capabilities.map((c) => `- ${c}`).join("\n")
    skillBody = `## Capabilities\n${bulleted}\n\n${skillBody}`.trim()
  }
  if (!skillBody) {
    skillBody = `Instructions converted from the ${agent.name} agent.`
  }
  const content =
    formatFrontmatter({ name: skillName, description }, `# ${skillName}\n\n${skillBody}`) + "\n"
  return { name: skillName, content }
}
/**
 * Convert a Claude command into a Windsurf workflow, warning when the rendered
 * file would exceed Windsurf's workflow character limit.
 */
function convertCommandToWorkflow(
  command: ClaudeCommand,
  knownAgentNames: string[],
  usedNames: Set<string>,
): WindsurfWorkflow {
  const workflowName = uniqueName(normalizeName(command.name), usedNames)
  const description = sanitizeDescription(
    command.description ?? `Converted from Claude command ${command.name}`,
  )
  let workflowBody = transformContentForWindsurf(command.body.trim(), knownAgentNames)
  if (command.argumentHint) {
    workflowBody = `> Arguments: ${command.argumentHint}\n\n${workflowBody}`
  }
  if (!workflowBody) {
    workflowBody = `Instructions converted from the ${command.name} command.`
  }
  // Render the full file once, purely to measure it against the size limit.
  const frontmatter: Record<string, unknown> = { description }
  const fullContent = formatFrontmatter(frontmatter, `# ${workflowName}\n\n${workflowBody}`)
  if (fullContent.length > WINDSURF_WORKFLOW_CHAR_LIMIT) {
    console.warn(
      `Warning: Workflow "${workflowName}" is ${fullContent.length} characters (limit: ${WINDSURF_WORKFLOW_CHAR_LIMIT}). It may be truncated by Windsurf.`,
    )
  }
  return { name: workflowName, description, body: workflowBody }
}
/**
 * Transform Claude Code content to Windsurf-compatible content.
 *
 * 1. Path rewriting: .claude/ -> .windsurf/, ~/.claude/ -> ~/.codeium/windsurf/
 * 2. Slash command refs: /workflows:plan -> /workflows-plan (Windsurf invokes workflows as /{name})
 * 3. @agent-name refs: kept as @agent-name (already Windsurf skill invocation syntax)
 * 4. Task agent calls: Task agent-name(args) -> Use the @agent-name skill: args
 */
export function transformContentForWindsurf(body: string, knownAgentNames: string[] = []): string {
  // NOTE(review): knownAgentNames is never read below — step 3 is a deliberate
  // no-op, so the parameter currently only preserves the call signature. Confirm
  // before removing it.
  let result = body
  // 1. Rewrite paths
  result = result.replace(/(?<=^|\s|["'`])~\/\.claude\//gm, "~/.codeium/windsurf/")
  result = result.replace(/(?<=^|\s|["'`])\.claude\//gm, ".windsurf/")
  // 2. Slash command refs: /workflows:plan -> /workflows-plan (Windsurf invokes as /{name}).
  // Surrounding backticks, if any, are stripped by the optional `? groups.
  result = result.replace(/(?<=^|\s)`?\/([a-zA-Z][a-zA-Z0-9_:-]*)`?/gm, (_match, cmdName: string) => {
    const workflowName = normalizeName(cmdName)
    return `/${workflowName}`
  })
  // 3. @agent-name references: no transformation needed.
  // In Windsurf, @skill-name is the native invocation syntax for skills.
  // Since agents are now mapped to skills, @agent-name already works correctly.
  // 4. Transform Task agent calls to skill references (supports namespaced names)
  const taskPattern = /^(\s*-?\s*)Task\s+([a-z][a-z0-9:-]*)\(([^)]*)\)/gm
  result = result.replace(taskPattern, (_match, prefix: string, agentName: string, args: string) => {
    // Namespaced "plugin:agent" names collapse to the final segment.
    const finalSegment = agentName.includes(":") ? agentName.split(":").pop()! : agentName
    const skillRef = normalizeName(finalSegment)
    const trimmedArgs = args.trim()
    return trimmedArgs
      ? `${prefix}Use the @${skillRef} skill: ${trimmedArgs}`
      : `${prefix}Use the @${skillRef} skill`
  })
  return result
}
/**
 * Build Windsurf's MCP config from the plugin's MCP servers. Returns null when
 * there is nothing to write. Env vars that look like secrets are flagged with a
 * warning but intentionally not redacted — the config must stay functional.
 */
function buildMcpConfig(servers?: Record<string, ClaudeMcpServer>): WindsurfMcpConfig | null {
  if (!servers || Object.keys(servers).length === 0) return null
  const result: Record<string, WindsurfMcpServerEntry> = {}
  for (const [name, server] of Object.entries(servers)) {
    let entry: WindsurfMcpServerEntry
    if (server.command) {
      // stdio transport
      entry = { command: server.command }
      if (server.args?.length) entry.args = server.args
    } else if (server.url) {
      // HTTP/SSE transport
      entry = { serverUrl: server.url }
      if (server.headers && Object.keys(server.headers).length > 0) entry.headers = server.headers
    } else {
      console.warn(`Warning: MCP server "${name}" has no command or URL. Skipping.`)
      continue
    }
    // Env applies to both transports.
    if (server.env && Object.keys(server.env).length > 0) entry.env = server.env
    result[name] = entry
  }
  if (Object.keys(result).length === 0) return null
  // Warn about secrets (don't redact — they're needed for the config to work)
  const flagged = findServersWithPotentialSecrets(result)
  if (flagged.length > 0) {
    console.warn(
      `Warning: MCP servers contain env vars that may include secrets: ${flagged.join(", ")}.\n` +
        " These will be written to mcp_config.json. Review before sharing the config file.",
    )
  }
  return { mcpServers: result }
}
/**
 * Slugify an arbitrary name: lowercase, with slashes/colons/whitespace and other
 * punctuation collapsed to single dashes. Falls back to "item" when nothing
 * usable remains or the result doesn't start with a letter.
 */
export function normalizeName(value: string): string {
  const trimmed = value.trim()
  if (!trimmed) return "item"
  const slug = trimmed
    .toLowerCase()
    .replace(/[\\/]+/g, "-")
    .replace(/[:\s]+/g, "-")
    .replace(/[^a-z0-9_-]+/g, "-")
    .replace(/-+/g, "-")
    .replace(/^-+|-+$/g, "")
  return slug.length > 0 && /^[a-z]/.test(slug) ? slug : "item"
}
/** Collapse all whitespace runs (including newlines) to single spaces and trim. */
function sanitizeDescription(value: string): string {
  const collapsed = value.replace(/\s+/g, " ")
  return collapsed.trim()
}
/** Return `base`, or the first free `base-2`, `base-3`, …, recording the result in `used`. */
function uniqueName(base: string, used: Set<string>): string {
  let candidate = base
  for (let suffix = 2; used.has(candidate); suffix += 1) {
    candidate = `${base}-${suffix}`
  }
  used.add(candidate)
  return candidate
}

View File

@@ -0,0 +1,620 @@
import type { CodexBundle } from "../types/codex"
import type { CopilotBundle } from "../types/copilot"
import type { DroidBundle } from "../types/droid"
import type { ClaudePlugin } from "../types/claude"
import type { GeminiBundle } from "../types/gemini"
import type { KiroBundle } from "../types/kiro"
import type { OpenCodeBundle } from "../types/opencode"
import type { PiBundle } from "../types/pi"
import { sanitizePathName } from "../utils/files"
import { normalizeCodexName } from "../utils/codex-content"
// Names of historical artifacts a plugin may have left behind from older flat
// installs, grouped by kind. All fields optional: absent means "none known".
type LegacyPluginArtifacts = {
  skills?: string[]
  agents?: string[]
  commands?: string[]
}
const EXTRA_LEGACY_ARTIFACTS_BY_PLUGIN: Record<string, LegacyPluginArtifacts> = {
"compound-engineering": {
// Historical CE artifacts derived from git history. Keep these explicit so
// cleanup can remove stale flat installs without touching unrelated skills.
skills: [
"agent-browser",
"agent-native-architecture",
"agent-native-audit",
"andrew-kane-gem-writer",
"brainstorming",
"ce:brainstorm",
"ce:compound",
"ce:compound-refresh",
"ce:ideate",
"ce:plan",
"ce:plan-beta",
"ce:polish-beta",
"ce:release-notes",
"ce:review",
"ce:review-beta",
"ce:work",
"ce:work-beta",
"ce-audit",
"ce-claude-permissions-optimizer",
"ce-design",
"ce-doctor",
"ce-document-review",
"ce-feature-video",
"ce-orchestrating-swarms",
"ce-plan-beta",
"ce-pr-stack",
"ce-reproduce-bug",
"ce-review",
"ce-review-beta",
"ce-update",
"changelog",
"claude-permissions-optimizer",
"compound-docs",
"compound-foundations",
"create-agent-skill",
"create-agent-skills",
"creating-agent-skills",
"deepen-plan",
"deepen-plan-beta",
"demo-reel",
"deploy-docs",
"dhh-rails-style",
"dhh-ruby-style",
"doctor",
"document-review",
"dspy-ruby",
"every-style-editor",
"evidence-capture",
"feature-video",
"file-todos",
"frontend-design",
"gemini-imagegen",
"generate_command",
"git-clean-gone-branches",
"git-commit",
"git-commit-push-pr",
"git-stack",
"git-worktree",
"heal-skill",
"onboarding",
"orchestrating-swarms",
"pr-resolve-feedback",
"proof",
"proofread",
"rclone",
"report-bug",
"report-bug-ce",
"reproduce-bug",
"resolve-pr-feedback",
"resolve-pr-parallel",
"resolve-todo-parallel",
"resolve_parallel",
"resolve_pr_parallel",
"resolve_todo_parallel",
"setup",
"skill-creator",
"slfg",
"test-browser",
"test-xcode",
"todo-create",
"todo-resolve",
"todo-triage",
"triage",
"workflows-brainstorm",
"workflows:brainstorm",
"workflows-compound",
"workflows:compound",
"workflows-plan",
"workflows:plan",
"workflows-review",
"workflows:review",
"workflows-work",
"workflows:work",
],
agents: [
"adversarial-document-reviewer",
"adversarial-reviewer",
"agent-native-reviewer",
"ankane-readme-writer",
"api-contract-reviewer",
"architecture-strategist",
"best-practices-researcher",
"bug-reproduction-validator",
"ce-bug-reproduction-validator",
"ce-lint",
"cli-agent-readiness-reviewer",
"cli-readiness-reviewer",
"code-simplicity-reviewer",
"coherence-reviewer",
"correctness-reviewer",
"data-integrity-guardian",
"data-migration-expert",
"data-migrations-reviewer",
"deployment-verification-agent",
"design-implementation-reviewer",
"design-iterator",
"design-lens-reviewer",
"dhh-rails-reviewer",
"every-style-editor",
"feasibility-reviewer",
"figma-design-sync",
"framework-docs-researcher",
"git-history-analyzer",
"issue-intelligence-analyst",
"julik-frontend-races-reviewer",
"kieran-python-reviewer",
"kieran-rails-reviewer",
"kieran-typescript-reviewer",
"learnings-researcher",
"lint",
"maintainability-reviewer",
"pattern-recognition-specialist",
"performance-oracle",
"performance-reviewer",
"pr-comment-resolver",
"pr-reviewability-analyst",
"previous-comments-reviewer",
"product-lens-reviewer",
"project-standards-reviewer",
"reliability-reviewer",
"repo-research-analyst",
"schema-drift-detector",
"scope-guardian-reviewer",
"security-lens-reviewer",
"security-reviewer",
"security-sentinel",
"session-historian",
"session-history-researcher",
"slack-researcher",
"spec-flow-analyzer",
"testing-reviewer",
"web-researcher",
],
commands: [
"agent-native-audit",
"build-website",
"ce:brainstorm",
"ce:compound",
"ce:plan",
"ce:review",
"ce:work",
"changelog",
"codify",
"compound",
"compound:codify",
"compound:plan",
"compound:review",
"compound:work",
"create-agent-skill",
"deepen-plan",
"deprecated:deepen-plan",
"deprecated:plan-review",
"deprecated:workflows-plan",
"deploy-docs",
"feature-video",
"generate_command",
"heal-skill",
"lfg",
"plan",
"plan_review",
"playwright-test",
"prime",
"release-docs",
"report-bug",
"reproduce-bug",
"review",
"resolve_parallel",
"resolve_pr_parallel",
"resolve_todo_parallel",
"setup",
"slfg",
"swarm-status",
"technical_review",
"test-browser",
"test-xcode",
"triage",
"work",
"workflows:brainstorm",
"workflows:codify",
"workflows:compound",
"workflows:plan",
"workflows:review",
"workflows:work",
"xcode-test",
],
},
}
/** Legacy artifact names for targets that store flat skills plus prompt files (Codex, Pi). */
export type LegacyTargetArtifacts = {
  /** Skill directory names to probe under the target's skills root. */
  skills: string[]
  /** Prompt file names (including the `.md` extension) to probe. */
  prompts: string[]
}
/** Legacy artifact names for targets with skills, agent files, and command files (Gemini). */
export type LegacyTargetFileArtifacts = {
  skills: string[]
  /** Agent file names (including extension). */
  agents: string[]
  /** Command file paths relative to the commands root (including extension). */
  commands: string[]
}
/** Legacy artifact names for Factory Droid (skills, flat commands, droid files). */
export type LegacyDroidArtifacts = {
  skills: string[]
  commands: string[]
  /** Droid definition file names (including `.md`). */
  droids: string[]
}
/** Legacy artifact names for OpenCode (skills, nested commands, agent files). */
export type LegacyOpenCodeArtifacts = {
  skills: string[]
  commands: string[]
  agents: string[]
}
/** Legacy artifact names for Kiro; commands historically landed as skills there. */
export type LegacyKiroArtifacts = {
  skills: string[]
  /** Agent names without extension. */
  agents: string[]
}
/** Legacy artifact names for GitHub Copilot; commands historically landed as skills. */
export type LegacyCopilotArtifacts = {
  skills: string[]
  /** Agent file names (including `.agent.md`). */
  agents: string[]
}
/** Legacy artifact names for Windsurf (skills plus workflow files). */
export type LegacyWindsurfArtifacts = {
  skills: string[]
  /** Workflow file names (including `.md`). */
  workflows: string[]
}
/**
 * Look up the historical artifact allow-list for a plugin.
 * Returns an empty record for unnamed plugins or plugins with no history.
 */
export function getLegacyPluginArtifacts(pluginName?: string): LegacyPluginArtifacts {
  if (!pluginName) return {}
  const entry = EXTRA_LEGACY_ARTIFACTS_BY_PLUGIN[pluginName]
  return entry ?? {}
}
/**
 * Compute stale Codex artifacts (flat `~/.codex/skills/<name>` dirs and
 * `~/.codex/prompts/<name>.md` files) left behind by earlier installers.
 *
 * SAFETY: candidates come ONLY from the historical allow-list
 * (`EXTRA_LEGACY_ARTIFACTS_BY_PLUGIN`). Seeding from the current bundle was
 * removed on purpose: on a first install, a user-authored skill at a flat
 * path that merely shared a name with a current CE skill or agent would have
 * been swept into `compound-engineering/legacy-backup`. The allow-list
 * enumerates every name CE ever shipped, so real legacy installs are still
 * cleaned up without touching unrelated user skills.
 */
export function getLegacyCodexArtifacts(bundle: CodexBundle): LegacyTargetArtifacts {
  const legacySkills = new Set<string>()
  const legacyPrompts = new Set<string>()
  // Prompt files the current install owns must never be flagged as legacy.
  const activePromptFiles = new Set<string>(
    bundle.prompts.map((prompt) => `${sanitizePathName(prompt.name)}.md`),
  )
  const extras = getLegacyPluginArtifacts(bundle.pluginName)
  for (const skillName of extras.skills ?? []) {
    // Codex additionally probed raw-colon directory names on macOS.
    addLegacySkillVariants(legacySkills, skillName, { includeRawColon: true })
  }
  for (const agentName of extras.agents ?? []) {
    legacySkills.add(normalizeCodexName(agentName))
  }
  for (const commandName of extras.commands ?? []) {
    const normalized = normalizeCodexName(commandName)
    legacySkills.add(normalized)
    const promptFile = `${normalized}.md`
    if (!activePromptFiles.has(promptFile)) {
      legacyPrompts.add(promptFile)
    }
  }
  return {
    skills: [...legacySkills].sort(),
    prompts: [...legacyPrompts].sort(),
  }
}
/**
 * Compute stale Pi artifacts (skills and prompt files) from the historical
 * allow-list, excluding anything the current bundle still installs.
 */
export function getLegacyPiArtifacts(bundle: PiBundle): LegacyTargetArtifacts {
  const legacySkills = new Set<string>()
  const legacyPrompts = new Set<string>()
  // Names the current install owns; never flag these as legacy.
  const activeSkills = new Set<string>([
    ...bundle.generatedSkills.map((skill) => normalizePiName(skill.name)),
    ...bundle.skillDirs.map((skill) => normalizePiName(skill.name)),
  ])
  const activePromptFiles = new Set<string>(
    bundle.prompts.map((prompt) => `${sanitizePathName(prompt.name)}.md`),
  )
  const extras = getLegacyPluginArtifacts(bundle.pluginName)
  for (const name of extras.skills ?? []) {
    addLegacySkillVariants(legacySkills, name, { currentSkills: activeSkills })
  }
  for (const name of extras.agents ?? []) {
    const candidate = normalizePiName(name)
    if (!activeSkills.has(candidate)) {
      legacySkills.add(candidate)
    }
  }
  for (const name of extras.commands ?? []) {
    const candidate = `${normalizePiName(name)}.md`
    if (!activePromptFiles.has(candidate)) {
      legacyPrompts.add(candidate)
    }
  }
  return {
    skills: [...legacySkills].sort(),
    prompts: [...legacyPrompts].sort(),
  }
}
// Compute stale Gemini artifacts (skills, agent files, nested command TOML
// paths) from the historical allow-list, excluding anything the current
// bundle still installs.
export function getLegacyGeminiArtifacts(bundle: GeminiBundle): LegacyTargetFileArtifacts {
  const skills = new Set<string>()
  const agents = new Set<string>()
  const commands = new Set<string>()
  // Artifacts owned by the current install; these are never flagged as legacy.
  const currentSkills = new Set<string>([
    ...bundle.generatedSkills.map((skill) => sanitizePathName(skill.name)),
    ...bundle.skillDirs.map((skill) => sanitizePathName(skill.name)),
  ])
  const currentAgents = new Set<string>((bundle.agents ?? []).map((agent) => `${sanitizePathName(agent.name)}.md`))
  // NOTE(review): entries here are flat `<name>.toml`, while `commandPath`
  // below is nested (`ce/plan.toml` for "ce:plan"). For namespaced names the
  // guard can only match if `command.name` already uses the nested form —
  // confirm against the Gemini writer.
  const currentCommands = new Set<string>(bundle.commands.map((command) => `${command.name}.toml`))
  const extras = getLegacyPluginArtifacts(bundle.pluginName)
  for (const name of extras.skills ?? []) {
    addLegacySkillVariants(skills, name, { currentSkills })
  }
  for (const name of extras.agents ?? []) {
    // A legacy agent may have left both a skill dir and an agent file behind.
    const skillName = normalizeLegacyName(name)
    if (!currentSkills.has(skillName)) {
      skills.add(skillName)
    }
    const agentPath = `${skillName}.md`
    if (!currentAgents.has(agentPath)) {
      agents.add(agentPath)
    }
  }
  for (const name of extras.commands ?? []) {
    // Namespaced commands ("ce:plan") map to nested TOML paths ("ce/plan.toml").
    const commandPath = toNestedCommandRelativePath(name, ".toml")
    if (!currentCommands.has(commandPath)) {
      commands.add(commandPath)
    }
  }
  return {
    skills: [...skills].sort(),
    agents: [...agents].sort(),
    commands: [...commands].sort(),
  }
}
/**
 * Compute stale Factory Droid artifacts (skills, flat command files, droid
 * files) from the historical allow-list, excluding anything the current
 * bundle still installs.
 */
export function getLegacyDroidArtifacts(bundle: DroidBundle): LegacyDroidArtifacts {
  const legacySkills = new Set<string>()
  const legacyCommands = new Set<string>()
  const legacyDroids = new Set<string>()
  const activeSkills = new Set<string>(bundle.skillDirs.map((skill) => sanitizePathName(skill.name)))
  const activeCommands = new Set<string>(bundle.commands.map((command) => `${command.name}.md`))
  const activeDroids = new Set<string>(bundle.droids.map((droid) => `${sanitizePathName(droid.name)}.md`))
  const extras = getLegacyPluginArtifacts(bundle.pluginName)
  for (const name of extras.skills ?? []) {
    addLegacySkillVariants(legacySkills, name, { currentSkills: activeSkills })
  }
  for (const name of extras.agents ?? []) {
    const droidFile = `${normalizeLegacyName(name)}.md`
    if (!activeDroids.has(droidFile)) {
      legacyDroids.add(droidFile)
    }
  }
  for (const name of extras.commands ?? []) {
    // Droid commands were historically written flat: only the last namespace
    // segment survived ("ce:plan" -> "plan.md").
    const commandFile = `${flattenLegacyCommandName(name)}.md`
    if (!activeCommands.has(commandFile)) {
      legacyCommands.add(commandFile)
    }
  }
  return {
    skills: [...legacySkills].sort(),
    commands: [...legacyCommands].sort(),
    droids: [...legacyDroids].sort(),
  }
}
/**
 * Compute stale OpenCode artifacts (skills, nested command files, agent
 * files) from the historical allow-list, excluding anything the current
 * bundle still installs.
 */
export function getLegacyOpenCodeArtifacts(bundle: OpenCodeBundle): LegacyOpenCodeArtifacts {
  const legacySkills = new Set<string>()
  const legacyCommands = new Set<string>()
  const legacyAgents = new Set<string>()
  const activeSkills = new Set<string>(bundle.skillDirs.map((skill) => sanitizePathName(skill.name)))
  const activeCommands = new Set<string>(
    bundle.commandFiles.map((command) => toRawCommandRelativePath(command.name, ".md")),
  )
  const activeAgents = new Set<string>(bundle.agents.map((agent) => `${sanitizePathName(agent.name)}.md`))
  const extras = getLegacyPluginArtifacts(bundle.pluginName)
  for (const name of extras.skills ?? []) {
    addLegacySkillVariants(legacySkills, name, { currentSkills: activeSkills })
  }
  for (const name of extras.agents ?? []) {
    const agentFile = `${sanitizePathName(name)}.md`
    if (!activeAgents.has(agentFile)) {
      legacyAgents.add(agentFile)
    }
  }
  for (const name of extras.commands ?? []) {
    // Namespaced names map to nested relative paths ("ce:plan" -> "ce/plan.md").
    const commandFile = toRawCommandRelativePath(name, ".md")
    if (!activeCommands.has(commandFile)) {
      legacyCommands.add(commandFile)
    }
  }
  return {
    skills: [...legacySkills].sort(),
    commands: [...legacyCommands].sort(),
    agents: [...legacyAgents].sort(),
  }
}
/**
 * Compute stale Kiro artifacts (skills and agents) from the historical
 * allow-list, excluding anything the current bundle still installs.
 */
export function getLegacyKiroArtifacts(bundle: KiroBundle): LegacyKiroArtifacts {
  const skills = new Set<string>()
  const agents = new Set<string>()
  // Artifacts owned by the current install; never flagged as legacy.
  const currentSkills = new Set<string>([
    ...bundle.generatedSkills.map((skill) => sanitizePathName(skill.name)),
    ...bundle.skillDirs.map((skill) => sanitizePathName(skill.name)),
  ])
  const currentAgents = new Set<string>(bundle.agents.map((agent) => sanitizePathName(agent.name)))
  const extras = getLegacyPluginArtifacts(bundle.pluginName)
  for (const name of extras.skills ?? []) {
    addLegacySkillVariants(skills, name, { currentSkills })
  }
  for (const name of extras.agents ?? []) {
    // Kiro uses the same normalized name for an agent and its skill mirror,
    // so normalize once and reuse it for both probes (previously computed twice).
    const legacyName = normalizeLegacyName(name)
    if (!currentSkills.has(legacyName)) {
      skills.add(legacyName)
    }
    if (!currentAgents.has(legacyName)) {
      agents.add(legacyName)
    }
  }
  for (const name of extras.commands ?? []) {
    // Commands historically landed as skills on Kiro; probe both the fully
    // normalized name and the flattened final-segment variant.
    for (const skillName of legacyCommandSkillNames(name)) {
      if (!currentSkills.has(skillName)) {
        skills.add(skillName)
      }
    }
  }
  return {
    skills: [...skills].sort(),
    agents: [...agents].sort(),
  }
}
/**
 * Compute stale GitHub Copilot artifacts (skills and agent files) from the
 * historical allow-list, excluding anything the current bundle still installs.
 */
export function getLegacyCopilotArtifacts(bundle: CopilotBundle): LegacyCopilotArtifacts {
  const legacySkills = new Set<string>()
  const legacyAgents = new Set<string>()
  const activeSkills = new Set<string>([
    ...bundle.generatedSkills.map((skill) => sanitizePathName(skill.name)),
    ...bundle.skillDirs.map((skill) => sanitizePathName(skill.name)),
  ])
  const activeAgents = new Set<string>(
    bundle.agents.map((agent) => `${sanitizePathName(agent.name)}.agent.md`),
  )
  const extras = getLegacyPluginArtifacts(bundle.pluginName)
  for (const name of extras.skills ?? []) {
    addLegacySkillVariants(legacySkills, name, { currentSkills: activeSkills })
  }
  for (const name of extras.agents ?? []) {
    const agentFile = `${normalizeLegacyName(name)}.agent.md`
    if (!activeAgents.has(agentFile)) {
      legacyAgents.add(agentFile)
    }
  }
  for (const name of extras.commands ?? []) {
    // Commands historically landed as generated skills on Copilot.
    for (const candidate of legacyCommandSkillNames(name)) {
      if (!activeSkills.has(candidate)) {
        legacySkills.add(candidate)
      }
    }
  }
  return {
    skills: [...legacySkills].sort(),
    agents: [...legacyAgents].sort(),
  }
}
/**
 * Compute stale Windsurf artifacts (flat skill dirs and workflow files).
 *
 * SAFETY: detection is driven exclusively by the historical allow-list in
 * `EXTRA_LEGACY_ARTIFACTS_BY_PLUGIN`. The Windsurf writer has been removed,
 * so this code's only job is backing up stale files from past installs.
 * Earlier versions also seeded candidates from the current plugin bundle,
 * which could sweep user-authored content (e.g. `skills/ce-debug` or
 * `global_workflows/ce-plan.md`) into `compound-engineering/legacy-backup`
 * merely because it shared a name with a current CE skill/agent/command.
 * The allow-list covers every name CE ever shipped, so legacy installs are
 * still cleaned; an empty allow-list makes this a no-op — the correct
 * safety default.
 */
export function getLegacyWindsurfArtifacts(plugin: ClaudePlugin): LegacyWindsurfArtifacts {
  const legacySkills = new Set<string>()
  const legacyWorkflows = new Set<string>()
  const extras = getLegacyPluginArtifacts(plugin.manifest.name)
  for (const name of extras.skills ?? []) {
    legacySkills.add(sanitizePathName(name))
  }
  for (const name of extras.agents ?? []) {
    legacySkills.add(normalizeLegacyName(name))
  }
  for (const name of extras.commands ?? []) {
    legacyWorkflows.add(`${normalizeLegacyName(name)}.md`)
  }
  return {
    skills: [...legacySkills].sort(),
    workflows: [...legacyWorkflows].sort(),
  }
}
// Pi shares the generic legacy normalization; the alias exists purely so
// Pi-specific call sites read clearly.
function normalizePiName(value: string): string {
  const normalized = normalizeLegacyName(value)
  return normalized
}
/**
 * Add probe name variants for one legacy skill to `skills`, skipping any
 * variant the current install still owns (per `options.currentSkills`).
 */
function addLegacySkillVariants(
  skills: Set<string>,
  name: string,
  options: { currentSkills?: Set<string>; includeRawColon?: boolean } = {},
): void {
  const stillInstalled = (candidate: string): boolean =>
    options.currentSkills?.has(candidate) === true
  const sanitized = sanitizePathName(name)
  if (!stillInstalled(sanitized)) {
    skills.add(sanitized)
  }
  // Codex historically accepted raw colon directory names on macOS
  // (for example ~/.codex/skills/ce:plan); other targets sanitized them,
  // so raw-colon probing stays opt-in per target.
  const probeRawColon = options.includeRawColon ?? false
  if (probeRawColon && name.includes(":") && !stillInstalled(name)) {
    skills.add(name)
  }
}
/**
 * Normalize an arbitrary artifact name to a lowercase dash-separated slug:
 * path separators, colons, and whitespace become dashes, any other
 * disallowed character becomes a dash, runs collapse, and edge dashes are
 * stripped. Empty results fall back to "item".
 */
function normalizeLegacyName(value: string): string {
  const FALLBACK = "item"
  const stripped = value.trim()
  if (!stripped) return FALLBACK
  let slug = stripped.toLowerCase()
  slug = slug.replace(/[\\/]+/g, "-")
  slug = slug.replace(/[:\s]+/g, "-")
  slug = slug.replace(/[^a-z0-9_-]+/g, "-")
  slug = slug.replace(/-+/g, "-")
  slug = slug.replace(/^-+|-+$/g, "")
  return slug || FALLBACK
}
// "ce:plan" -> "plan": legacy flat targets kept only the segment after the
// last namespace separator before normalizing.
function flattenLegacyCommandName(value: string): string {
  const segments = value.split(":")
  const lastSegment = segments[segments.length - 1] ?? value
  return normalizeLegacyName(lastSegment)
}
// A namespaced command may have been installed under its full normalized
// name or its flattened final segment; return both (deduplicated).
function legacyCommandSkillNames(value: string): string[] {
  const candidates = new Set<string>([
    normalizeLegacyName(value),
    flattenLegacyCommandName(value),
  ])
  return Array.from(candidates)
}
// "ce:plan" + ".toml" -> "ce/plan.toml": each namespace segment becomes a
// normalized directory segment.
function toNestedCommandRelativePath(value: string, ext: string): string {
  const segments = value.split(":").map((segment) => normalizeLegacyName(segment))
  return segments.join("/") + ext
}
// Like toNestedCommandRelativePath, but segments use the target-native
// sanitizer rather than the legacy normalizer.
function toRawCommandRelativePath(value: string, ext: string): string {
  const sanitizedSegments = value.split(":").map((segment) => sanitizePathName(segment))
  return sanitizedSegments.join("/").concat(ext)
}

View File

@@ -2,10 +2,10 @@
import { defineCommand, runMain } from "citty"
import packageJson from "../package.json"
import convert from "./commands/convert"
import cleanup from "./commands/cleanup"
import install from "./commands/install"
import listCommand from "./commands/list"
import pluginPath from "./commands/plugin-path"
import sync from "./commands/sync"
const main = defineCommand({
meta: {
@@ -14,11 +14,11 @@ const main = defineCommand({
description: "Convert Claude Code plugins into other agent formats",
},
subCommands: {
cleanup: () => cleanup,
convert: () => convert,
install: () => install,
list: () => listCommand,
"plugin-path": () => pluginPath,
sync: () => sync,
},
})

View File

@@ -1,127 +0,0 @@
import path from "path"
import os from "os"
import fs from "fs/promises"
import { parseFrontmatter } from "../utils/frontmatter"
import { walkFiles } from "../utils/files"
import type { ClaudeCommand, ClaudeSkill, ClaudeMcpServer } from "../types/claude"
/** Personal Claude Code configuration discovered under the user's Claude home dir. */
export interface ClaudeHomeConfig {
  /** Skills found under `<home>/skills` (directories or symlinks containing SKILL.md). */
  skills: ClaudeSkill[]
  /** Slash commands parsed from markdown files under `<home>/commands`, if any. */
  commands?: ClaudeCommand[]
  /** MCP server definitions read from `<home>/settings.json`. */
  mcpServers: Record<string, ClaudeMcpServer>
}
/**
 * Load the user's personal Claude home config (skills, commands, MCP
 * servers), defaulting to `~/.claude` when no directory is given. The three
 * sources are read concurrently; each degrades to empty on missing files.
 */
export async function loadClaudeHome(claudeHome?: string): Promise<ClaudeHomeConfig> {
  const homeDir = claudeHome ?? path.join(os.homedir(), ".claude")
  const skillsPromise = loadPersonalSkills(path.join(homeDir, "skills"))
  const commandsPromise = loadPersonalCommands(path.join(homeDir, "commands"))
  const mcpPromise = loadSettingsMcp(path.join(homeDir, "settings.json"))
  const [skills, commands, mcpServers] = await Promise.all([
    skillsPromise,
    commandsPromise,
    mcpPromise,
  ])
  return { skills, commands, mcpServers }
}
// Discover personal skills: each immediate child directory (or symlink to
// one) of skillsDir that contains a SKILL.md becomes a skill. Returns []
// when the skills directory does not exist.
async function loadPersonalSkills(skillsDir: string): Promise<ClaudeSkill[]> {
  try {
    const entries = await fs.readdir(skillsDir, { withFileTypes: true })
    const skills: ClaudeSkill[] = []
    for (const entry of entries) {
      // Check if directory or symlink (symlinks are common for skills)
      if (!entry.isDirectory() && !entry.isSymbolicLink()) continue
      const entryPath = path.join(skillsDir, entry.name)
      const skillPath = path.join(entryPath, "SKILL.md")
      try {
        // Probe for SKILL.md; absence skips the entry via the catch below.
        await fs.access(skillPath)
        // Resolve symlink to get the actual source directory
        const sourceDir = entry.isSymbolicLink()
          ? await fs.realpath(entryPath)
          : entryPath
        let data: Record<string, unknown> = {}
        try {
          const raw = await fs.readFile(skillPath, "utf8")
          data = parseFrontmatter(raw, skillPath).data
        } catch {
          // Keep syncing the skill even if frontmatter is malformed.
        }
        skills.push({
          // The directory name, not frontmatter, is the canonical skill name.
          name: entry.name,
          description: data.description as string | undefined,
          argumentHint: data["argument-hint"] as string | undefined,
          // Only an explicit frontmatter `true` is preserved.
          disableModelInvocation: data["disable-model-invocation"] === true ? true : undefined,
          sourceDir,
          skillPath,
        })
      } catch {
        // No SKILL.md, skip
      }
    }
    return skills
  } catch {
    return [] // Directory doesn't exist
  }
}
/**
 * Read MCP server definitions from settings.json. A missing file or
 * malformed JSON yields an empty record rather than an error.
 */
async function loadSettingsMcp(
  settingsPath: string,
): Promise<Record<string, ClaudeMcpServer>> {
  let raw: string
  try {
    raw = await fs.readFile(settingsPath, "utf-8")
  } catch {
    return {}
  }
  try {
    const settings = JSON.parse(raw) as { mcpServers?: Record<string, ClaudeMcpServer> }
    return settings.mcpServers ?? {}
  } catch {
    return {}
  }
}
// Load personal slash commands from markdown files anywhere under
// commandsDir. Returns [] when the directory is missing.
// NOTE(review): the catch wraps the whole loop, so a single unreadable or
// malformed file aborts the entire scan and returns [] — confirm intended.
async function loadPersonalCommands(commandsDir: string): Promise<ClaudeCommand[]> {
  try {
    // Sort for deterministic ordering across filesystems.
    const files = (await walkFiles(commandsDir))
      .filter((file) => file.endsWith(".md"))
      .sort()
    const commands: ClaudeCommand[] = []
    for (const file of files) {
      const raw = await fs.readFile(file, "utf8")
      const { data, body } = parseFrontmatter(raw, file)
      commands.push({
        // Frontmatter `name` wins; otherwise derive "dir:file" from the path.
        name: typeof data.name === "string" ? data.name : deriveCommandName(commandsDir, file),
        description: data.description as string | undefined,
        argumentHint: data["argument-hint"] as string | undefined,
        model: data.model as string | undefined,
        allowedTools: parseAllowedTools(data["allowed-tools"]),
        // Only an explicit frontmatter `true` is preserved.
        disableModelInvocation: data["disable-model-invocation"] === true ? true : undefined,
        body: body.trim(),
        sourcePath: file,
      })
    }
    return commands
  } catch {
    return []
  }
}
// commands/foo/bar.md -> "foo:bar": nested directories become colon-separated
// namespaces; the .md extension is dropped case-insensitively.
function deriveCommandName(commandsDir: string, filePath: string): string {
  const relativePath = path.relative(commandsDir, filePath)
  const segments = relativePath.replace(/\.md$/i, "").split(path.sep)
  return segments.join(":")
}
/**
 * Parse the frontmatter `allowed-tools` field, which may be a YAML list or a
 * comma-separated string. Falsy input yields undefined; unknown shapes too.
 */
function parseAllowedTools(value: unknown): string[] | undefined {
  if (!value) return undefined
  if (Array.isArray(value)) {
    return value.map((item) => String(item))
  }
  if (typeof value !== "string") return undefined
  const tools = value
    .split(",")
    .map((part) => part.trim())
    .filter(Boolean)
  return tools
}

View File

@@ -1,31 +0,0 @@
import fs from "fs/promises"
import path from "path"
import type { ClaudeHomeConfig } from "../parsers/claude-home"
import { mergeCodexConfig, renderCodexConfig } from "../targets/codex"
import { writeTextSecure } from "../utils/files"
import { syncCodexCommands } from "./commands"
import { syncSkills } from "./skills"
// Sync the personal Claude home config into a Codex home directory: skills,
// command prompt files, and MCP servers merged into config.toml.
export async function syncToCodex(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  await syncSkills(config.skills, path.join(outputRoot, "skills"))
  await syncCodexCommands(config, outputRoot)
  // Write MCP servers to config.toml, or clean up stale managed block if none remain
  const configPath = path.join(outputRoot, "config.toml")
  let existingContent = ""
  try {
    existingContent = await fs.readFile(configPath, "utf-8")
  } catch (err) {
    // A missing config.toml is fine (fresh install); rethrow anything else.
    if ((err as NodeJS.ErrnoException).code !== "ENOENT") {
      throw err
    }
  }
  const mcpToml = renderCodexConfig(config.mcpServers)
  // mergeCodexConfig signals "no write needed" with null.
  const merged = mergeCodexConfig(existingContent, mcpToml)
  if (merged !== null) {
    await writeTextSecure(configPath, merged)
  }
}

View File

@@ -1,198 +0,0 @@
import path from "path"
import type { ClaudeHomeConfig } from "../parsers/claude-home"
import type { ClaudePlugin } from "../types/claude"
import { backupFile, resolveCommandPath, sanitizePathName, writeText } from "../utils/files"
import { convertClaudeToCodex } from "../converters/claude-to-codex"
import { convertClaudeToCopilot } from "../converters/claude-to-copilot"
import { convertClaudeToDroid } from "../converters/claude-to-droid"
import { convertClaudeToGemini } from "../converters/claude-to-gemini"
import { convertClaudeToKiro } from "../converters/claude-to-kiro"
import { convertClaudeToOpenCode, type ClaudeToOpenCodeOptions } from "../converters/claude-to-opencode"
import { convertClaudeToPi } from "../converters/claude-to-pi"
import { convertClaudeToQwen, type ClaudeToQwenOptions } from "../converters/claude-to-qwen"
import { convertClaudeToWindsurf } from "../converters/claude-to-windsurf"
import { writeWindsurfBundle } from "../targets/windsurf"
// Whether Windsurf assets are written to the user-global or workspace root.
type WindsurfSyncScope = "global" | "workspace"
// Synthetic plugin root used for home sync; converters only need a stable path.
const HOME_SYNC_PLUGIN_ROOT = path.join(process.cwd(), ".compound-sync-home")
// Conservative defaults shared by all home-sync conversions.
const DEFAULT_SYNC_OPTIONS: ClaudeToOpenCodeOptions = {
  agentMode: "subagent",
  inferTemperature: false,
  permissions: "none",
}
// Qwen variant omits `permissions` — presumably unsupported by
// ClaudeToQwenOptions; verify against that type if it changes.
const DEFAULT_QWEN_SYNC_OPTIONS: ClaudeToQwenOptions = {
  agentMode: "subagent",
  inferTemperature: false,
}
// True when the home config defines at least one personal command; a missing
// commands array counts as empty.
function hasCommands(config: ClaudeHomeConfig): boolean {
  const commandCount = config.commands?.length ?? 0
  return commandCount > 0
}
// Wrap the personal home config in a synthetic plugin so the regular
// plugin-to-target converters can be reused for home sync.
function buildClaudeHomePlugin(config: ClaudeHomeConfig): ClaudePlugin {
  const syntheticPlugin: ClaudePlugin = {
    root: HOME_SYNC_PLUGIN_ROOT,
    manifest: {
      name: "claude-home",
      version: "1.0.0",
      description: "Personal Claude Code home config",
    },
    agents: [],
    commands: config.commands ?? [],
    skills: config.skills,
    mcpServers: undefined,
  }
  return syntheticPlugin
}
// Sync personal commands into OpenCode's commands/ directory. Unlike most
// targets, any pre-existing file at the destination is backed up first so a
// user edit is never silently overwritten.
export async function syncOpenCodeCommands(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  // No-op when the home config defines no commands.
  if (!hasCommands(config)) return
  const plugin = buildClaudeHomePlugin(config)
  const bundle = convertClaudeToOpenCode(plugin, DEFAULT_SYNC_OPTIONS)
  for (const commandFile of bundle.commandFiles) {
    // resolveCommandPath picks the on-disk path for this command name.
    const commandPath = await resolveCommandPath(path.join(outputRoot, "commands"), commandFile.name, ".md")
    const backupPath = await backupFile(commandPath)
    if (backupPath) {
      console.log(`Backed up existing command file to ${backupPath}`)
    }
    await writeText(commandPath, commandFile.content + "\n")
  }
}
// Sync personal commands to Codex. The converter emits both prompt files
// (prompts/<name>.md) and generated skills (skills/<name>/SKILL.md); both
// artifact sets are written.
export async function syncCodexCommands(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  // No-op when the home config defines no commands.
  if (!hasCommands(config)) return
  const plugin = buildClaudeHomePlugin(config)
  const bundle = convertClaudeToCodex(plugin, DEFAULT_SYNC_OPTIONS)
  for (const prompt of bundle.prompts) {
    await writeText(path.join(outputRoot, "prompts", `${prompt.name}.md`), prompt.content + "\n")
  }
  for (const skill of bundle.generatedSkills) {
    await writeText(path.join(outputRoot, "skills", sanitizePathName(skill.name), "SKILL.md"), skill.content + "\n")
  }
}
// Sync personal commands to Pi as prompt files plus extension files.
// Extension names are used as file names verbatim — presumably they already
// include their extension; confirm against convertClaudeToPi.
export async function syncPiCommands(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  // No-op when the home config defines no commands.
  if (!hasCommands(config)) return
  const plugin = buildClaudeHomePlugin(config)
  const bundle = convertClaudeToPi(plugin, DEFAULT_SYNC_OPTIONS)
  for (const prompt of bundle.prompts) {
    await writeText(path.join(outputRoot, "prompts", `${prompt.name}.md`), prompt.content + "\n")
  }
  for (const extension of bundle.extensions) {
    await writeText(path.join(outputRoot, "extensions", extension.name), extension.content + "\n")
  }
}
// Sync personal commands to Droid as flat markdown files under commands/.
export async function syncDroidCommands(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  if (!hasCommands(config)) return
  const bundle = convertClaudeToDroid(buildClaudeHomePlugin(config), DEFAULT_SYNC_OPTIONS)
  for (const command of bundle.commands) {
    const target = path.join(outputRoot, "commands", `${command.name}.md`)
    await writeText(target, `${command.content}\n`)
  }
}
// Sync personal commands to Copilot; commands ship as generated skills
// (skills/<name>/SKILL.md) since Copilot has no separate command surface here.
export async function syncCopilotCommands(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  if (!hasCommands(config)) return
  const bundle = convertClaudeToCopilot(buildClaudeHomePlugin(config), DEFAULT_SYNC_OPTIONS)
  for (const skill of bundle.generatedSkills) {
    const target = path.join(outputRoot, "skills", sanitizePathName(skill.name), "SKILL.md")
    await writeText(target, `${skill.content}\n`)
  }
}
// Sync personal commands to Gemini as TOML files under commands/.
export async function syncGeminiCommands(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  if (!hasCommands(config)) return
  const bundle = convertClaudeToGemini(buildClaudeHomePlugin(config), DEFAULT_SYNC_OPTIONS)
  for (const command of bundle.commands) {
    const target = path.join(outputRoot, "commands", `${command.name}.toml`)
    await writeText(target, `${command.content}\n`)
  }
}
// Sync personal commands to Kiro; commands ship as generated skills
// (skills/<name>/SKILL.md).
export async function syncKiroCommands(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  if (!hasCommands(config)) return
  const bundle = convertClaudeToKiro(buildClaudeHomePlugin(config), DEFAULT_SYNC_OPTIONS)
  for (const skill of bundle.generatedSkills) {
    const target = path.join(outputRoot, "skills", sanitizePathName(skill.name), "SKILL.md")
    await writeText(target, `${skill.content}\n`)
  }
}
// Sync personal commands to Windsurf as workflows via the bundle writer.
// Skills and MCP config are intentionally empty here — only command
// workflows are produced; `scope` selects global vs workspace placement.
export async function syncWindsurfCommands(
  config: ClaudeHomeConfig,
  outputRoot: string,
  scope: WindsurfSyncScope = "global",
): Promise<void> {
  // No-op when the home config defines no commands.
  if (!hasCommands(config)) return
  const plugin = buildClaudeHomePlugin(config)
  const bundle = convertClaudeToWindsurf(plugin, DEFAULT_SYNC_OPTIONS)
  await writeWindsurfBundle(outputRoot, {
    agentSkills: [],
    commandWorkflows: bundle.commandWorkflows,
    skillDirs: [],
    mcpConfig: null,
  }, scope)
}
// Sync personal commands to Qwen. Namespaced names ("ce:plan") become nested
// paths (commands/ce/plan.md); flat names stay flat.
export async function syncQwenCommands(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  if (!hasCommands(config)) return
  const bundle = convertClaudeToQwen(buildClaudeHomePlugin(config), DEFAULT_QWEN_SYNC_OPTIONS)
  for (const commandFile of bundle.commandFiles) {
    const segments = commandFile.name.split(":")
    const fileName = `${segments.pop()}.md`
    const target = path.join(outputRoot, "commands", ...segments, fileName)
    await writeText(target, `${commandFile.content}\n`)
  }
}
// OpenClaw has no documented user-level command surface yet, so personal
// commands cannot be synced — warn instead of failing silently.
export function warnUnsupportedOpenClawCommands(config: ClaudeHomeConfig): void {
  if (!hasCommands(config)) return
  console.warn(
    "Warning: OpenClaw personal command sync is skipped because this sync target currently has no documented user-level command surface.",
  )
}

View File

@@ -1,78 +0,0 @@
import path from "path"
import type { ClaudeHomeConfig } from "../parsers/claude-home"
import type { ClaudeMcpServer } from "../types/claude"
import { syncCopilotCommands } from "./commands"
import { mergeJsonConfigAtKey } from "./json-config"
import { hasExplicitSseTransport } from "./mcp-transports"
import { syncSkills } from "./skills"
// Shape of one MCP server entry in Copilot's mcp-config.json.
type CopilotMcpServer = {
  // "local" runs a command via stdio; "http"/"sse" point at a remote url.
  type: "local" | "http" | "sse"
  command?: string
  args?: string[]
  url?: string
  // Tool allow-list; this sync always writes ["*"] (see convertMcpForCopilot).
  tools: string[]
  env?: Record<string, string>
  headers?: Record<string, string>
}
// Top-level file shape for mcp-config.json.
type CopilotMcpConfig = {
  mcpServers: Record<string, CopilotMcpServer>
}
// Sync the personal home config to Copilot: skills, commands-as-skills, and
// MCP servers merged into mcp-config.json under "mcpServers".
export async function syncToCopilot(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  await syncSkills(config.skills, path.join(outputRoot, "skills"))
  await syncCopilotCommands(config, outputRoot)
  // Only touch mcp-config.json when there is at least one server to merge.
  if (Object.keys(config.mcpServers).length > 0) {
    const mcpPath = path.join(outputRoot, "mcp-config.json")
    const converted = convertMcpForCopilot(config.mcpServers)
    await mergeJsonConfigAtKey({
      configPath: mcpPath,
      key: "mcpServers",
      incoming: converted,
    })
  }
}
/**
 * Convert Claude MCP server definitions to Copilot's schema. A command
 * implies "local"; otherwise an explicit SSE transport yields "sse" and
 * everything else defaults to "http". Every server grants all tools ("*")
 * since Claude's config has no equivalent field.
 */
function convertMcpForCopilot(
  servers: Record<string, ClaudeMcpServer>,
): Record<string, CopilotMcpServer> {
  const converted: Record<string, CopilotMcpServer> = {}
  for (const [name, server] of Object.entries(servers)) {
    const transport: CopilotMcpServer["type"] = server.command
      ? "local"
      : hasExplicitSseTransport(server)
        ? "sse"
        : "http"
    const entry: CopilotMcpServer = { type: transport, tools: ["*"] }
    if (server.command) {
      entry.command = server.command
      if (server.args?.length) entry.args = server.args
    } else if (server.url) {
      entry.url = server.url
      if (server.headers && Object.keys(server.headers).length > 0) entry.headers = server.headers
    }
    if (server.env && Object.keys(server.env).length > 0) {
      // Copilot only exposes prefixed env vars to MCP servers.
      entry.env = prefixEnvVars(server.env)
    }
    converted[name] = entry
  }
  return converted
}
// Ensure every env var key carries the COPILOT_MCP_ prefix, leaving keys
// that are already prefixed untouched.
function prefixEnvVars(env: Record<string, string>): Record<string, string> {
  const prefixed: Record<string, string> = {}
  for (const [key, value] of Object.entries(env)) {
    const finalKey = key.startsWith("COPILOT_MCP_") ? key : `COPILOT_MCP_${key}`
    prefixed[finalKey] = value
  }
  return prefixed
}

View File

@@ -1,62 +0,0 @@
import path from "path"
import type { ClaudeHomeConfig } from "../parsers/claude-home"
import type { ClaudeMcpServer } from "../types/claude"
import { syncDroidCommands } from "./commands"
import { mergeJsonConfigAtKey } from "./json-config"
import { syncSkills } from "./skills"
// Shape of one MCP server entry in Droid's mcp.json.
type DroidMcpServer = {
  // "stdio" runs a local command; "http" points at a remote url.
  type: "stdio" | "http"
  command?: string
  args?: string[]
  env?: Record<string, string>
  url?: string
  headers?: Record<string, string>
  // Always written as false by this sync (see convertMcpForDroid).
  disabled: boolean
}
// Sync the personal home config to Droid: skills, flat command files, and
// MCP servers merged into mcp.json (the file is only touched when at least
// one server is defined).
export async function syncToDroid(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  await syncSkills(config.skills, path.join(outputRoot, "skills"))
  await syncDroidCommands(config, outputRoot)
  const serverNames = Object.keys(config.mcpServers)
  if (serverNames.length === 0) return
  await mergeJsonConfigAtKey({
    configPath: path.join(outputRoot, "mcp.json"),
    key: "mcpServers",
    incoming: convertMcpForDroid(config.mcpServers),
  })
}
/**
 * Convert Claude MCP server definitions to Droid's schema. A command wins
 * over a url when both are present (stdio transport); servers with neither
 * are dropped. All converted servers start enabled (disabled: false).
 */
function convertMcpForDroid(
  servers: Record<string, ClaudeMcpServer>,
): Record<string, DroidMcpServer> {
  const converted: Record<string, DroidMcpServer> = {}
  for (const [name, server] of Object.entries(servers)) {
    if (server.command) {
      converted[name] = {
        type: "stdio",
        command: server.command,
        args: server.args,
        env: server.env,
        disabled: false,
      }
    } else if (server.url) {
      converted[name] = {
        type: "http",
        url: server.url,
        headers: server.headers,
        disabled: false,
      }
    }
  }
  return converted
}

View File

@@ -1,136 +0,0 @@
import fs from "fs/promises"
import path from "path"
import type { ClaudeHomeConfig } from "../parsers/claude-home"
import type { ClaudeMcpServer } from "../types/claude"
import { sanitizePathName } from "../utils/files"
import { syncGeminiCommands } from "./commands"
import { mergeJsonConfigAtKey } from "./json-config"
import { syncSkills } from "./skills"
// Shape of one MCP server entry in Gemini's settings.json; the transport is
// implied by which of command/url is populated.
type GeminiMcpServer = {
  command?: string
  args?: string[]
  url?: string
  env?: Record<string, string>
  headers?: Record<string, string>
}
// Sync the personal home config to Gemini: skills (with shared-dir
// handling), command TOML files, and MCP servers merged into settings.json.
export async function syncToGemini(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  await syncGeminiSkills(config.skills, outputRoot)
  await syncGeminiCommands(config, outputRoot)
  // Only touch settings.json when there is at least one server to merge.
  if (Object.keys(config.mcpServers).length > 0) {
    const settingsPath = path.join(outputRoot, "settings.json")
    const converted = convertMcpForGemini(config.mcpServers)
    await mergeJsonConfigAtKey({
      configPath: settingsPath,
      key: "mcpServers",
      incoming: converted,
    })
  }
}
// Sync skills for Gemini, special-casing the sibling shared skills dir
// (<parent>/.agents/skills). Skills sourced from the shared dir are not
// copied again — only their stale symlink duplicates are removed
// (presumably Gemini reads the shared dir directly; confirm). All other
// skills are synced into <outputRoot>/skills as usual.
async function syncGeminiSkills(
  skills: ClaudeHomeConfig["skills"],
  outputRoot: string,
): Promise<void> {
  const skillsDir = path.join(outputRoot, "skills")
  const sharedSkillsDir = getGeminiSharedSkillsDir(outputRoot)
  if (!sharedSkillsDir) {
    // No shared dir concept for this output root; plain sync.
    await syncSkills(skills, skillsDir)
    return
  }
  const canonicalSharedSkillsDir = await canonicalizePath(sharedSkillsDir)
  // Partition: skills whose source lives inside the shared dir vs the rest.
  const mirroredSkills: ClaudeHomeConfig["skills"] = []
  const directSkills: ClaudeHomeConfig["skills"] = []
  for (const skill of skills) {
    if (await isWithinDir(skill.sourceDir, canonicalSharedSkillsDir)) {
      mirroredSkills.push(skill)
    } else {
      directSkills.push(skill)
    }
  }
  await removeGeminiMirrorConflicts(mirroredSkills, skillsDir, canonicalSharedSkillsDir)
  await syncSkills(directSkills, skillsDir)
}
// Shared skills live at <parent>/.agents/skills, but only when syncing into
// a directory literally named ".gemini"; otherwise there is no shared dir.
function getGeminiSharedSkillsDir(outputRoot: string): string | null {
  if (path.basename(outputRoot) !== ".gemini") return null
  const parent = path.dirname(outputRoot)
  return path.join(parent, ".agents", "skills")
}
// Symlink-resolved real path when the target exists on disk; plain
// resolution for paths that do not (yet) exist.
async function canonicalizePath(targetPath: string): Promise<string> {
  return fs.realpath(targetPath).catch(() => path.resolve(targetPath))
}
// True when candidate (after canonicalization) is the parent dir itself or
// any descendant. The trailing separator guards against sibling-prefix
// false positives like /a/bc matching /a/b.
async function isWithinDir(candidate: string, canonicalParentDir: string): Promise<boolean> {
  const resolvedCandidate = await canonicalizePath(candidate)
  if (resolvedCandidate === canonicalParentDir) return true
  return resolvedCandidate.startsWith(canonicalParentDir + path.sep)
}
// Remove symlinks under the Gemini skills dir that duplicate skills already
// sourced from the shared skills dir. Only symlinks are removed; real
// directories or files with the same name are deliberately left alone.
async function removeGeminiMirrorConflicts(
  skills: ClaudeHomeConfig["skills"],
  skillsDir: string,
  sharedSkillsDir: string,
): Promise<void> {
  for (const skill of skills) {
    const duplicatePath = path.join(skillsDir, sanitizePathName(skill.name))
    let stat
    try {
      // lstat (not stat) so the symlink itself is observed, not its target.
      stat = await fs.lstat(duplicatePath)
    } catch (error) {
      if ((error as NodeJS.ErrnoException).code === "ENOENT") {
        continue
      }
      throw error
    }
    if (!stat.isSymbolicLink()) {
      continue
    }
    let resolvedTarget: string
    try {
      resolvedTarget = await canonicalizePath(duplicatePath)
    } catch {
      // NOTE(review): canonicalizePath never rejects (it falls back to
      // path.resolve), so this catch looks unreachable — confirm before
      // removing it.
      continue
    }
    // Unlink only when the symlink points at this skill's source or anywhere
    // inside the shared skills dir — i.e. it is a redundant mirror.
    if (resolvedTarget === await canonicalizePath(skill.sourceDir)
      || await isWithinDir(resolvedTarget, sharedSkillsDir)) {
      await fs.unlink(duplicatePath)
    }
  }
}
/**
 * Convert Claude MCP server definitions to Gemini's schema. Gemini infers
 * the transport from which fields are present, so only populated fields are
 * emitted; a command wins over a url when both exist.
 */
function convertMcpForGemini(
  servers: Record<string, ClaudeMcpServer>,
): Record<string, GeminiMcpServer> {
  const converted: Record<string, GeminiMcpServer> = {}
  for (const [name, server] of Object.entries(servers)) {
    const entry: GeminiMcpServer = {}
    if (server.command) {
      entry.command = server.command
      if (server.args?.length) entry.args = server.args
      if (server.env && Object.keys(server.env).length > 0) entry.env = server.env
    } else if (server.url) {
      entry.url = server.url
      if (server.headers && Object.keys(server.headers).length > 0) entry.headers = server.headers
    }
    converted[name] = entry
  }
  return converted
}

View File

@@ -1,49 +0,0 @@
import path from "path"
import type { ClaudeHomeConfig } from "../parsers/claude-home"
import type { ClaudeMcpServer } from "../types/claude"
import type { KiroMcpServer } from "../types/kiro"
import { syncKiroCommands } from "./commands"
import { mergeJsonConfigAtKey } from "./json-config"
import { syncSkills } from "./skills"
/**
 * Sync the parsed Claude Code config into a Kiro install tree.
 *
 * Skills are symlinked into `<outputRoot>/skills`, commands are written via
 * the Kiro command writer, and MCP servers (if any) are merged under the
 * "mcpServers" key of `<outputRoot>/settings/mcp.json`.
 */
export async function syncToKiro(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  await syncSkills(config.skills, path.join(outputRoot, "skills"))
  await syncKiroCommands(config, outputRoot)
  if (Object.keys(config.mcpServers).length > 0) {
    await mergeJsonConfigAtKey({
      configPath: path.join(outputRoot, "settings", "mcp.json"),
      key: "mcpServers",
      incoming: convertMcpForKiro(config.mcpServers),
    })
  }
}
/**
 * Convert Claude Code MCP server entries to Kiro's mcp.json shape.
 * Stdio servers map to command/args/env, remote ones to url/headers;
 * servers with neither a command nor a url are dropped.
 */
function convertMcpForKiro(
  servers: Record<string, ClaudeMcpServer>,
): Record<string, KiroMcpServer> {
  const converted: Record<string, KiroMcpServer> = {}
  for (const [name, server] of Object.entries(servers)) {
    if (server.command) {
      converted[name] = { command: server.command, args: server.args, env: server.env }
    } else if (server.url) {
      converted[name] = { url: server.url, headers: server.headers }
    }
  }
  return converted
}

View File

@@ -1,19 +0,0 @@
import type { ClaudeMcpServer } from "../types/claude"
/** Normalized transport string from a server's `type` field ("" when absent). */
function getTransportType(server: ClaudeMcpServer): string {
  const declared = server.type
  if (declared == null) return ""
  return declared.toLowerCase().trim()
}
/** True when the server's declared transport type mentions SSE. */
export function hasExplicitSseTransport(server: ClaudeMcpServer): boolean {
  return getTransportType(server).includes("sse")
}
/** True when the server's declared transport type mentions HTTP or streamable. */
export function hasExplicitHttpTransport(server: ClaudeMcpServer): boolean {
  const transport = getTransportType(server)
  return transport.includes("http") || transport.includes("streamable")
}
/** True when the server declares any remote transport (SSE or HTTP/streamable). */
export function hasExplicitRemoteTransport(server: ClaudeMcpServer): boolean {
  if (hasExplicitSseTransport(server)) return true
  return hasExplicitHttpTransport(server)
}

View File

@@ -1,18 +0,0 @@
import path from "path"
import type { ClaudeHomeConfig } from "../parsers/claude-home"
import { warnUnsupportedOpenClawCommands } from "./commands"
import { syncSkills } from "./skills"
/**
 * Sync the parsed Claude Code config into an OpenClaw install tree.
 *
 * Only skills are written (symlinked into `<outputRoot>/skills`). Commands and
 * MCP servers are not synced; unsupported commands are surfaced via warnings,
 * and MCP sync is deliberately skipped (see inline message for why).
 */
export async function syncToOpenClaw(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  await syncSkills(config.skills, path.join(outputRoot, "skills"))
  warnUnsupportedOpenClawCommands(config)
  if (Object.keys(config.mcpServers).length > 0) {
    console.warn(
      "Warning: OpenClaw MCP sync is skipped because the current official OpenClaw docs do not clearly document an MCP server config contract.",
    )
  }
}

View File

@@ -1,55 +0,0 @@
import path from "path"
import type { ClaudeHomeConfig } from "../parsers/claude-home"
import type { ClaudeMcpServer } from "../types/claude"
import type { OpenCodeMcpServer } from "../types/opencode"
import { syncOpenCodeCommands } from "./commands"
import { mergeJsonConfigAtKey } from "./json-config"
import { syncSkills } from "./skills"
/**
 * Sync the parsed Claude Code config into an OpenCode install tree.
 *
 * Skills are symlinked into `<outputRoot>/skills`, commands are written via
 * the OpenCode command writer, and MCP servers (if any) are merged under the
 * "mcp" key of `<outputRoot>/opencode.json`.
 */
export async function syncToOpenCode(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  await syncSkills(config.skills, path.join(outputRoot, "skills"))
  await syncOpenCodeCommands(config, outputRoot)
  // Merge MCP servers into opencode.json
  if (Object.keys(config.mcpServers).length > 0) {
    const configPath = path.join(outputRoot, "opencode.json")
    const mcpConfig = convertMcpForOpenCode(config.mcpServers)
    await mergeJsonConfigAtKey({
      configPath,
      key: "mcp",
      incoming: mcpConfig,
    })
  }
}
/**
 * Convert Claude Code MCP server entries to OpenCode's config shape.
 * Stdio servers become `type: "local"` with a single command array; remote
 * servers become `type: "remote"`. All entries are written enabled; servers
 * with neither a command nor a url are dropped.
 */
function convertMcpForOpenCode(
  servers: Record<string, ClaudeMcpServer>,
): Record<string, OpenCodeMcpServer> {
  const converted: Record<string, OpenCodeMcpServer> = {}
  for (const [name, server] of Object.entries(servers)) {
    if (server.command) {
      // OpenCode takes the executable and its arguments as one array.
      const commandLine = [server.command, ...(server.args ?? [])]
      converted[name] = {
        type: "local",
        command: commandLine,
        environment: server.env,
        enabled: true,
      }
    } else if (server.url) {
      converted[name] = {
        type: "remote",
        url: server.url,
        headers: server.headers,
        enabled: true,
      }
    }
  }
  return converted
}

View File

@@ -1,64 +0,0 @@
import path from "path"
import type { ClaudeHomeConfig } from "../parsers/claude-home"
import type { ClaudeMcpServer } from "../types/claude"
import { ensureDir } from "../utils/files"
import { syncPiCommands } from "./commands"
import { mergeJsonConfigAtKey } from "./json-config"
import { syncSkills } from "./skills"
// One entry in mcporter.json: either a stdio server (command/args/env) or a
// remote server (baseUrl); both may carry headers.
type McporterServer = {
  baseUrl?: string                  // remote server URL
  command?: string                  // stdio executable
  args?: string[]                   // stdio arguments
  env?: Record<string, string>      // stdio environment
  headers?: Record<string, string>  // HTTP headers
}
// Top-level shape of the mcporter.json document written for Pi.
type McporterConfig = {
  mcpServers: Record<string, McporterServer>
}
/**
 * Sync the parsed Claude Code config into a Pi install tree.
 *
 * Skills are symlinked into `<outputRoot>/skills`, commands are written via
 * the Pi command writer, and MCP servers (if any) are merged under the
 * "mcpServers" key of `<outputRoot>/compound-engineering/mcporter.json`.
 */
export async function syncToPi(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  const mcporterPath = path.join(outputRoot, "compound-engineering", "mcporter.json")
  await syncSkills(config.skills, path.join(outputRoot, "skills"))
  await syncPiCommands(config, outputRoot)
  if (Object.keys(config.mcpServers).length > 0) {
    await ensureDir(path.dirname(mcporterPath))
    const converted = convertMcpToMcporter(config.mcpServers)
    await mergeJsonConfigAtKey({
      configPath: mcporterPath,
      key: "mcpServers",
      incoming: converted.mcpServers,
    })
  }
}
/**
 * Convert Claude Code MCP server entries to the mcporter.json shape.
 * Stdio servers keep command/args/env/headers; remote servers map url to
 * baseUrl. Servers with neither a command nor a url are dropped.
 */
function convertMcpToMcporter(servers: Record<string, ClaudeMcpServer>): McporterConfig {
  const mcpServers: Record<string, McporterServer> = {}
  for (const [name, server] of Object.entries(servers)) {
    if (server.command) {
      mcpServers[name] = {
        command: server.command,
        args: server.args,
        env: server.env,
        headers: server.headers,
      }
    } else if (server.url) {
      mcpServers[name] = {
        baseUrl: server.url,
        headers: server.headers,
      }
    }
  }
  return { mcpServers }
}

View File

@@ -1,66 +0,0 @@
import path from "path"
import type { ClaudeHomeConfig } from "../parsers/claude-home"
import type { ClaudeMcpServer } from "../types/claude"
import type { QwenMcpServer } from "../types/qwen"
import { syncQwenCommands } from "./commands"
import { mergeJsonConfigAtKey } from "./json-config"
import { hasExplicitRemoteTransport, hasExplicitSseTransport } from "./mcp-transports"
import { syncSkills } from "./skills"
/**
 * Sync the parsed Claude Code config into a Qwen Code install tree.
 *
 * Skills are symlinked into `<outputRoot>/skills`, commands are written via
 * the Qwen command writer, and MCP servers (if any) are merged under the
 * "mcpServers" key of `<outputRoot>/settings.json`.
 */
export async function syncToQwen(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  await syncSkills(config.skills, path.join(outputRoot, "skills"))
  await syncQwenCommands(config, outputRoot)
  if (Object.keys(config.mcpServers).length > 0) {
    await mergeJsonConfigAtKey({
      configPath: path.join(outputRoot, "settings.json"),
      key: "mcpServers",
      incoming: convertMcpForQwen(config.mcpServers),
    })
  }
}
/**
 * Convert Claude Code MCP server entries to Qwen's settings shape.
 *
 * Stdio servers keep command/args/env. Remote servers with an explicit SSE
 * transport use `url`; all other remote servers use `httpUrl` (Streamable
 * HTTP), with a warning when the transport was not declared explicitly.
 * Servers with neither a command nor a url are dropped.
 */
function convertMcpForQwen(
  servers: Record<string, ClaudeMcpServer>,
): Record<string, QwenMcpServer> {
  const converted: Record<string, QwenMcpServer> = {}
  for (const [name, server] of Object.entries(servers)) {
    if (server.command) {
      converted[name] = {
        command: server.command,
        args: server.args,
        env: server.env,
      }
    } else if (server.url) {
      if (hasExplicitSseTransport(server)) {
        converted[name] = { url: server.url, headers: server.headers }
      } else {
        if (!hasExplicitRemoteTransport(server)) {
          console.warn(
            `Warning: Qwen MCP server "${name}" has an ambiguous remote transport; defaulting to Streamable HTTP.`,
          )
        }
        converted[name] = { httpUrl: server.url, headers: server.headers }
      }
    }
  }
  return converted
}

View File

@@ -1,141 +0,0 @@
import os from "os"
import path from "path"
import type { ClaudeHomeConfig } from "../parsers/claude-home"
import { syncToCodex } from "./codex"
import { syncToCopilot } from "./copilot"
import { syncToDroid } from "./droid"
import { syncToGemini } from "./gemini"
import { syncToKiro } from "./kiro"
import { syncToOpenClaw } from "./openclaw"
import { syncToOpenCode } from "./opencode"
import { syncToPi } from "./pi"
import { syncToQwen } from "./qwen"
import { syncToWindsurf } from "./windsurf"
/** Home-directory install root for GitHub Copilot. */
function getCopilotHomeRoot(home: string): string {
  const copilotDirName = ".copilot"
  return path.join(home, copilotDirName)
}
/** Home-directory install root for Gemini CLI. */
function getGeminiHomeRoot(home: string): string {
  const geminiDirName = ".gemini"
  return path.join(home, geminiDirName)
}
// Union of every tool the CLI can convert a Claude Code plugin install into.
// Must stay in sync with the entries of `syncTargets` below.
export type SyncTargetName =
  | "opencode"
  | "codex"
  | "pi"
  | "droid"
  | "copilot"
  | "gemini"
  | "windsurf"
  | "kiro"
  | "qwen"
  | "openclaw"
// One convertible target: how to detect it on disk, where its output lives,
// and the writer that performs the actual sync.
export type SyncTargetDefinition = {
  name: SyncTargetName
  // Candidate filesystem paths whose presence indicates the tool is installed
  // (presumably any one existing is enough — confirm against the caller).
  detectPaths: (home: string, cwd: string) => string[]
  // Root directory that sync output is written under.
  resolveOutputRoot: (home: string, cwd: string) => string
  sync: (config: ClaudeHomeConfig, outputRoot: string) => Promise<void>
}
// Registry of all supported sync targets. Each entry pairs the detection
// paths for a tool with the output root and writer used to sync into it.
export const syncTargets: SyncTargetDefinition[] = [
  {
    name: "opencode",
    detectPaths: (home, cwd) => [
      path.join(home, ".config", "opencode"),
      path.join(cwd, ".opencode"),
    ],
    resolveOutputRoot: (home) => path.join(home, ".config", "opencode"),
    sync: syncToOpenCode,
  },
  {
    name: "codex",
    detectPaths: (home) => [path.join(home, ".codex")],
    resolveOutputRoot: (home) => path.join(home, ".codex"),
    sync: syncToCodex,
  },
  {
    name: "pi",
    detectPaths: (home) => [path.join(home, ".pi")],
    // Pi detection is on ~/.pi but output goes into its "agent" subtree.
    resolveOutputRoot: (home) => path.join(home, ".pi", "agent"),
    sync: syncToPi,
  },
  {
    name: "droid",
    detectPaths: (home) => [path.join(home, ".factory")],
    resolveOutputRoot: (home) => path.join(home, ".factory"),
    sync: syncToDroid,
  },
  {
    name: "copilot",
    detectPaths: (home, cwd) => [
      getCopilotHomeRoot(home),
      path.join(cwd, ".github", "skills"),
      path.join(cwd, ".github", "agents"),
      path.join(cwd, ".github", "copilot-instructions.md"),
    ],
    resolveOutputRoot: (home) => getCopilotHomeRoot(home),
    sync: syncToCopilot,
  },
  {
    name: "gemini",
    detectPaths: (home, cwd) => [
      path.join(cwd, ".gemini"),
      getGeminiHomeRoot(home),
    ],
    resolveOutputRoot: (home) => getGeminiHomeRoot(home),
    sync: syncToGemini,
  },
  {
    name: "windsurf",
    detectPaths: (home, cwd) => [
      path.join(home, ".codeium", "windsurf"),
      path.join(cwd, ".windsurf"),
    ],
    resolveOutputRoot: (home) => path.join(home, ".codeium", "windsurf"),
    sync: syncToWindsurf,
  },
  {
    name: "kiro",
    detectPaths: (home, cwd) => [
      path.join(home, ".kiro"),
      path.join(cwd, ".kiro"),
    ],
    resolveOutputRoot: (home) => path.join(home, ".kiro"),
    sync: syncToKiro,
  },
  {
    name: "qwen",
    detectPaths: (home, cwd) => [
      path.join(home, ".qwen"),
      path.join(cwd, ".qwen"),
    ],
    resolveOutputRoot: (home) => path.join(home, ".qwen"),
    sync: syncToQwen,
  },
  {
    name: "openclaw",
    detectPaths: (home) => [path.join(home, ".openclaw")],
    resolveOutputRoot: (home) => path.join(home, ".openclaw"),
    sync: syncToOpenClaw,
  },
]
// Flat list of supported target names, derived from the registry above.
export const syncTargetNames = syncTargets.map((target) => target.name)
export function isSyncTargetName(value: string): value is SyncTargetName {
return syncTargetNames.includes(value as SyncTargetName)
}
/** Look up a registered sync target by name; throws for unknown names. */
export function getSyncTarget(name: SyncTargetName): SyncTargetDefinition {
  for (const entry of syncTargets) {
    if (entry.name === name) return entry
  }
  throw new Error(`Unknown sync target: ${name}`)
}
/** Default lookup context: the real home directory and process working dir. */
export function getDefaultSyncRegistryContext(): { home: string; cwd: string } {
  const home = os.homedir()
  const cwd = process.cwd()
  return { home, cwd }
}

View File

@@ -1,29 +0,0 @@
import path from "path"
import type { ClaudeSkill } from "../types/claude"
import { ensureDir, sanitizePathName } from "../utils/files"
import { forceSymlink, isValidSkillName } from "../utils/symlink"
/**
 * Symlink each skill's source directory into `skillsDir`.
 *
 * Skills with invalid names are skipped with a warning, as are skills whose
 * sanitized name collides with one already written (first writer wins).
 */
export async function syncSkills(
  skills: ClaudeSkill[],
  skillsDir: string,
): Promise<void> {
  await ensureDir(skillsDir)
  // Sanitized names already written; used to detect collisions.
  const seen = new Set<string>()
  for (const skill of skills) {
    if (!isValidSkillName(skill.name)) {
      console.warn(`Skipping skill with invalid name: ${skill.name}`)
      continue
    }
    const safeName = sanitizePathName(skill.name)
    if (seen.has(safeName)) {
      console.warn(`Skipping skill "${skill.name}": sanitized name "${safeName}" collides with another skill`)
      continue
    }
    seen.add(safeName)
    const target = path.join(skillsDir, safeName)
    await forceSymlink(skill.sourceDir, target)
  }
}

View File

@@ -1,59 +0,0 @@
import path from "path"
import type { ClaudeHomeConfig } from "../parsers/claude-home"
import type { ClaudeMcpServer } from "../types/claude"
import type { WindsurfMcpServerEntry } from "../types/windsurf"
import { syncWindsurfCommands } from "./commands"
import { mergeJsonConfigAtKey } from "./json-config"
import { hasExplicitSseTransport } from "./mcp-transports"
import { syncSkills } from "./skills"
/**
 * Sync the parsed Claude Code config into a Windsurf install tree.
 *
 * Skills are symlinked into `<outputRoot>/skills`, commands are written at
 * global scope, and MCP servers (if any) are merged under the "mcpServers"
 * key of `<outputRoot>/mcp_config.json`.
 */
export async function syncToWindsurf(
  config: ClaudeHomeConfig,
  outputRoot: string,
): Promise<void> {
  await syncSkills(config.skills, path.join(outputRoot, "skills"))
  await syncWindsurfCommands(config, outputRoot, "global")
  if (Object.keys(config.mcpServers).length > 0) {
    await mergeJsonConfigAtKey({
      configPath: path.join(outputRoot, "mcp_config.json"),
      key: "mcpServers",
      incoming: convertMcpForWindsurf(config.mcpServers),
    })
  }
}
/**
 * Convert Claude Code MCP server entries to Windsurf's mcp_config.json shape.
 * Stdio servers keep command/args/env. Remote servers use the `url` key when
 * the transport is explicitly SSE and `serverUrl` otherwise. Servers with
 * neither a command nor a url are dropped.
 */
function convertMcpForWindsurf(
  servers: Record<string, ClaudeMcpServer>,
): Record<string, WindsurfMcpServerEntry> {
  const converted: Record<string, WindsurfMcpServerEntry> = {}
  for (const [name, server] of Object.entries(servers)) {
    if (server.command) {
      converted[name] = {
        command: server.command,
        args: server.args,
        env: server.env,
      }
    } else if (server.url) {
      const remote: WindsurfMcpServerEntry = { headers: server.headers }
      if (hasExplicitSseTransport(server)) {
        remote.url = server.url
      } else {
        remote.serverUrl = server.url
      }
      converted[name] = remote
    }
  }
  return converted
}

View File

@@ -1,10 +1,10 @@
import fs from "fs/promises"
import path from "path"
import { backupFile, copyDir, copySkillDir, ensureDir, sanitizePathName, writeText, writeTextSecure } from "../utils/files"
import { backupFile, copyDir, copySkillDir, ensureDir, isSafeManagedPath, pathExists, sanitizePathName, writeJson, writeText, writeTextSecure } from "../utils/files"
import type { CodexBundle } from "../types/codex"
import type { ClaudeMcpServer } from "../types/claude"
import { transformContentForCodex } from "../utils/codex-content"
import { cleanupStaleSkillDirs, cleanupStaleAgents, cleanupStalePrompts } from "../utils/legacy-cleanup"
import { getLegacyCodexArtifacts } from "../data/plugin-legacy-artifacts"
const MANAGED_START_MARKER = "# BEGIN Compound Engineering plugin MCP -- do not edit this block"
const MANAGED_END_MARKER = "# END Compound Engineering plugin MCP"
@@ -12,30 +12,56 @@ const PREV_START_MARKER = "# BEGIN compound-plugin Claude Code MCP"
const PREV_END_MARKER = "# END compound-plugin Claude Code MCP"
const LEGACY_MARKER = "# MCP servers synced from Claude Code"
const UNMARKED_LEGACY_MARKER = "# Generated by compound-plugin"
// Filename of the per-plugin manifest recording what a previous install wrote.
const MANAGED_INSTALL_MANIFEST = "install-manifest.json"
// Persisted record of everything a prior install wrote for one plugin, so a
// later sync can prune entries that no longer exist in the current bundle.
export type CodexInstallManifest = {
  version: 1          // schema version; readers reject anything else
  pluginName: string  // sanitized plugin directory name
  skills: string[]    // skill dir names under skills/<plugin>
  prompts: string[]   // prompt filenames (<name>.md) under prompts/
  agents: string[]    // agent filenames (<name>.toml) under agents/<plugin>
}
export async function writeCodexBundle(outputRoot: string, bundle: CodexBundle): Promise<void> {
const codexRoot = resolveCodexRoot(outputRoot)
await ensureDir(codexRoot)
// TODO(cleanup): Remove after v3 transition (circa Q3 2026)
const skillsRoot = path.join(codexRoot, "skills")
await cleanupStaleSkillDirs(skillsRoot)
await cleanupStaleAgents(skillsRoot, null) // agents are generated as skill dirs in Codex
await cleanupStalePrompts(path.join(codexRoot, "prompts"))
const pluginName = bundle.pluginName ? sanitizeCodexPathComponent(bundle.pluginName) : undefined
const manifest = pluginName ? await readInstallManifest(codexRoot, pluginName) : null
const currentPrompts = bundle.prompts.map((prompt) => `${sanitizePathName(prompt.name)}.md`)
const agents = bundle.agents ?? []
const agentsRoot = pluginName
? path.join(codexRoot, "agents", pluginName)
: path.join(codexRoot, "agents")
const currentAgents = agents.map((agent) => `${sanitizePathName(agent.name)}.toml`)
assertNoCodexAgentFilenameCollisions(agents)
if (bundle.prompts.length > 0) {
const promptsDir = path.join(codexRoot, "prompts")
await cleanupRemovedPrompts(promptsDir, manifest, currentPrompts)
for (const prompt of bundle.prompts) {
await writeText(path.join(promptsDir, `${prompt.name}.md`), prompt.content + "\n")
await writeText(path.join(promptsDir, `${sanitizePathName(prompt.name)}.md`), prompt.content + "\n")
}
} else if (pluginName) {
await cleanupRemovedPrompts(path.join(codexRoot, "prompts"), manifest, [])
}
const skillsRoot = pluginName
? path.join(codexRoot, "skills", pluginName)
: path.join(codexRoot, "skills")
const currentSkills = [
...bundle.skillDirs.map((skill) => sanitizePathName(skill.name)),
...bundle.generatedSkills.map((skill) => sanitizePathName(skill.name)),
]
await cleanupRemovedSkills(skillsRoot, manifest, currentSkills)
if (bundle.skillDirs.length > 0) {
const skillsRoot = path.join(codexRoot, "skills")
for (const skill of bundle.skillDirs) {
const targetDir = path.join(skillsRoot, sanitizePathName(skill.name))
await cleanupCurrentManagedSkillDir(targetDir, manifest, sanitizePathName(skill.name))
await copySkillDir(
skill.sourceDir,
path.join(skillsRoot, sanitizePathName(skill.name)),
targetDir,
(content) => transformContentForCodex(content, bundle.invocationTargets, {
unknownSlashBehavior: "preserve",
}),
@@ -44,9 +70,9 @@ export async function writeCodexBundle(outputRoot: string, bundle: CodexBundle):
}
if (bundle.generatedSkills.length > 0) {
const skillsRoot = path.join(codexRoot, "skills")
for (const skill of bundle.generatedSkills) {
const skillDir = path.join(skillsRoot, sanitizePathName(skill.name))
await cleanupCurrentManagedSkillDir(skillDir, manifest, sanitizePathName(skill.name))
await writeText(path.join(skillDir, "SKILL.md"), skill.content + "\n")
for (const sidecar of skill.sidecarDirs ?? []) {
await copyDir(sidecar.sourceDir, path.join(skillDir, sidecar.targetName))
@@ -54,6 +80,32 @@ export async function writeCodexBundle(outputRoot: string, bundle: CodexBundle):
}
}
await cleanupRemovedAgents(agentsRoot, manifest, currentAgents)
if (agents.length > 0) {
for (const agent of agents) {
const agentFile = `${sanitizePathName(agent.name)}.toml`
await writeText(path.join(agentsRoot, agentFile), renderCodexAgentToml(agent) + "\n")
for (const sidecar of agent.sidecarDirs ?? []) {
await copyDir(sidecar.sourceDir, path.join(agentsRoot, sanitizePathName(agent.name), sidecar.targetName))
}
}
}
if (pluginName) {
await ensureDir(skillsRoot)
await writeInstallManifest(codexRoot, {
version: 1,
pluginName,
skills: currentSkills,
prompts: currentPrompts,
agents: currentAgents,
})
await cleanupKnownLegacyCodexArtifacts(codexRoot, bundle)
await cleanupLegacyAgentSkillDirs(codexRoot, pluginName, currentSkills, bundle)
await cleanupLegacyAgentsSkillSymlinks(codexRoot, pluginName, currentSkills, manifest)
await cleanupPreviousManagedCodexSkillStore(codexRoot, pluginName)
}
const configPath = path.join(codexRoot, "config.toml")
const existingConfig = await readFileSafe(configPath)
const mcpToml = renderCodexConfig(bundle.mcpServers)
@@ -71,6 +123,313 @@ function resolveCodexRoot(outputRoot: string): string {
return path.basename(outputRoot) === ".codex" ? outputRoot : path.join(outputRoot, ".codex")
}
/** Sanitize a name for use as a single Codex path component (no separators). */
function sanitizeCodexPathComponent(name: string): string {
  const sanitized = sanitizePathName(name)
  // Collapse any remaining slashes/backslashes so the result stays one segment.
  return sanitized.replace(/[\\/]/g, "-")
}
/**
 * Public wrapper over the manifest reader; returns null when the manifest is
 * missing, unreadable, or fails validation.
 */
export async function readCodexInstallManifest(codexRoot: string, pluginName: string): Promise<CodexInstallManifest | null> {
  return readInstallManifest(codexRoot, pluginName)
}
/**
 * Load and validate `<codexRoot>/<plugin>/install-manifest.json`.
 *
 * Returns null when the file is missing, unparseable, or fails the shape
 * check (version/pluginName/arrays). Unsafe path entries are dropped at read
 * time — see the inline comment — so downstream cleanup never sees them.
 */
async function readInstallManifest(codexRoot: string, pluginName: string): Promise<CodexInstallManifest | null> {
  const manifestPath = path.join(codexRoot, pluginName, MANAGED_INSTALL_MANIFEST)
  try {
    const raw = await fs.readFile(manifestPath, "utf8")
    const parsed = JSON.parse(raw) as Partial<CodexInstallManifest>
    if (
      parsed.version === 1 &&
      parsed.pluginName === pluginName &&
      Array.isArray(parsed.skills) &&
      Array.isArray(parsed.prompts)
    ) {
      // Filter manifest entries at read time. Cleanup functions join these
      // strings into `fs.rm` paths, so a tampered or corrupted
      // `install-manifest.json` could otherwise delete outside the Codex
      // managed tree. Codex entries are bare leaf names joined against
      // `skills/<plugin>`, `prompts/`, or `agents/<plugin>` — but the
      // absolute-path and `..`-segment checks are root-independent, and we
      // use `codexRoot` for the containment check as the outermost root
      // that contains every possible destination.
      const agents = Array.isArray(parsed.agents) ? parsed.agents : []
      return {
        version: 1,
        pluginName,
        skills: filterSafeCodexManifestEntries(parsed.skills, codexRoot, manifestPath, "skills"),
        prompts: filterSafeCodexManifestEntries(parsed.prompts, codexRoot, manifestPath, "prompts"),
        agents: filterSafeCodexManifestEntries(agents, codexRoot, manifestPath, "agents"),
      }
    }
  } catch (err) {
    // ENOENT is the normal first-install case; anything else gets a warning.
    if ((err as NodeJS.ErrnoException).code !== "ENOENT") {
      console.warn(`Ignoring unreadable Codex install manifest at ${manifestPath}.`)
    }
  }
  return null
}
/**
 * Keep only manifest entries that pass the managed-path safety check; drop
 * (and warn about) anything else. `group` is only used in the warning text.
 */
function filterSafeCodexManifestEntries(
  entries: unknown[],
  codexRoot: string,
  manifestPath: string,
  group: string,
): string[] {
  const safe: string[] = []
  for (const entry of entries) {
    // NOTE(review): this relies on isSafeManagedPath narrowing `entry` from
    // unknown to string (i.e. it acts as a type guard) — confirm its signature.
    if (isSafeManagedPath(codexRoot, entry)) {
      safe.push(entry)
    } else {
      console.warn(
        `Dropping unsafe Codex install-manifest entry in ${manifestPath} (group "${group}"): ${JSON.stringify(entry)}`,
      )
    }
  }
  return safe
}
/** Persist the manifest to `<codexRoot>/<plugin>/install-manifest.json`. */
async function writeInstallManifest(codexRoot: string, manifest: CodexInstallManifest): Promise<void> {
  await writeJson(path.join(codexRoot, manifest.pluginName, MANAGED_INSTALL_MANIFEST), manifest)
}
/**
 * Delete skill dirs the previous install wrote (per the manifest) that are no
 * longer in the current bundle. No-op when there is no manifest.
 */
async function cleanupRemovedSkills(
  skillsRoot: string,
  manifest: CodexInstallManifest | null,
  currentSkills: string[],
): Promise<void> {
  if (!manifest) return
  const current = new Set(currentSkills)
  for (const skillName of manifest.skills) {
    if (current.has(skillName)) continue
    // Defense in depth: `readInstallManifest` already drops unsafe entries,
    // but re-check before any out-of-tree fs.rm can be issued from a future
    // caller that bypasses the read layer.
    if (!isSafeManagedPath(skillsRoot, skillName)) continue
    await fs.rm(path.join(skillsRoot, skillName), { recursive: true, force: true })
  }
}
/**
 * Delete prompt files the previous install wrote (per the manifest) that are
 * no longer in the current bundle. No-op when there is no manifest.
 */
async function cleanupRemovedPrompts(
  promptsDir: string,
  manifest: CodexInstallManifest | null,
  currentPrompts: string[],
): Promise<void> {
  if (!manifest) return
  const current = new Set(currentPrompts)
  for (const promptFile of manifest.prompts) {
    if (current.has(promptFile)) continue
    // Defense in depth against unsafe manifest entries; see cleanupRemovedSkills.
    if (!isSafeManagedPath(promptsDir, promptFile)) continue
    await fs.rm(path.join(promptsDir, promptFile), { force: true })
  }
}
/**
 * Delete agent artifacts the previous install wrote (per the manifest) that
 * are no longer in the current bundle. Each agent may leave two artifacts:
 * the `<name>.toml` file and a same-named sidecar directory — both removed.
 */
async function cleanupRemovedAgents(
  agentsRoot: string,
  manifest: CodexInstallManifest | null,
  currentAgents: string[],
): Promise<void> {
  if (!manifest) return
  const current = new Set(currentAgents)
  for (const agentFile of manifest.agents) {
    if (current.has(agentFile)) continue
    // Defense in depth against unsafe manifest entries; see cleanupRemovedSkills.
    if (!isSafeManagedPath(agentsRoot, agentFile)) continue
    await fs.rm(path.join(agentsRoot, agentFile), { force: true })
    await fs.rm(path.join(agentsRoot, path.basename(agentFile, ".toml")), { recursive: true, force: true })
  }
}
/**
 * Remove a skill's target dir before rewriting it, but only when the previous
 * install's manifest shows we own that skill — never touch unmanaged dirs.
 */
async function cleanupCurrentManagedSkillDir(
  targetDir: string,
  manifest: CodexInstallManifest | null,
  skillName: string,
): Promise<void> {
  if (manifest === null) return
  if (!manifest.skills.includes(skillName)) return
  await fs.rm(targetDir, { recursive: true, force: true })
}
/**
 * Move artifacts from known legacy install layouts (flat `skills/` and
 * `prompts/` entries) into the plugin's timestamped backup dir rather than
 * deleting them. No-op when the bundle has no plugin name.
 */
async function cleanupKnownLegacyCodexArtifacts(codexRoot: string, bundle: CodexBundle): Promise<void> {
  const pluginName = bundle.pluginName
  if (!pluginName) return
  const legacyArtifacts = getLegacyCodexArtifacts(bundle)
  for (const skillName of legacyArtifacts.skills) {
    const legacySkillPath = path.join(codexRoot, "skills", skillName)
    await moveLegacyArtifactToBackup(codexRoot, pluginName, "skills", legacySkillPath)
  }
  for (const promptFile of legacyArtifacts.prompts) {
    const legacyPromptPath = path.join(codexRoot, "prompts", promptFile)
    await moveLegacyArtifactToBackup(codexRoot, pluginName, "prompts", legacyPromptPath)
  }
}
/**
 * Back up stale skill dirs under `skills/<plugin>` that were generated for
 * agents by earlier versions (agent-as-skill layout) and are not part of the
 * current skill set. Candidates come from bundle agent names (plus their
 * `ce-`-prefixed final segment) and the historical legacy allow-list.
 */
async function cleanupLegacyAgentSkillDirs(
  codexRoot: string,
  pluginName: string,
  currentSkills: string[],
  bundle: CodexBundle,
): Promise<void> {
  const currentSkillSet = new Set(currentSkills)
  const legacySkillNames = new Set<string>()
  for (const agent of bundle.agents ?? []) {
    // Older layouts emitted both "<full-agent-name>" and "ce-<final-segment>".
    const finalSegment = agent.name.includes("-ce-") ? agent.name.split("-ce-").pop() : agent.name
    legacySkillNames.add(sanitizePathName(agent.name))
    if (finalSegment) legacySkillNames.add(`ce-${sanitizePathName(finalSegment)}`)
  }
  for (const name of getLegacyCodexArtifacts({
    pluginName,
    prompts: [],
    skillDirs: [],
    generatedSkills: [],
    agents: [],
  }).skills) {
    legacySkillNames.add(name)
  }
  const skillsRoot = path.join(codexRoot, "skills", pluginName)
  for (const skillName of legacySkillNames) {
    // Never back up a dir that is still a live skill in this install.
    if (currentSkillSet.has(skillName)) continue
    await moveLegacyArtifactToBackup(codexRoot, pluginName, "skills", path.join(skillsRoot, skillName))
  }
}
/**
 * Remove stale symlinks under the shared `~/.agents/skills/` store that point
 * into CE's own Codex install tree. See the inline comment for why the broad
 * candidate set is safe.
 */
async function cleanupLegacyAgentsSkillSymlinks(
  codexRoot: string,
  pluginName: string,
  currentSkills: string[],
  manifest: CodexInstallManifest | null,
): Promise<void> {
  // Symlink cleanup is safe for a broad candidate set because
  // `removeAgentsSkillSymlinkIfManaged` only removes a symlink whose resolved
  // target is inside a managed root. We probe:
  // - current and manifest-tracked skills (in case stale symlinks point at
  //   still-current skill directories under a previous layout)
  // - the explicit historical legacy allow-list (renamed/removed CE skills)
  // Bundle-derived names that might collide with unrelated user skills are
  // safe here because the managed-root check rejects symlinks pointing
  // anywhere outside CE's own install tree.
  // NOTE(review): unlike cleanupLegacyAgentSkillDirs, this call omits the
  // `agents` field from the getLegacyCodexArtifacts argument — confirm the
  // parameter type makes it optional.
  const legacyArtifacts = getLegacyCodexArtifacts({
    pluginName,
    prompts: [],
    skillDirs: [],
    generatedSkills: [],
  })
  const candidateSkillNames = new Set<string>([
    ...currentSkills,
    ...(manifest?.skills ?? []),
    ...legacyArtifacts.skills,
  ])
  const agentsSkillsDir = path.join(path.dirname(codexRoot), ".agents", "skills")
  const managedRoots = await resolveCodexManagedRoots(codexRoot, pluginName)
  await removeAgentsSkillSymlinkIfManaged(path.join(agentsSkillsDir, pluginName), managedRoots)
  for (const skillName of candidateSkillNames) {
    await removeAgentsSkillSymlinkIfManaged(path.join(agentsSkillsDir, skillName), managedRoots)
  }
}
/** Delete the old `<codexRoot>/<plugin>/skills` store used by earlier layouts. */
async function cleanupPreviousManagedCodexSkillStore(codexRoot: string, pluginName: string): Promise<void> {
  await fs.rm(path.join(codexRoot, pluginName, "skills"), { recursive: true, force: true })
}
/**
 * Unlink `symlinkPath` only when the ownership check confirms it points into
 * one of CE's managed roots; tolerate it disappearing concurrently (ENOENT).
 */
async function removeAgentsSkillSymlinkIfManaged(symlinkPath: string, managedRoots: string[]): Promise<void> {
  if (!(await isManagedCodexAgentsSymlink(symlinkPath, managedRoots))) return
  try {
    await fs.unlink(symlinkPath)
  } catch (err) {
    if ((err as NodeJS.ErrnoException).code !== "ENOENT") throw err
  }
}
/**
 * Ownership check for entries under the shared `~/.agents/skills/` store.
 *
 * Returns true only when `entryPath` is a symlink whose resolved target lives
 * inside one of the supplied CE-managed Codex roots. Plain files, directories,
 * and symlinks pointing elsewhere (user-created skills that happen to share a
 * name with a CE skill) return false so callers can leave them alone.
 *
 * The shared `.agents` store is cross-plugin, so name-only matches are
 * ambiguous. Only a symlink pointing into CE's own install tree is a strong
 * signal that CE emitted it — use this guard before any mutation there.
 */
export async function isManagedCodexAgentsSymlink(
  entryPath: string,
  managedRoots: string[],
): Promise<boolean> {
  let stats
  try {
    // lstat so we classify the entry itself, not what it points at.
    stats = await fs.lstat(entryPath)
  } catch (err) {
    if ((err as NodeJS.ErrnoException).code === "ENOENT") return false
    throw err
  }
  if (!stats.isSymbolicLink()) return false
  // null means the link target could not be resolved at all — treat as unmanaged.
  const resolvedTarget = await readResolvedSymlinkTarget(entryPath)
  if (!resolvedTarget) return false
  return managedRoots.some((root) => isPathInside(resolvedTarget, root))
}
/**
 * Compute the Codex directories CE owns for this plugin, used as the
 * ownership signal for entries under `~/.agents/skills/`. Both the raw paths
 * and their realpath-resolved forms are returned so symlink-bearing locations
 * (e.g. macOS `/var/folders/...` -> `/private/...`) compare equal no matter
 * which spelling a resolved symlink target uses.
 */
export async function resolveCodexManagedRoots(
  codexRoot: string,
  pluginName: string,
): Promise<string[]> {
  const rawRoots = [
    path.join(codexRoot, pluginName),
    path.join(codexRoot, "skills", pluginName),
  ]
  const canonicalRoots: string[] = []
  for (const root of rawRoots) {
    canonicalRoots.push(await canonicalizePath(root))
  }
  return [...rawRoots, ...canonicalRoots]
}
/**
 * Best-effort resolution of a symlink's target: realpath when it resolves,
 * otherwise the link text made absolute relative to the link's directory
 * (covers dangling links), otherwise null.
 */
async function readResolvedSymlinkTarget(symlinkPath: string): Promise<string | null> {
  try {
    return await fs.realpath(symlinkPath)
  } catch {
    try {
      const linkTarget = await fs.readlink(symlinkPath)
      return path.resolve(path.dirname(symlinkPath), linkTarget)
    } catch {
      return null
    }
  }
}
/**
 * Canonical (symlink-resolved) form of a path; for paths that do not exist,
 * fall back to a purely lexical absolute path so comparisons still work.
 */
async function canonicalizePath(filePath: string): Promise<string> {
  return fs.realpath(filePath).catch(() => path.resolve(filePath))
}
/** True when `candidatePath` equals `rootPath` or lies anywhere beneath it. */
function isPathInside(candidatePath: string, rootPath: string): boolean {
  const relative = path.relative(path.resolve(rootPath), path.resolve(candidatePath))
  if (relative === "") return true
  if (path.isAbsolute(relative)) return false
  return !relative.startsWith("..")
}
/**
 * Move a legacy artifact into `<codexRoot>/<plugin>/legacy-backup/<ts>/<kind>`
 * instead of deleting it, then log where it went. No-op when the artifact
 * does not exist.
 */
async function moveLegacyArtifactToBackup(
  codexRoot: string,
  pluginName: string,
  kind: "skills" | "prompts",
  artifactPath: string,
): Promise<void> {
  if (!(await pathExists(artifactPath))) return
  // ISO timestamp with ':' and '.' replaced so it is a valid dir name everywhere.
  const timestamp = new Date().toISOString().replace(/[:.]/g, "-")
  const backupDir = path.join(codexRoot, pluginName, "legacy-backup", timestamp, kind)
  const backupPath = path.join(backupDir, path.basename(artifactPath))
  await ensureDir(backupDir)
  await fs.rename(artifactPath, backupPath)
  console.warn(`Moved legacy Codex ${kind.slice(0, -1)} artifact to ${backupPath}`)
}
export function renderCodexConfig(mcpServers?: Record<string, ClaudeMcpServer>): string | null {
if (!mcpServers || Object.keys(mcpServers).length === 0) return null
@@ -177,6 +536,34 @@ function formatTomlString(value: string): string {
return JSON.stringify(value)
}
/**
 * Fail fast when two differently-named agents sanitize to the same TOML
 * filename, since the later write would silently clobber the earlier one.
 *
 * Fix: the error message previously contained the literal text "$(unknown)"
 * (a mangled placeholder — `$(...)` is not template interpolation); it now
 * interpolates the actual colliding filename.
 *
 * @throws Error naming both agents and the shared sanitized filename.
 */
function assertNoCodexAgentFilenameCollisions(
  agents: NonNullable<CodexBundle["agents"]>,
): void {
  // filename -> first agent name that claimed it
  const seen = new Map<string, string>()
  for (const agent of agents) {
    const filename = `${sanitizePathName(agent.name)}.toml`
    const prior = seen.get(filename)
    if (prior !== undefined && prior !== agent.name) {
      throw new Error(
        `Codex agent filename collision: "${prior}" and "${agent.name}" both normalize to ` +
        `"${filename}". Rename one of the source agents so their sanitized filenames differ. ` +
        `A numeric suffix cannot be used here because the TOML filename must match the ` +
        `agent name used for Task(subagent_type: ...) invocations.`,
      )
    }
    seen.set(filename, agent.name)
  }
}
/** Render one agent as the three-key TOML document Codex reads. */
function renderCodexAgentToml(agent: NonNullable<CodexBundle["agents"]>[number]): string {
  const pairs = [
    ["name", agent.name],
    ["description", agent.description],
    ["developer_instructions", agent.instructions],
  ] as const
  return pairs.map(([key, value]) => `${key} = ${formatTomlString(value)}`).join("\n")
}
function formatTomlKey(value: string): string {
if (/^[A-Za-z0-9_-]+$/.test(value)) return value
return JSON.stringify(value)

View File

@@ -1,109 +0,0 @@
import path from "path"
import { backupFile, copySkillDir, ensureDir, pathExists, readJson, sanitizePathName, writeJsonSecure, writeText } from "../utils/files"
import { transformContentForCopilot } from "../converters/claude-to-copilot"
import type { CopilotBundle } from "../types/copilot"
import { cleanupStaleSkillDirs, cleanupStaleAgents } from "../utils/legacy-cleanup"
/**
 * Write a converted bundle into a Copilot install tree (a `.github` dir).
 *
 * Cleans stale skill/agent artifacts from older versions, writes agents as
 * `<name>.agent.md`, generated skills as `<name>/SKILL.md`, copies real skill
 * dirs with Copilot content transforms, and merges MCP servers into
 * `copilot-mcp-config.json` (backing up the existing file first).
 */
export async function writeCopilotBundle(outputRoot: string, bundle: CopilotBundle): Promise<void> {
  const paths = resolveCopilotPaths(outputRoot)
  await ensureDir(paths.githubDir)
  // TODO(cleanup): Remove after v3 transition (circa Q3 2026)
  const skillsDir = path.join(paths.githubDir, "skills")
  await cleanupStaleSkillDirs(skillsDir)
  await cleanupStaleAgents(path.join(paths.githubDir, "agents"), ".agent.md")
  if (bundle.agents.length > 0) {
    const agentsDir = path.join(paths.githubDir, "agents")
    for (const agent of bundle.agents) {
      await writeText(path.join(agentsDir, `${sanitizePathName(agent.name)}.agent.md`), agent.content + "\n")
    }
  }
  if (bundle.generatedSkills.length > 0) {
    // Shadows the outer skillsDir with the same value (kept for cleanup scope).
    const skillsDir = path.join(paths.githubDir, "skills")
    for (const skill of bundle.generatedSkills) {
      await writeText(path.join(skillsDir, sanitizePathName(skill.name), "SKILL.md"), skill.content + "\n")
    }
  }
  if (bundle.skillDirs.length > 0) {
    const skillsDir = path.join(paths.githubDir, "skills")
    for (const skill of bundle.skillDirs) {
      await copySkillDir(skill.sourceDir, path.join(skillsDir, sanitizePathName(skill.name)), transformContentForCopilot)
    }
  }
  const mcpPath = path.join(paths.githubDir, "copilot-mcp-config.json")
  // null means there is nothing to write (no user servers, no plugin servers).
  const merged = await mergeCopilotMcpConfig(mcpPath, bundle.mcpConfig ?? {})
  if (merged !== null) {
    const backupPath = await backupFile(mcpPath)
    if (backupPath) {
      console.log(`Backed up existing copilot-mcp-config.json to ${backupPath}`)
    }
    await writeJsonSecure(mcpPath, merged)
  }
}
// Tracking key stored inside copilot-mcp-config.json listing the server names
// this plugin manages, so later syncs prune only plugin-owned entries.
const MANAGED_KEY = "_compound_managed_mcp"
async function mergeCopilotMcpConfig(
configPath: string,
incoming: Record<string, unknown>,
): Promise<Record<string, unknown> | null> {
let existing: Record<string, unknown> = {}
if (await pathExists(configPath)) {
try {
const parsed = await readJson<unknown>(configPath)
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
existing = parsed as Record<string, unknown>
}
} catch {
// Unparseable file — proceed with incoming only
}
}
const existingMcp = (typeof existing.mcpServers === "object" && existing.mcpServers !== null && !Array.isArray(existing.mcpServers))
? { ...(existing.mcpServers as Record<string, unknown>) }
: {}
// Remove previously-managed plugin servers that are no longer in the bundle.
// Legacy migration: if no tracking key exists AND plugin has servers, assume all
// existing servers are plugin-managed (the old writer overwrote the entire file).
// When incoming is empty, skip pruning — there's nothing to migrate and we'd
// wrongly delete user servers from a pre-existing untracked config.
const incomingKeys = Object.keys(incoming)
const hasTrackingKey = Array.isArray(existing[MANAGED_KEY])
const prevManaged = hasTrackingKey
? existing[MANAGED_KEY] as string[]
: incomingKeys.length > 0 ? Object.keys(existingMcp) : []
for (const name of prevManaged) {
if (!(name in incoming)) {
delete existingMcp[name]
}
}
const mergedMcp = { ...existingMcp, ...incoming }
// Nothing to write — no user servers, no plugin servers, no existing file
if (Object.keys(mergedMcp).length === 0 && Object.keys(existing).length === 0) {
return null
}
// Always write tracking key (even as []) to prevent legacy fallback on future installs
return {
...existing,
mcpServers: mergedMcp,
[MANAGED_KEY]: incomingKeys,
}
}
function resolveCopilotPaths(outputRoot: string) {
  // When the caller already points at a `.github` directory, write straight
  // into it; otherwise nest the Copilot artifacts under `<outputRoot>/.github`.
  const isGithubDir = path.basename(outputRoot) === ".github"
  const githubDir = isGithubDir ? outputRoot : path.join(outputRoot, ".github")
  return { githubDir }
}

View File

@@ -1,57 +0,0 @@
import path from "path"
import { copySkillDir, ensureDir, resolveCommandPath, sanitizePathName, writeText } from "../utils/files"
import { transformContentForDroid } from "../converters/claude-to-droid"
import type { DroidBundle } from "../types/droid"
import { cleanupStaleSkillDirs, cleanupStaleAgents } from "../utils/legacy-cleanup"
/**
 * Writes a converted Droid (Factory) bundle under the resolved `.factory`
 * layout: commands, droid (agent) definitions, and copied skill directories.
 */
export async function writeDroidBundle(outputRoot: string, bundle: DroidBundle): Promise<void> {
  const paths = resolveDroidPaths(outputRoot)
  await ensureDir(paths.root)
  // TODO(cleanup): Remove after v3 transition (circa Q3 2026)
  await cleanupStaleSkillDirs(paths.skillsDir)
  await cleanupStaleAgents(paths.droidsDir, ".md")
  const { commands, droids, skillDirs } = bundle
  // Slash commands: one markdown file per command.
  if (commands.length > 0) {
    await ensureDir(paths.commandsDir)
    for (const command of commands) {
      const dest = await resolveCommandPath(paths.commandsDir, command.name, ".md")
      await writeText(dest, command.content + "\n")
    }
  }
  // Droid (agent) definitions: `<sanitized-name>.md` each.
  if (droids.length > 0) {
    await ensureDir(paths.droidsDir)
    for (const droid of droids) {
      const droidFile = `${sanitizePathName(droid.name)}.md`
      await writeText(path.join(paths.droidsDir, droidFile), droid.content + "\n")
    }
  }
  // Skill directories are copied wholesale with Droid-specific content transforms.
  if (skillDirs.length > 0) {
    await ensureDir(paths.skillsDir)
    for (const skill of skillDirs) {
      const destDir = path.join(paths.skillsDir, sanitizePathName(skill.name))
      await copySkillDir(skill.sourceDir, destDir, transformContentForDroid)
    }
  }
}
function resolveDroidPaths(outputRoot: string) {
  // If `outputRoot` already is a `.factory` directory (e.g. ~/.factory),
  // write into it directly; otherwise nest under `<outputRoot>/.factory`.
  // `root` always stays `outputRoot` — it is what the writer ensures exists.
  const factoryRoot = path.basename(outputRoot) === ".factory"
    ? outputRoot
    : path.join(outputRoot, ".factory")
  return {
    root: outputRoot,
    commandsDir: path.join(factoryRoot, "commands"),
    droidsDir: path.join(factoryRoot, "droids"),
    skillsDir: path.join(factoryRoot, "skills"),
  }
}

View File

@@ -1,32 +1,67 @@
import path from "path"
import { backupFile, copySkillDir, ensureDir, pathExists, readJson, resolveCommandPath, sanitizePathName, writeJson, writeText } from "../utils/files"
import { backupFile, copySkillDir, ensureDir, pathExists, readJson, sanitizePathName, writeJson, writeText } from "../utils/files"
import { transformContentForGemini } from "../converters/claude-to-gemini"
import type { GeminiBundle } from "../types/gemini"
import { cleanupStaleSkillDirs, cleanupStaleAgents } from "../utils/legacy-cleanup"
import { getLegacyGeminiArtifacts } from "../data/plugin-legacy-artifacts"
import {
archiveLegacyInstallManifestIfOwned,
cleanupCurrentManagedDirectory,
cleanupRemovedManagedDirectories,
cleanupRemovedManagedFiles,
moveLegacyArtifactToBackup,
readManagedInstallManifestWithLegacyFallback,
resolveManagedSegment,
sanitizeManagedPluginName,
writeManagedInstallManifest,
} from "./managed-artifacts"
export async function writeGeminiBundle(outputRoot: string, bundle: GeminiBundle): Promise<void> {
const paths = resolveGeminiPaths(outputRoot)
await ensureDir(paths.geminiDir)
const pluginName = bundle.pluginName ? sanitizeManagedPluginName(bundle.pluginName) : undefined
const paths = resolveGeminiPaths(outputRoot, pluginName)
const manifest = pluginName
? await readManagedInstallManifestWithLegacyFallback(paths.managedDir, pluginName)
: null
const currentSkills = [
...bundle.generatedSkills.map((skill) => sanitizePathName(skill.name)),
...bundle.skillDirs.map((skill) => sanitizePathName(skill.name)),
]
const agents = bundle.agents ?? []
const currentAgents = agents.map((agent) => `${sanitizePathName(agent.name)}.md`)
const currentCommands = bundle.commands.map((command) => `${command.name}.toml`)
// TODO(cleanup): Remove after v3 transition (circa Q3 2026)
await cleanupStaleSkillDirs(paths.skillsDir)
await cleanupStaleAgents(paths.skillsDir, null)
await ensureDir(paths.geminiDir)
await cleanupRemovedManagedDirectories(paths.skillsDir, manifest, "skills", currentSkills)
await cleanupRemovedManagedFiles(paths.agentsDir, manifest, "agents", currentAgents)
await cleanupRemovedManagedFiles(paths.commandsDir, manifest, "commands", currentCommands)
if (bundle.generatedSkills.length > 0) {
for (const skill of bundle.generatedSkills) {
await writeText(path.join(paths.skillsDir, sanitizePathName(skill.name), "SKILL.md"), skill.content + "\n")
const skillName = sanitizePathName(skill.name)
const targetDir = path.join(paths.skillsDir, skillName)
await cleanupCurrentManagedDirectory(targetDir, manifest, "skills", skillName)
await writeText(path.join(targetDir, "SKILL.md"), skill.content + "\n")
}
}
if (bundle.skillDirs.length > 0) {
for (const skill of bundle.skillDirs) {
await copySkillDir(skill.sourceDir, path.join(paths.skillsDir, sanitizePathName(skill.name)), transformContentForGemini)
const skillName = sanitizePathName(skill.name)
const targetDir = path.join(paths.skillsDir, skillName)
await cleanupCurrentManagedDirectory(targetDir, manifest, "skills", skillName)
await copySkillDir(skill.sourceDir, targetDir, transformContentForGemini)
}
}
if (agents.length > 0) {
for (const agent of agents) {
const agentFile = `${sanitizePathName(agent.name)}.md`
await writeText(path.join(paths.agentsDir, agentFile), agent.content + "\n")
}
}
if (bundle.commands.length > 0) {
for (const command of bundle.commands) {
const dest = await resolveCommandPath(paths.commandsDir, command.name, ".toml")
const dest = path.join(paths.commandsDir, ...command.name.split("/")) + ".toml"
await writeText(dest, command.content + "\n")
}
}
@@ -38,7 +73,6 @@ export async function writeGeminiBundle(outputRoot: string, bundle: GeminiBundle
console.log(`Backed up existing settings.json to ${backupPath}`)
}
// Merge mcpServers into existing settings if present
let existingSettings: Record<string, unknown> = {}
if (await pathExists(settingsPath)) {
try {
@@ -54,22 +88,59 @@ export async function writeGeminiBundle(outputRoot: string, bundle: GeminiBundle
const merged = { ...existingSettings, mcpServers: { ...existingMcp, ...bundle.mcpServers } }
await writeJson(settingsPath, merged)
}
if (pluginName) {
await writeManagedInstallManifest(paths.managedDir, {
version: 1,
pluginName,
groups: {
skills: currentSkills,
agents: currentAgents,
commands: currentCommands,
},
})
await archiveLegacyInstallManifestIfOwned(paths.managedDir, pluginName)
await cleanupKnownLegacyGeminiArtifacts(paths, bundle)
}
}
function resolveGeminiPaths(outputRoot: string) {
function resolveGeminiPaths(outputRoot: string, pluginName?: string) {
  // Namespace the managed install directory per plugin so multiple plugins
  // installed into the same Gemini root do not share (and overwrite) each
  // other's install manifests. `resolveManagedSegment` falls back to the
  // legacy "compound-engineering" segment when no plugin name is supplied.
  const managedSegment = resolveManagedSegment(pluginName)
  // Write straight into `outputRoot` when it already is a `.gemini`
  // directory; otherwise nest everything under `<outputRoot>/.gemini`.
  const geminiDir = path.basename(outputRoot) === ".gemini"
    ? outputRoot
    : path.join(outputRoot, ".gemini")
  return {
    geminiDir,
    managedDir: path.join(geminiDir, managedSegment),
    skillsDir: path.join(geminiDir, "skills"),
    agentsDir: path.join(geminiDir, "agents"),
    commandsDir: path.join(geminiDir, "commands"),
  }
}
async function cleanupKnownLegacyGeminiArtifacts(
  paths: ReturnType<typeof resolveGeminiPaths>,
  bundle: GeminiBundle,
): Promise<void> {
  // Archive pre-manifest artifacts this plugin is known to have written so
  // stale copies don't linger next to the freshly installed ones. Each entry
  // is moved into a timestamped backup, never deleted outright.
  const legacy = getLegacyGeminiArtifacts(bundle)
  const groups: Array<[kind: string, rootDir: string, entries: string[], label: string]> = [
    ["skills", paths.skillsDir, legacy.skills, "Gemini skill"],
    ["agents", paths.agentsDir, legacy.agents, "Gemini agent"],
    ["commands", paths.commandsDir, legacy.commands, "Gemini command"],
  ]
  for (const [kind, rootDir, entries, label] of groups) {
    for (const entry of entries) {
      await moveLegacyArtifactToBackup(paths.managedDir, kind, rootDir, entry, label)
    }
  }
}

View File

@@ -1,24 +1,14 @@
import type { ClaudePlugin } from "../types/claude"
import { convertClaudeToOpenCode, type ClaudeToOpenCodeOptions } from "../converters/claude-to-opencode"
import { convertClaudeToCodex } from "../converters/claude-to-codex"
import { convertClaudeToDroid } from "../converters/claude-to-droid"
import { convertClaudeToPi } from "../converters/claude-to-pi"
import { convertClaudeToCopilot } from "../converters/claude-to-copilot"
import { convertClaudeToGemini } from "../converters/claude-to-gemini"
import { convertClaudeToKiro } from "../converters/claude-to-kiro"
import { convertClaudeToWindsurf } from "../converters/claude-to-windsurf"
import { convertClaudeToOpenClaw } from "../converters/claude-to-openclaw"
import { convertClaudeToQwen } from "../converters/claude-to-qwen"
import { writeOpenCodeBundle } from "./opencode"
import { writeCodexBundle } from "./codex"
import { writeDroidBundle } from "./droid"
import { writePiBundle } from "./pi"
import { writeCopilotBundle } from "./copilot"
import { writeGeminiBundle } from "./gemini"
import { writeKiroBundle } from "./kiro"
import { writeWindsurfBundle } from "./windsurf"
import { writeOpenClawBundle } from "./openclaw"
import { writeQwenBundle } from "./qwen"
export type TargetScope = "global" | "workspace"
@@ -70,24 +60,12 @@ export const targets: Record<string, TargetHandler> = {
convert: convertClaudeToCodex as TargetHandler["convert"],
write: writeCodexBundle as TargetHandler["write"],
},
droid: {
name: "droid",
implemented: true,
convert: convertClaudeToDroid as TargetHandler["convert"],
write: writeDroidBundle as TargetHandler["write"],
},
pi: {
name: "pi",
implemented: true,
convert: convertClaudeToPi as TargetHandler["convert"],
write: writePiBundle as TargetHandler["write"],
},
copilot: {
name: "copilot",
implemented: true,
convert: convertClaudeToCopilot as TargetHandler["convert"],
write: writeCopilotBundle as TargetHandler["write"],
},
gemini: {
name: "gemini",
implemented: true,
@@ -100,24 +78,4 @@ export const targets: Record<string, TargetHandler> = {
convert: convertClaudeToKiro as TargetHandler["convert"],
write: writeKiroBundle as TargetHandler["write"],
},
windsurf: {
name: "windsurf",
implemented: true,
defaultScope: "global",
supportedScopes: ["global", "workspace"],
convert: convertClaudeToWindsurf as TargetHandler["convert"],
write: writeWindsurfBundle as TargetHandler["write"],
},
openclaw: {
name: "openclaw",
implemented: true,
convert: convertClaudeToOpenClaw as TargetHandler["convert"],
write: writeOpenClawBundle as TargetHandler["write"],
},
qwen: {
name: "qwen",
implemented: true,
convert: convertClaudeToQwen as TargetHandler["convert"],
write: writeQwenBundle as TargetHandler["write"],
},
}

View File

@@ -3,9 +3,12 @@ import { backupFile, copySkillDir, ensureDir, pathExists, readJson, sanitizePath
import { transformContentForKiro } from "../converters/claude-to-kiro"
import type { KiroBundle } from "../types/kiro"
import { cleanupStaleSkillDirs, cleanupStaleAgents } from "../utils/legacy-cleanup"
import { getLegacyKiroArtifacts } from "../data/plugin-legacy-artifacts"
import { moveLegacyArtifactToBackup, sanitizeManagedPluginName } from "./managed-artifacts"
export async function writeKiroBundle(outputRoot: string, bundle: KiroBundle): Promise<void> {
const paths = resolveKiroPaths(outputRoot)
const pluginName = bundle.pluginName ? sanitizeManagedPluginName(bundle.pluginName) : undefined
await ensureDir(paths.kiroDir)
// TODO(cleanup): Remove after v3 transition (circa Q3 2026)
@@ -100,6 +103,10 @@ export async function writeKiroBundle(outputRoot: string, bundle: KiroBundle): P
const merged = { ...existingConfig, mcpServers: { ...existingServers, ...bundle.mcpServers } }
await writeJson(mcpPath, merged)
}
if (pluginName) {
await cleanupKnownLegacyKiroArtifacts(paths, bundle)
}
}
function resolveKiroPaths(outputRoot: string) {
@@ -108,6 +115,7 @@ function resolveKiroPaths(outputRoot: string) {
if (base === ".kiro") {
return {
kiroDir: outputRoot,
managedDir: path.join(outputRoot, "compound-engineering"),
agentsDir: path.join(outputRoot, "agents"),
skillsDir: path.join(outputRoot, "skills"),
steeringDir: path.join(outputRoot, "steering"),
@@ -118,6 +126,7 @@ function resolveKiroPaths(outputRoot: string) {
const kiroDir = path.join(outputRoot, ".kiro")
return {
kiroDir,
managedDir: path.join(kiroDir, "compound-engineering"),
agentsDir: path.join(kiroDir, "agents"),
skillsDir: path.join(kiroDir, "skills"),
steeringDir: path.join(kiroDir, "steering"),
@@ -125,6 +134,26 @@ function resolveKiroPaths(outputRoot: string) {
}
}
// Archives pre-manifest Kiro artifacts this plugin is known to have written,
// so stale copies do not shadow freshly installed ones. Artifacts are moved
// into a timestamped backup under the managed dir rather than deleted.
async function cleanupKnownLegacyKiroArtifacts(
  paths: ReturnType<typeof resolveKiroPaths>,
  bundle: KiroBundle,
): Promise<void> {
  const legacyArtifacts = getLegacyKiroArtifacts(bundle)
  for (const skillName of legacyArtifacts.skills) {
    await moveLegacyArtifactToBackup(paths.managedDir, "skills", paths.skillsDir, skillName, "Kiro skill")
  }
  for (const agentName of legacyArtifacts.agents) {
    // Each legacy Kiro agent has two artifacts: a JSON definition under
    // agents/ and a markdown prompt under agents/prompts/ — archive both.
    await moveLegacyArtifactToBackup(paths.managedDir, "agents", paths.agentsDir, `${agentName}.json`, "Kiro agent")
    await moveLegacyArtifactToBackup(
      paths.managedDir,
      "agents",
      path.join(paths.agentsDir, "prompts"),
      `${agentName}.md`,
      "Kiro agent prompt",
    )
  }
}
function validatePathSafe(name: string, label: string): void {
if (name.includes("..") || name.includes("/") || name.includes("\\")) {
throw new Error(`${label} name contains unsafe path characters: ${name}`)

View File

@@ -0,0 +1,212 @@
import fs from "fs/promises"
import path from "path"
import { ensureDir, isSafeManagedPath, pathExists, readText, sanitizePathName, writeJson } from "../utils/files"
const MANAGED_INSTALL_MANIFEST = "install-manifest.json"
const LEGACY_MANAGED_SEGMENT = "compound-engineering"
export type ManagedInstallManifest = {
version: 1
pluginName: string
groups: Record<string, string[]>
}
export function sanitizeManagedPluginName(name: string): string {
  // Flatten any path separators that survive `sanitizePathName` so the
  // plugin name is always a single safe directory segment.
  const sanitized = sanitizePathName(name)
  return sanitized.split(/[\\/]/).join("-")
}
/**
 * Returns the directory segment that namespaces managed install artifacts
 * (manifest, legacy-backup) under a target's root. A sanitized plugin name
 * is used verbatim so plugins installed into the same root keep independent
 * manifests; when no name is supplied (legacy callers / bundles without
 * `pluginName`) the historical `compound-engineering` segment is returned
 * to preserve pre-existing paths.
 */
export function resolveManagedSegment(pluginName?: string): string {
  // `== null` matches both undefined and null, mirroring `??` semantics.
  if (pluginName == null) return LEGACY_MANAGED_SEGMENT
  return pluginName
}
/**
 * Resolves the legacy shared managed directory that lived next to the
 * plugin-scoped one before per-plugin namespacing. Given the plugin-scoped
 * `managedDir` (e.g. `<root>/coding-tutor`), the legacy sibling is
 * `<root>/compound-engineering`. Returns null when `pluginName` IS the
 * historical `compound-engineering` segment — the two paths coincide, so
 * there is nothing to migrate.
 */
export function resolveLegacyManagedDir(managedDir: string, pluginName: string): string | null {
  if (pluginName === LEGACY_MANAGED_SEGMENT) return null
  const parent = path.dirname(managedDir)
  return path.join(parent, LEGACY_MANAGED_SEGMENT)
}
/**
 * Reads the plugin-scoped install manifest; when absent, falls back to the
 * legacy shared manifest at `<root>/compound-engineering/install-manifest.json`.
 * Ownership is enforced by `readManagedInstallManifest` (it returns null
 * unless the recorded `pluginName` matches), so a legacy manifest that
 * belongs to a different plugin is left untouched for that plugin's own
 * next install to migrate.
 */
export async function readManagedInstallManifestWithLegacyFallback(
  managedDir: string,
  pluginName: string,
): Promise<ManagedInstallManifest | null> {
  const scoped = await readManagedInstallManifest(managedDir, pluginName)
  if (scoped !== null) return scoped
  const legacyDir = resolveLegacyManagedDir(managedDir, pluginName)
  if (legacyDir === null) return null
  return readManagedInstallManifest(legacyDir, pluginName)
}
/**
 * After a plugin-scoped manifest has been written, archive the legacy
 * shared manifest if it belongs to the current plugin, so the legacy
 * path doesn't keep shadowing or misleading a future install. The
 * legacy file is renamed into a timestamped backup under the new
 * plugin-scoped managed dir rather than deleted outright, for parity
 * with the `legacy-backup/` archival done for removed artifacts.
 *
 * If the legacy manifest does not exist, or it exists but is owned by
 * a different plugin, this is a no-op.
 */
export async function archiveLegacyInstallManifestIfOwned(
  managedDir: string,
  pluginName: string,
): Promise<void> {
  // No legacy sibling to consider (plugin already owns the legacy segment).
  const legacyDir = resolveLegacyManagedDir(managedDir, pluginName)
  if (!legacyDir) return
  const legacyManifestPath = path.join(legacyDir, MANAGED_INSTALL_MANIFEST)
  if (!(await pathExists(legacyManifestPath))) return
  // Only archive when the legacy manifest belongs to the current plugin;
  // `readManagedInstallManifest` validates `pluginName` and returns null
  // otherwise, so a null result means "not ours, leave it alone."
  const owned = await readManagedInstallManifest(legacyDir, pluginName)
  if (!owned) return
  // Colons/dots are replaced so the timestamp is a safe path segment.
  const timestamp = new Date().toISOString().replace(/[:.]/g, "-")
  const backupPath = path.join(managedDir, "legacy-backup", timestamp, MANAGED_INSTALL_MANIFEST)
  await ensureDir(path.dirname(backupPath))
  await fs.rename(legacyManifestPath, backupPath)
  console.warn(`Moved legacy install manifest to ${backupPath}`)
}
/**
 * Reads and validates the `install-manifest.json` under `managedDir`.
 *
 * Returns null when the file is missing or unreadable, when the shape check
 * fails (`version` must be 1 and `groups` an object whose values are arrays),
 * or when the recorded `pluginName` differs from `pluginName` — i.e. the
 * manifest is owned by a different plugin. Unsafe path entries are dropped
 * (with a warning) rather than rejecting the whole manifest.
 */
export async function readManagedInstallManifest(
  managedDir: string,
  pluginName: string,
): Promise<ManagedInstallManifest | null> {
  const manifestPath = path.join(managedDir, MANAGED_INSTALL_MANIFEST)
  try {
    const raw = await readText(manifestPath)
    const parsed = JSON.parse(raw) as Partial<ManagedInstallManifest>
    if (
      parsed.version === 1 &&
      parsed.pluginName === pluginName &&
      parsed.groups &&
      typeof parsed.groups === "object" &&
      !Array.isArray(parsed.groups) &&
      Object.values(parsed.groups).every((entries) => Array.isArray(entries))
    ) {
      // Filter manifest entries at read time: cleanup joins these strings
      // into fs.rm paths, so a corrupted or tampered manifest with entries
      // like `../../config.toml` could delete outside the managed root.
      // We drop unsafe entries here (primary defense) and warn so operators
      // see the corruption signal. Cleanup functions also re-check each
      // entry (defense in depth).
      const safeGroups: Record<string, string[]> = {}
      for (const [group, entries] of Object.entries(parsed.groups)) {
        const safe: string[] = []
        for (const entry of entries as unknown[]) {
          if (isSafeManagedPath(managedDir, entry)) {
            safe.push(entry)
          } else {
            console.warn(
              `Dropping unsafe install-manifest entry in ${manifestPath} (group "${group}"): ${JSON.stringify(entry)}`,
            )
          }
        }
        safeGroups[group] = safe
      }
      return { version: 1, pluginName, groups: safeGroups }
    }
  } catch (err) {
    // ENOENT (no manifest yet) is the normal first-install case; anything
    // else (parse error, permissions) is worth surfacing before returning null.
    if ((err as NodeJS.ErrnoException).code !== "ENOENT") {
      console.warn(`Ignoring unreadable install manifest at ${manifestPath}.`)
    }
  }
  return null
}
export async function writeManagedInstallManifest(
  managedDir: string,
  manifest: ManagedInstallManifest,
): Promise<void> {
  // Persist the record of which artifacts this plugin owns in the target.
  const manifestPath = path.join(managedDir, MANAGED_INSTALL_MANIFEST)
  await writeJson(manifestPath, manifest)
}
/**
 * Recursively deletes directories recorded in a previous install manifest
 * that the current bundle no longer ships. A null manifest means nothing
 * is tracked, so nothing is removed.
 */
export async function cleanupRemovedManagedDirectories(
  rootDir: string,
  manifest: ManagedInstallManifest | null,
  group: string,
  currentEntries: string[],
): Promise<void> {
  if (manifest === null) return
  const keep = new Set(currentEntries)
  const recorded = manifest.groups[group] ?? []
  for (const entry of recorded) {
    if (keep.has(entry)) continue
    // Defense in depth: `readManagedInstallManifest` already drops unsafe
    // entries, but re-check here so any future caller that bypasses the
    // read layer cannot trigger out-of-tree deletes.
    if (!isSafeManagedPath(rootDir, entry)) continue
    await fs.rm(resolveArtifactPath(rootDir, entry), { recursive: true, force: true })
  }
}
/**
 * Deletes files recorded in a previous install manifest that the current
 * bundle no longer ships (non-recursive counterpart of
 * `cleanupRemovedManagedDirectories`).
 */
export async function cleanupRemovedManagedFiles(
  rootDir: string,
  manifest: ManagedInstallManifest | null,
  group: string,
  currentEntries: string[],
): Promise<void> {
  if (manifest === null) return
  const keep = new Set(currentEntries)
  const recorded = manifest.groups[group] ?? []
  for (const entry of recorded) {
    if (keep.has(entry)) continue
    // Defense in depth: re-validate even though the read layer filters.
    if (!isSafeManagedPath(rootDir, entry)) continue
    await fs.rm(resolveArtifactPath(rootDir, entry), { force: true })
  }
}
/**
 * Wipes a target directory about to be rewritten — but only when the
 * previous install manifest claims it. A directory the manifest does not
 * list is never deleted (it may be user-owned).
 */
export async function cleanupCurrentManagedDirectory(
  targetDir: string,
  manifest: ManagedInstallManifest | null,
  group: string,
  entryName: string,
): Promise<void> {
  const owned = manifest?.groups[group]?.includes(entryName) ?? false
  if (!owned) return
  await fs.rm(targetDir, { recursive: true, force: true })
}
/**
 * Archives (never deletes) a known pre-manifest artifact into a timestamped
 * `legacy-backup/` folder under the managed dir, preserving its relative
 * layout under a per-kind subfolder. No-op when the artifact is absent.
 */
export async function moveLegacyArtifactToBackup(
  managedDir: string,
  kind: string,
  artifactRoot: string,
  relativePath: string,
  label: string,
): Promise<void> {
  const source = resolveArtifactPath(artifactRoot, relativePath)
  if (!(await pathExists(source))) return
  // Colons/dots are replaced so the timestamp is a safe path segment.
  const stamp = new Date().toISOString().replace(/[:.]/g, "-")
  const destination = path.join(managedDir, "legacy-backup", stamp, kind, ...relativePath.split("/"))
  await ensureDir(path.dirname(destination))
  await fs.rename(source, destination)
  console.warn(`Moved legacy ${label} artifact to ${destination}`)
}
function resolveArtifactPath(rootDir: string, relativePath: string): string {
  // Manifest entries use "/" as a portable separator regardless of OS;
  // split and re-join so path.join applies the platform separator.
  const segments = relativePath.split("/")
  return path.join(rootDir, ...segments)
}

View File

@@ -1,101 +0,0 @@
import path from "path"
import { promises as fs } from "fs"
import { backupFile, copyDir, ensureDir, pathExists, readJson, sanitizePathName, walkFiles, writeJson, writeText } from "../utils/files"
import type { OpenClawBundle } from "../types/openclaw"
import { cleanupStaleSkillDirs, cleanupStaleAgents } from "../utils/legacy-cleanup"
/**
 * Writes a converted OpenClaw bundle: plugin scaffolding (manifest,
 * package.json, index.ts), generated skills, copied skill directories with
 * path rewriting, and an optional merged openclaw.json config fragment.
 */
export async function writeOpenClawBundle(outputRoot: string, bundle: OpenClawBundle): Promise<void> {
  const paths = resolveOpenClawPaths(outputRoot)
  await ensureDir(paths.root)
  // TODO(cleanup): Remove after v3 transition (circa Q3 2026)
  await cleanupStaleSkillDirs(paths.skillsDir)
  await cleanupStaleAgents(paths.skillsDir, null, "agent-") // agents are converted to agent-* skill dirs in OpenClaw
  // Plugin scaffolding: manifest, package metadata, TS entry point.
  await writeJson(paths.manifestPath, bundle.manifest)
  await writeJson(paths.packageJsonPath, bundle.packageJson)
  await writeText(paths.entryPointPath, bundle.entryPoint)
  // Agents + commands arrive pre-converted as generated SKILL.md content.
  for (const generated of bundle.skills) {
    const skillDir = path.join(paths.skillsDir, sanitizePathName(generated.dir))
    await ensureDir(skillDir)
    await writeText(path.join(skillDir, "SKILL.md"), generated.content + "\n")
  }
  // Original skill directories are copied wholesale (preserving references/,
  // assets/, scripts/) and then have .claude/ paths rewritten to .openclaw/.
  for (const copy of bundle.skillDirCopies) {
    const destDir = path.join(paths.skillsDir, sanitizePathName(copy.name))
    await copyDir(copy.sourceDir, destDir)
    await rewritePathsInDir(destDir)
  }
  // MCP servers land in an openclaw.json fragment, merged with any existing
  // user config; the existing file is backed up before writing.
  if (bundle.openclawConfig) {
    const configPath = path.join(paths.root, "openclaw.json")
    const backupPath = await backupFile(configPath)
    if (backupPath) {
      console.log(`Backed up existing config to ${backupPath}`)
    }
    const merged = await mergeOpenClawConfig(configPath, bundle.openclawConfig)
    await writeJson(configPath, merged)
  }
}
function resolveOpenClawPaths(outputRoot: string) {
  // OpenClaw plugins use a flat layout directly at the output root.
  const at = (...parts: string[]) => path.join(outputRoot, ...parts)
  return {
    root: outputRoot,
    manifestPath: at("openclaw.plugin.json"),
    packageJsonPath: at("package.json"),
    entryPointPath: at("index.ts"),
    skillsDir: at("skills"),
  }
}
async function rewritePathsInDir(dir: string): Promise<void> {
const files = await walkFiles(dir)
for (const file of files) {
if (!file.endsWith(".md")) continue
const content = await fs.readFile(file, "utf8")
const rewritten = content
.replace(/~\/\.claude\//g, "~/.openclaw/")
.replace(/\.claude\//g, ".openclaw/")
.replace(/\.claude-plugin\//g, "openclaw-plugin/")
if (rewritten !== content) {
await fs.writeFile(file, rewritten, "utf8")
}
}
}
/**
 * Merges the plugin's openclaw.json fragment into any existing config.
 * Existing (user) MCP server entries take precedence on conflict; all
 * other user keys are preserved untouched.
 */
async function mergeOpenClawConfig(
  configPath: string,
  incoming: Record<string, unknown>,
): Promise<Record<string, unknown>> {
  // No existing file: write the plugin config as-is.
  if (!(await pathExists(configPath))) return incoming
  let existing: Record<string, unknown>
  try {
    existing = await readJson<Record<string, unknown>>(configPath)
  } catch {
    console.warn(
      `Warning: existing ${configPath} is not valid JSON. Writing plugin config without merging.`,
    )
    return incoming
  }
  // Merge MCP servers: existing takes precedence on conflict
  const incomingMcp = (incoming.mcpServers ?? {}) as Record<string, unknown>
  const existingMcp = (existing.mcpServers ?? {}) as Record<string, unknown>
  const mergedMcp = { ...incomingMcp, ...existingMcp }
  const hasServers = Object.keys(mergedMcp).length > 0
  // `undefined` is dropped by JSON serialization, so an empty server map
  // simply omits the key.
  return {
    ...existing,
    mcpServers: hasServers ? mergedMcp : undefined,
  }
}

View File

@@ -1,45 +1,48 @@
import path from "path"
import { backupFile, copySkillDir, ensureDir, pathExists, readJson, resolveCommandPath, sanitizePathName, writeJson, writeText } from "../utils/files"
import { backupFile, copySkillDir, ensureDir, pathExists, readJson, sanitizePathName, writeJson, writeText } from "../utils/files"
import { transformSkillContentForOpenCode } from "../converters/claude-to-opencode"
import type { OpenCodeBundle, OpenCodeConfig } from "../types/opencode"
import { cleanupStaleSkillDirs, cleanupStaleAgents } from "../utils/legacy-cleanup"
import { getLegacyOpenCodeArtifacts } from "../data/plugin-legacy-artifacts"
import {
archiveLegacyInstallManifestIfOwned,
cleanupCurrentManagedDirectory,
cleanupRemovedManagedDirectories,
cleanupRemovedManagedFiles,
moveLegacyArtifactToBackup,
readManagedInstallManifestWithLegacyFallback,
resolveManagedSegment,
sanitizeManagedPluginName,
writeManagedInstallManifest,
} from "./managed-artifacts"
// Merges plugin config into existing opencode.json. User keys win on conflict. See ADR-002.
async function mergeOpenCodeConfig(
configPath: string,
incoming: OpenCodeConfig,
): Promise<OpenCodeConfig> {
// If no existing config, write plugin config as-is
if (!(await pathExists(configPath))) return incoming
let existing: OpenCodeConfig
try {
existing = await readJson<OpenCodeConfig>(configPath)
} catch {
// Safety first per AGENTS.md -- do not destroy user data even if their config is malformed.
// Warn and fall back to plugin-only config rather than crashing.
console.warn(
`Warning: existing ${configPath} is not valid JSON. Writing plugin config without merging.`
)
return incoming
}
// User config wins on conflict -- see ADR-002
// MCP servers: add plugin entry, skip keys already in user config.
const mergedMcp = {
...(incoming.mcp ?? {}),
...(existing.mcp ?? {}), // existing takes precedence (overwrites same-named plugin entry)
...(existing.mcp ?? {}),
}
// Permission: add plugin entry, skip keys already in user config.
const mergedPermission = incoming.permission
? {
...(incoming.permission),
...(existing.permission ?? {}), // existing takes precedence
...(existing.permission ?? {}),
}
: existing.permission
// Tools: same pattern
const mergedTools = incoming.tools
? {
...(incoming.tools),
@@ -48,7 +51,7 @@ async function mergeOpenCodeConfig(
: existing.tools
return {
...existing, // all user keys preserved
...existing,
$schema: incoming.$schema ?? existing.$schema,
mcp: Object.keys(mergedMcp).length > 0 ? mergedMcp : undefined,
permission: mergedPermission,
@@ -56,9 +59,26 @@ async function mergeOpenCodeConfig(
}
}
export async function writeOpenCodeBundle(outputRoot: string, bundle: OpenCodeBundle): Promise<void> {
const openCodePaths = resolveOpenCodePaths(outputRoot)
export async function writeOpenCodeBundle(
outputRoot: string,
bundle: OpenCodeBundle,
scope?: string,
): Promise<void> {
const pluginName = bundle.pluginName ? sanitizeManagedPluginName(bundle.pluginName) : undefined
const openCodePaths = resolveOpenCodePaths(outputRoot, pluginName, scope)
const manifest = pluginName
? await readManagedInstallManifestWithLegacyFallback(openCodePaths.managedDir, pluginName)
: null
const currentAgents = bundle.agents.map((agent) => `${sanitizePathName(agent.name)}.md`)
const currentCommands = bundle.commandFiles.map((commandFile) => `${commandFile.name.split(":").join("/")}.md`)
const currentPlugins = bundle.plugins.map((plugin) => plugin.name)
const currentSkills = bundle.skillDirs.map((skill) => sanitizePathName(skill.name))
await ensureDir(openCodePaths.root)
await cleanupRemovedManagedFiles(openCodePaths.agentsDir, manifest, "agents", currentAgents)
await cleanupRemovedManagedFiles(openCodePaths.commandDir, manifest, "commands", currentCommands)
await cleanupRemovedManagedFiles(openCodePaths.pluginsDir, manifest, "plugins", currentPlugins)
await cleanupRemovedManagedDirectories(openCodePaths.skillsDir, manifest, "skills", currentSkills)
const hadExistingConfig = await pathExists(openCodePaths.configPath)
const backupPath = await backupFile(openCodePaths.configPath)
@@ -71,11 +91,6 @@ export async function writeOpenCodeBundle(outputRoot: string, bundle: OpenCodeBu
console.log("Merged plugin config into existing opencode.json (user settings preserved)")
}
// TODO(cleanup): Remove after v3 transition (circa Q3 2026)
await cleanupStaleSkillDirs(openCodePaths.skillsDir)
await cleanupStaleAgents(openCodePaths.agentsDir, ".md")
const agentsDir = openCodePaths.agentsDir
const seenAgents = new Set<string>()
for (const agent of bundle.agents) {
const safeName = sanitizePathName(agent.name)
@@ -84,11 +99,11 @@ export async function writeOpenCodeBundle(outputRoot: string, bundle: OpenCodeBu
continue
}
seenAgents.add(safeName)
await writeText(path.join(agentsDir, `${safeName}.md`), agent.content + "\n")
await writeText(path.join(openCodePaths.agentsDir, `${safeName}.md`), agent.content + "\n")
}
for (const commandFile of bundle.commandFiles) {
const dest = await resolveCommandPath(openCodePaths.commandDir, commandFile.name, ".md")
const dest = path.join(openCodePaths.commandDir, ...commandFile.name.split(":")) + ".md"
const cmdBackupPath = await backupFile(dest)
if (cmdBackupPath) {
console.log(`Backed up existing command file to ${cmdBackupPath}`)
@@ -97,49 +112,87 @@ export async function writeOpenCodeBundle(outputRoot: string, bundle: OpenCodeBu
}
if (bundle.plugins.length > 0) {
const pluginsDir = openCodePaths.pluginsDir
for (const plugin of bundle.plugins) {
await writeText(path.join(pluginsDir, plugin.name), plugin.content + "\n")
await writeText(path.join(openCodePaths.pluginsDir, plugin.name), plugin.content + "\n")
}
}
if (bundle.skillDirs.length > 0) {
const skillsRoot = openCodePaths.skillsDir
for (const skill of bundle.skillDirs) {
const skillName = sanitizePathName(skill.name)
const targetDir = path.join(openCodePaths.skillsDir, skillName)
await cleanupCurrentManagedDirectory(targetDir, manifest, "skills", skillName)
await copySkillDir(
skill.sourceDir,
path.join(skillsRoot, sanitizePathName(skill.name)),
targetDir,
transformSkillContentForOpenCode,
true, // transform all .md files — FQ agent names appear in references too
true,
)
}
}
if (pluginName) {
await writeManagedInstallManifest(openCodePaths.managedDir, {
version: 1,
pluginName,
groups: {
agents: currentAgents,
commands: currentCommands,
plugins: currentPlugins,
skills: currentSkills,
},
})
await archiveLegacyInstallManifestIfOwned(openCodePaths.managedDir, pluginName)
await cleanupKnownLegacyOpenCodeArtifacts(openCodePaths, bundle)
}
}
function resolveOpenCodePaths(outputRoot: string) {
function resolveOpenCodePaths(outputRoot: string, pluginName?: string, scope?: string) {
// Namespace the managed install directory per plugin so multiple plugins
// installed into the same OpenCode root do not share (and overwrite) each
// other's install manifests. `resolveManagedSegment` falls back to the
// legacy "compound-engineering" segment when no plugin name is supplied.
const managedSegment = resolveManagedSegment(pluginName)
const base = path.basename(outputRoot)
// Global install: ~/.config/opencode (basename is "opencode")
// Project install: .opencode (basename is ".opencode")
if (base === "opencode" || base === ".opencode") {
// Global layout: explicit scope="global" (from OPENCODE_CONFIG_DIR or the XDG
// default), or a basename that matches OpenCode's conventional roots.
// Project layout: nested under ".opencode/".
const isGlobal = scope === "global" || base === "opencode" || base === ".opencode"
if (isGlobal) {
return {
root: outputRoot,
managedDir: path.join(outputRoot, managedSegment),
configPath: path.join(outputRoot, "opencode.json"),
agentsDir: path.join(outputRoot, "agents"),
pluginsDir: path.join(outputRoot, "plugins"),
skillsDir: path.join(outputRoot, "skills"),
// .md command files; alternative to the command key in opencode.json
commandDir: path.join(outputRoot, "commands"),
}
}
// Custom output directory - nest under .opencode subdirectory
return {
root: outputRoot,
managedDir: path.join(outputRoot, ".opencode", managedSegment),
configPath: path.join(outputRoot, "opencode.json"),
agentsDir: path.join(outputRoot, ".opencode", "agents"),
pluginsDir: path.join(outputRoot, ".opencode", "plugins"),
skillsDir: path.join(outputRoot, ".opencode", "skills"),
// .md command files; alternative to the command key in opencode.json
commandDir: path.join(outputRoot, ".opencode", "commands"),
}
}
async function cleanupKnownLegacyOpenCodeArtifacts(
paths: ReturnType<typeof resolveOpenCodePaths>,
bundle: OpenCodeBundle,
): Promise<void> {
const legacyArtifacts = getLegacyOpenCodeArtifacts(bundle)
for (const skillName of legacyArtifacts.skills) {
await moveLegacyArtifactToBackup(paths.managedDir, "skills", paths.skillsDir, skillName, "OpenCode skill")
}
for (const commandPath of legacyArtifacts.commands) {
await moveLegacyArtifactToBackup(paths.managedDir, "commands", paths.commandDir, commandPath, "OpenCode command")
}
for (const agentPath of legacyArtifacts.agents) {
await moveLegacyArtifactToBackup(paths.managedDir, "agents", paths.agentsDir, agentPath, "OpenCode agent")
}
}

View File

@@ -1,8 +1,10 @@
import fs from "fs/promises"
import path from "path"
import {
backupFile,
copySkillDir,
ensureDir,
isSafeManagedPath,
pathExists,
readText,
sanitizePathName,
@@ -11,10 +13,13 @@ import {
} from "../utils/files"
import { transformContentForPi } from "../converters/claude-to-pi"
import type { PiBundle } from "../types/pi"
import { cleanupStaleSkillDirs, cleanupStaleAgents } from "../utils/legacy-cleanup"
import { getLegacyPiArtifacts } from "../data/plugin-legacy-artifacts"
import { cleanupStaleAgents } from "../utils/legacy-cleanup"
import { resolveLegacyManagedDir, resolveManagedSegment } from "./managed-artifacts"
const PI_AGENTS_BLOCK_START = "<!-- BEGIN COMPOUND PI TOOL MAP -->"
const PI_AGENTS_BLOCK_END = "<!-- END COMPOUND PI TOOL MAP -->"
const PI_INSTALL_MANIFEST = "install-manifest.json"
const PI_AGENTS_BLOCK_BODY = `## Compound Engineering (Pi compatibility)
@@ -28,27 +33,61 @@ Compatibility notes:
- MCPorter config path: .pi/compound-engineering/mcporter.json (project) or ~/.pi/agent/compound-engineering/mcporter.json (global)
`
export type PiInstallManifest = {
version: 1
pluginName: string
skills: string[]
prompts: string[]
extensions: string[]
}
type PiPaths = {
managedDir: string
skillsDir: string
promptsDir: string
extensionsDir: string
mcporterConfigPath: string
agentsPath: string
}
export async function writePiBundle(outputRoot: string, bundle: PiBundle): Promise<void> {
const paths = resolvePiPaths(outputRoot)
const pluginName = bundle.pluginName ? sanitizeCodexPathComponent(bundle.pluginName) : undefined
const paths = resolvePiPaths(outputRoot, pluginName)
const manifest = pluginName
? await readInstallManifestWithLegacyFallback(paths, pluginName)
: null
const currentPrompts = bundle.prompts.map((prompt) => `${sanitizePathName(prompt.name)}.md`)
const currentSkills = [
...bundle.skillDirs.map((skill) => sanitizePathName(skill.name)),
...bundle.generatedSkills.map((skill) => sanitizePathName(skill.name)),
]
const currentExtensions = bundle.extensions.map((extension) => extension.name)
await ensureDir(paths.skillsDir)
await ensureDir(paths.promptsDir)
await ensureDir(paths.extensionsDir)
// TODO(cleanup): Remove after v3 transition (circa Q3 2026)
await cleanupStaleSkillDirs(paths.skillsDir)
await cleanupStaleAgents(paths.skillsDir, null)
await cleanupRemovedPrompts(paths.promptsDir, manifest, currentPrompts)
await cleanupRemovedSkills(paths.skillsDir, manifest, currentSkills)
await cleanupRemovedExtensions(paths.extensionsDir, manifest, currentExtensions)
for (const prompt of bundle.prompts) {
await writeText(path.join(paths.promptsDir, `${sanitizePathName(prompt.name)}.md`), prompt.content + "\n")
}
for (const skill of bundle.skillDirs) {
await copySkillDir(skill.sourceDir, path.join(paths.skillsDir, sanitizePathName(skill.name)), transformContentForPi)
const skillName = sanitizePathName(skill.name)
const targetDir = path.join(paths.skillsDir, skillName)
await cleanupCurrentManagedSkillDir(targetDir, manifest, skillName)
await copySkillDir(skill.sourceDir, targetDir, transformContentForPi)
}
for (const skill of bundle.generatedSkills) {
await writeText(path.join(paths.skillsDir, sanitizePathName(skill.name), "SKILL.md"), skill.content + "\n")
const skillName = sanitizePathName(skill.name)
const targetDir = path.join(paths.skillsDir, skillName)
await cleanupCurrentManagedSkillDir(targetDir, manifest, skillName)
await writeText(path.join(targetDir, "SKILL.md"), skill.content + "\n")
}
for (const extension of bundle.extensions) {
@@ -64,39 +103,56 @@ export async function writePiBundle(outputRoot: string, bundle: PiBundle): Promi
}
await ensurePiAgentsBlock(paths.agentsPath)
if (pluginName) {
await writeInstallManifest(paths.managedDir, {
version: 1,
pluginName,
skills: currentSkills,
prompts: currentPrompts,
extensions: currentExtensions,
})
await archiveLegacyInstallManifestIfOwned(paths.managedDir, pluginName)
await cleanupKnownLegacyPiArtifacts(paths, bundle)
}
}
function resolvePiPaths(outputRoot: string) {
function resolvePiPaths(outputRoot: string, pluginName?: string): PiPaths {
// Namespace the managed install directory per plugin so multiple plugins
// installed into the same Pi root do not share (and overwrite) each other's
// install manifests. `resolveManagedSegment` falls back to the legacy
// "compound-engineering" segment when no plugin name is supplied.
const managedSegment = resolveManagedSegment(pluginName)
const base = path.basename(outputRoot)
// Global install root: ~/.pi/agent
if (base === "agent") {
return {
managedDir: path.join(outputRoot, managedSegment),
skillsDir: path.join(outputRoot, "skills"),
promptsDir: path.join(outputRoot, "prompts"),
extensionsDir: path.join(outputRoot, "extensions"),
mcporterConfigPath: path.join(outputRoot, "compound-engineering", "mcporter.json"),
mcporterConfigPath: path.join(outputRoot, managedSegment, "mcporter.json"),
agentsPath: path.join(outputRoot, "AGENTS.md"),
}
}
// Project local .pi directory
if (base === ".pi") {
return {
managedDir: path.join(outputRoot, managedSegment),
skillsDir: path.join(outputRoot, "skills"),
promptsDir: path.join(outputRoot, "prompts"),
extensionsDir: path.join(outputRoot, "extensions"),
mcporterConfigPath: path.join(outputRoot, "compound-engineering", "mcporter.json"),
mcporterConfigPath: path.join(outputRoot, managedSegment, "mcporter.json"),
agentsPath: path.join(outputRoot, "AGENTS.md"),
}
}
// Custom output root -> nest under .pi
return {
managedDir: path.join(outputRoot, ".pi", managedSegment),
skillsDir: path.join(outputRoot, ".pi", "skills"),
promptsDir: path.join(outputRoot, ".pi", "prompts"),
extensionsDir: path.join(outputRoot, ".pi", "extensions"),
mcporterConfigPath: path.join(outputRoot, ".pi", "compound-engineering", "mcporter.json"),
mcporterConfigPath: path.join(outputRoot, ".pi", managedSegment, "mcporter.json"),
agentsPath: path.join(outputRoot, "AGENTS.md"),
}
}
@@ -136,3 +192,209 @@ function upsertBlock(existing: string, block: string): string {
return existing.trimEnd() + "\n\n" + block + "\n"
}
function sanitizeCodexPathComponent(name: string): string {
return sanitizePathName(name).replace(/[\\/]/g, "-")
}
export async function readPiInstallManifest(
managedDir: string,
pluginName: string,
paths?: PiPaths,
): Promise<PiInstallManifest | null> {
return readInstallManifest(managedDir, pluginName, paths)
}
async function readInstallManifestWithLegacyFallback(
paths: PiPaths,
pluginName: string,
): Promise<PiInstallManifest | null> {
const current = await readInstallManifest(paths.managedDir, pluginName, paths)
if (current) return current
const legacyDir = resolveLegacyManagedDir(paths.managedDir, pluginName)
if (!legacyDir) return null
return readInstallManifest(legacyDir, pluginName, paths)
}
/**
* After the plugin-scoped Pi manifest is written, archive the legacy
* shared Pi manifest if it belongs to the current plugin so the legacy
* path doesn't keep shadowing a future install. No-op when the legacy
* manifest is missing or owned by a different plugin (that plugin's
* own next install will migrate it).
*/
async function archiveLegacyInstallManifestIfOwned(
managedDir: string,
pluginName: string,
): Promise<void> {
const legacyDir = resolveLegacyManagedDir(managedDir, pluginName)
if (!legacyDir) return
const legacyManifestPath = path.join(legacyDir, PI_INSTALL_MANIFEST)
if (!(await pathExists(legacyManifestPath))) return
const owned = await readInstallManifest(legacyDir, pluginName)
if (!owned) return
const timestamp = new Date().toISOString().replace(/[:.]/g, "-")
const backupPath = path.join(managedDir, "legacy-backup", timestamp, PI_INSTALL_MANIFEST)
await ensureDir(path.dirname(backupPath))
await fs.rename(legacyManifestPath, backupPath)
console.warn(`Moved legacy Pi install manifest to ${backupPath}`)
}
async function readInstallManifest(
managedDir: string,
pluginName: string,
paths?: PiPaths,
): Promise<PiInstallManifest | null> {
const manifestPath = path.join(managedDir, PI_INSTALL_MANIFEST)
try {
const raw = await readText(manifestPath)
const parsed = JSON.parse(raw) as Partial<PiInstallManifest>
if (
parsed.version === 1 &&
parsed.pluginName === pluginName &&
Array.isArray(parsed.skills) &&
Array.isArray(parsed.prompts) &&
Array.isArray(parsed.extensions)
) {
// Filter manifest entries at read time. Cleanup functions join these
// strings into `fs.rm` paths against the Pi skills/prompts/extensions
// directories, so a tampered or corrupted `install-manifest.json` with
// entries like `../../config.toml` or `/etc/passwd` would otherwise
// delete outside the Pi managed tree. Validate each group against the
// specific cleanup root it will be joined with; fall back to
// `managedDir` when no `PiPaths` context is supplied (e.g. an
// ownership-only read), which still rejects absolute paths and `..`
// segments and provides containment against *some* root.
const skillsRoot = paths?.skillsDir ?? managedDir
const promptsRoot = paths?.promptsDir ?? managedDir
const extensionsRoot = paths?.extensionsDir ?? managedDir
return {
version: 1,
pluginName,
skills: filterSafePiManifestEntries(parsed.skills, skillsRoot, manifestPath, "skills"),
prompts: filterSafePiManifestEntries(parsed.prompts, promptsRoot, manifestPath, "prompts"),
extensions: filterSafePiManifestEntries(parsed.extensions, extensionsRoot, manifestPath, "extensions"),
}
}
} catch (err) {
if ((err as NodeJS.ErrnoException).code !== "ENOENT") {
console.warn(`Ignoring unreadable Pi install manifest at ${manifestPath}.`)
}
}
return null
}
function filterSafePiManifestEntries(
entries: unknown[],
rootDir: string,
manifestPath: string,
group: string,
): string[] {
const safe: string[] = []
for (const entry of entries) {
if (isSafeManagedPath(rootDir, entry)) {
safe.push(entry)
} else {
console.warn(
`Dropping unsafe Pi install-manifest entry in ${manifestPath} (group "${group}"): ${JSON.stringify(entry)}`,
)
}
}
return safe
}
async function writeInstallManifest(managedDir: string, manifest: PiInstallManifest): Promise<void> {
await writeJson(path.join(managedDir, PI_INSTALL_MANIFEST), manifest)
}
async function cleanupRemovedSkills(
skillsDir: string,
manifest: PiInstallManifest | null,
currentSkills: string[],
): Promise<void> {
if (!manifest) return
const current = new Set(currentSkills)
for (const skillName of manifest.skills) {
if (current.has(skillName)) continue
// Defense in depth: `readInstallManifest` already drops unsafe entries,
// but re-check before any out-of-tree fs.rm can be issued from a future
// caller that bypasses the read layer.
if (!isSafeManagedPath(skillsDir, skillName)) continue
await fs.rm(path.join(skillsDir, skillName), { recursive: true, force: true })
}
}
async function cleanupRemovedPrompts(
promptsDir: string,
manifest: PiInstallManifest | null,
currentPrompts: string[],
): Promise<void> {
if (!manifest) return
const current = new Set(currentPrompts)
for (const promptFile of manifest.prompts) {
if (current.has(promptFile)) continue
if (!isSafeManagedPath(promptsDir, promptFile)) continue
await fs.rm(path.join(promptsDir, promptFile), { force: true })
}
}
async function cleanupRemovedExtensions(
extensionsDir: string,
manifest: PiInstallManifest | null,
currentExtensions: string[],
): Promise<void> {
if (!manifest) return
const current = new Set(currentExtensions)
for (const extensionFile of manifest.extensions) {
if (current.has(extensionFile)) continue
if (!isSafeManagedPath(extensionsDir, extensionFile)) continue
await fs.rm(path.join(extensionsDir, extensionFile), { force: true })
}
}
async function cleanupCurrentManagedSkillDir(
targetDir: string,
manifest: PiInstallManifest | null,
skillName: string,
): Promise<void> {
if (!manifest?.skills.includes(skillName)) return
await fs.rm(targetDir, { recursive: true, force: true })
}
async function cleanupKnownLegacyPiArtifacts(paths: PiPaths, bundle: PiBundle): Promise<void> {
const pluginName = bundle.pluginName
if (!pluginName) return
const legacyArtifacts = getLegacyPiArtifacts(bundle)
for (const skillName of legacyArtifacts.skills) {
const legacySkillPath = path.join(paths.skillsDir, skillName)
await moveLegacyArtifactToBackup(paths.managedDir, "skills", legacySkillPath)
}
for (const promptFile of legacyArtifacts.prompts) {
const legacyPromptPath = path.join(paths.promptsDir, promptFile)
await moveLegacyArtifactToBackup(paths.managedDir, "prompts", legacyPromptPath)
}
}
async function moveLegacyArtifactToBackup(
managedDir: string,
kind: "skills" | "prompts",
artifactPath: string,
): Promise<void> {
if (!(await pathExists(artifactPath))) return
const timestamp = new Date().toISOString().replace(/[:.]/g, "-")
const backupDir = path.join(managedDir, "legacy-backup", timestamp, kind)
const backupPath = path.join(backupDir, path.basename(artifactPath))
await ensureDir(backupDir)
await fs.rename(artifactPath, backupPath)
console.warn(`Moved legacy Pi ${kind.slice(0, -1)} artifact to ${backupPath}`)
}
export {
cleanupRemovedSkills as cleanupRemovedPiSkills,
cleanupRemovedPrompts as cleanupRemovedPiPrompts,
cleanupRemovedExtensions as cleanupRemovedPiExtensions,
}

View File

@@ -1,134 +0,0 @@
import path from "path"
import { backupFile, copyDir, ensureDir, readJson, resolveCommandPath, sanitizePathName, pathExists, writeJsonSecure, writeText } from "../utils/files"
import type { QwenBundle, QwenExtensionConfig } from "../types/qwen"
import { cleanupStaleSkillDirs, cleanupStaleAgents } from "../utils/legacy-cleanup"
export async function writeQwenBundle(outputRoot: string, bundle: QwenBundle): Promise<void> {
const qwenPaths = resolveQwenPaths(outputRoot)
await ensureDir(qwenPaths.root)
// Merge qwen-extension.json config, preserving existing user MCP servers
const configPath = qwenPaths.configPath
const backupPath = await backupFile(configPath)
if (backupPath) {
console.log(`Backed up existing config to ${backupPath}`)
}
const merged = await mergeQwenConfig(configPath, bundle.config)
await writeJsonSecure(configPath, merged)
// Write context file (QWEN.md)
if (bundle.contextFile) {
await writeText(qwenPaths.contextPath, bundle.contextFile + "\n")
}
// TODO(cleanup): Remove after v3 transition (circa Q3 2026)
await cleanupStaleSkillDirs(qwenPaths.skillsDir)
// Write agents
const agentsDir = qwenPaths.agentsDir
await ensureDir(agentsDir)
await cleanupStaleAgents(agentsDir, ".yaml")
await cleanupStaleAgents(agentsDir, ".md")
for (const agent of bundle.agents) {
const ext = agent.format === "yaml" ? "yaml" : "md"
await writeText(path.join(agentsDir, `${sanitizePathName(agent.name)}.${ext}`), agent.content + "\n")
}
// Write commands
const commandsDir = qwenPaths.commandsDir
await ensureDir(commandsDir)
for (const commandFile of bundle.commandFiles) {
const dest = await resolveCommandPath(commandsDir, commandFile.name, ".md")
await writeText(dest, commandFile.content + "\n")
}
// Copy skills
if (bundle.skillDirs.length > 0) {
const skillsRoot = qwenPaths.skillsDir
await ensureDir(skillsRoot)
for (const skill of bundle.skillDirs) {
await copyDir(skill.sourceDir, path.join(skillsRoot, sanitizePathName(skill.name)))
}
}
}
const MANAGED_KEY = "_compound_managed_mcp"
const MANAGED_KEYS_KEY = "_compound_managed_keys"
const TRACKING_KEYS = new Set([MANAGED_KEY, MANAGED_KEYS_KEY])
async function mergeQwenConfig(
configPath: string,
incoming: QwenExtensionConfig,
): Promise<QwenExtensionConfig> {
let existing: Record<string, unknown> = {}
if (await pathExists(configPath)) {
try {
const parsed = await readJson<unknown>(configPath)
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
existing = parsed as Record<string, unknown>
}
} catch {
// Unparseable file — proceed with incoming only
}
}
const existingMcp = (typeof existing.mcpServers === "object" && existing.mcpServers !== null && !Array.isArray(existing.mcpServers))
? { ...(existing.mcpServers as Record<string, unknown>) }
: {}
// Remove previously-managed plugin servers that are no longer in the bundle.
// Legacy migration: if no tracking key exists AND plugin has servers, assume all
// existing servers are plugin-managed (the old writer overwrote the entire file).
// When incoming is empty, skip pruning — there's nothing to migrate and we'd
// wrongly delete user servers from a pre-existing untracked config.
const incomingMcp = incoming.mcpServers ?? {}
const hasTrackingKey = Array.isArray(existing[MANAGED_KEY])
const prevManaged = hasTrackingKey
? existing[MANAGED_KEY] as string[]
: Object.keys(incomingMcp).length > 0 ? Object.keys(existingMcp) : []
for (const name of prevManaged) {
if (!(name in incomingMcp)) {
delete existingMcp[name]
}
}
const mergedMcp = { ...existingMcp, ...incomingMcp }
const { mcpServers: _, ...incomingRest } = incoming
const incomingTopKeys = Object.keys(incomingRest).filter((k) => !TRACKING_KEYS.has(k))
// Prune top-level keys from previous installs that are no longer in the incoming bundle.
// Only prune keys we previously tracked; skip on first install (no tracking key yet).
const prevManagedKeys = Array.isArray(existing[MANAGED_KEYS_KEY])
? existing[MANAGED_KEYS_KEY] as string[]
: []
for (const key of prevManagedKeys) {
if (!incomingTopKeys.includes(key) && key in existing) {
delete existing[key]
}
}
const merged = { ...existing, ...incomingRest } as QwenExtensionConfig & Record<string, unknown>
if (Object.keys(mergedMcp).length > 0) {
merged.mcpServers = mergedMcp as QwenExtensionConfig["mcpServers"]
} else {
delete merged.mcpServers
}
// Always write tracking keys (even as []) so future installs know what to prune.
merged[MANAGED_KEY] = Object.keys(incomingMcp)
merged[MANAGED_KEYS_KEY] = incomingTopKeys
return merged as QwenExtensionConfig
}
function resolveQwenPaths(outputRoot: string) {
return {
root: outputRoot,
configPath: path.join(outputRoot, "qwen-extension.json"),
contextPath: path.join(outputRoot, "QWEN.md"),
agentsDir: path.join(outputRoot, "agents"),
commandsDir: path.join(outputRoot, "commands"),
skillsDir: path.join(outputRoot, "skills"),
}
}

View File

@@ -1,114 +0,0 @@
import path from "path"
import { backupFile, copySkillDir, ensureDir, pathExists, readJson, sanitizePathName, writeJsonSecure, writeText } from "../utils/files"
import { formatFrontmatter } from "../utils/frontmatter"
import { transformContentForWindsurf } from "../converters/claude-to-windsurf"
import type { WindsurfBundle } from "../types/windsurf"
import type { TargetScope } from "./index"
import { cleanupStaleSkillDirs, cleanupStaleAgents } from "../utils/legacy-cleanup"
/**
* Write a WindsurfBundle directly into outputRoot.
*
* Unlike other target writers, this writer expects outputRoot to be the final
* resolved directory — the CLI handles scope-based nesting (global vs workspace).
*/
export async function writeWindsurfBundle(outputRoot: string, bundle: WindsurfBundle, scope?: TargetScope): Promise<void> {
await ensureDir(outputRoot)
// TODO(cleanup): Remove after v3 transition (circa Q3 2026)
const skillsDir = path.join(outputRoot, "skills")
await cleanupStaleSkillDirs(skillsDir)
await cleanupStaleAgents(skillsDir, null) // agents are written as skill dirs in Windsurf
// Write agent skills (before pass-through copies so pass-through takes precedence on collision)
if (bundle.agentSkills.length > 0) {
const skillsDir = path.join(outputRoot, "skills")
await ensureDir(skillsDir)
for (const skill of bundle.agentSkills) {
validatePathSafe(skill.name, "agent skill")
const destDir = path.join(skillsDir, sanitizePathName(skill.name))
const resolvedDest = path.resolve(destDir)
if (!resolvedDest.startsWith(path.resolve(skillsDir))) {
console.warn(`Warning: Agent skill name "${skill.name}" escapes skills/. Skipping.`)
continue
}
await ensureDir(destDir)
await writeText(path.join(destDir, "SKILL.md"), skill.content)
}
}
// Write command workflows (flat in global_workflows/ for global scope, workflows/ for workspace)
if (bundle.commandWorkflows.length > 0) {
const workflowsDirName = scope === "global" ? "global_workflows" : "workflows"
const workflowsDir = path.join(outputRoot, workflowsDirName)
await ensureDir(workflowsDir)
for (const workflow of bundle.commandWorkflows) {
validatePathSafe(workflow.name, "command workflow")
const content = formatWorkflowContent(workflow.name, workflow.description, workflow.body)
await writeText(path.join(workflowsDir, `${workflow.name}.md`), content)
}
}
// Copy pass-through skill directories (after generated skills so copies overwrite on collision)
if (bundle.skillDirs.length > 0) {
const skillsDir = path.join(outputRoot, "skills")
await ensureDir(skillsDir)
for (const skill of bundle.skillDirs) {
validatePathSafe(skill.name, "skill directory")
const destDir = path.join(skillsDir, sanitizePathName(skill.name))
const resolvedDest = path.resolve(destDir)
if (!resolvedDest.startsWith(path.resolve(skillsDir))) {
console.warn(`Warning: Skill name "${skill.name}" escapes skills/. Skipping.`)
continue
}
const knownAgentNames = bundle.agentSkills.map((s) => s.name)
await copySkillDir(skill.sourceDir, destDir, (content) =>
transformContentForWindsurf(content, knownAgentNames),
)
}
}
// Merge MCP config
if (bundle.mcpConfig) {
const mcpPath = path.join(outputRoot, "mcp_config.json")
const backupPath = await backupFile(mcpPath)
if (backupPath) {
console.log(`Backed up existing mcp_config.json to ${backupPath}`)
}
let existingConfig: Record<string, unknown> = {}
if (await pathExists(mcpPath)) {
try {
const parsed = await readJson<unknown>(mcpPath)
if (parsed && typeof parsed === "object" && !Array.isArray(parsed)) {
existingConfig = parsed as Record<string, unknown>
}
} catch {
console.warn("Warning: existing mcp_config.json could not be parsed and will be replaced.")
}
}
const existingServers =
existingConfig.mcpServers &&
typeof existingConfig.mcpServers === "object" &&
!Array.isArray(existingConfig.mcpServers)
? (existingConfig.mcpServers as Record<string, unknown>)
: {}
const merged = { ...existingConfig, mcpServers: { ...existingServers, ...bundle.mcpConfig.mcpServers } }
await writeJsonSecure(mcpPath, merged)
}
}
function validatePathSafe(name: string, label: string): void {
if (name.includes("..") || name.includes("/") || name.includes("\\")) {
throw new Error(`${label} name contains unsafe path characters: ${name}`)
}
}
function formatWorkflowContent(name: string, description: string, body: string): string {
return formatFrontmatter({ description }, `# ${name}\n\n${body}`) + "\n"
}

View File

@@ -22,10 +22,19 @@ export type CodexGeneratedSkillSidecarDir = {
targetName: string
}
export type CodexAgent = {
name: string
description: string
instructions: string
sidecarDirs?: CodexGeneratedSkillSidecarDir[]
}
export type CodexBundle = {
pluginName?: string
prompts: CodexPrompt[]
skillDirs: CodexSkillDir[]
generatedSkills: CodexGeneratedSkill[]
agents?: CodexAgent[]
invocationTargets?: CodexInvocationTargets
mcpServers?: Record<string, ClaudeMcpServer>
}

View File

@@ -24,6 +24,7 @@ export type CopilotMcpServer = {
}
export type CopilotBundle = {
pluginName?: string
agents: CopilotAgent[]
generatedSkills: CopilotGeneratedSkill[]
skillDirs: CopilotSkillDir[]

View File

@@ -14,6 +14,7 @@ export type DroidSkillDir = {
}
export type DroidBundle = {
pluginName?: string
commands: DroidCommandFile[]
droids: DroidAgentFile[]
skillDirs: DroidSkillDir[]

View File

@@ -13,6 +13,11 @@ export type GeminiCommand = {
content: string // Full TOML content
}
export type GeminiAgent = {
name: string
content: string // Full agent Markdown file with YAML frontmatter
}
export type GeminiMcpServer = {
command?: string
args?: string[]
@@ -22,8 +27,10 @@ export type GeminiMcpServer = {
}
export type GeminiBundle = {
generatedSkills: GeminiSkill[] // From agents
pluginName?: string
generatedSkills: GeminiSkill[] // Target-specific generated skills, if any
skillDirs: GeminiSkillDir[] // From skills (pass-through)
agents?: GeminiAgent[] // From Claude agents
commands: GeminiCommand[]
mcpServers?: Record<string, GeminiMcpServer>
}

View File

@@ -38,6 +38,7 @@ export type KiroMcpServer = {
}
export type KiroBundle = {
pluginName?: string
agents: KiroAgent[]
generatedSkills: KiroSkill[]
skillDirs: KiroSkillDir[]

View File

@@ -1,54 +0,0 @@
export type OpenClawPluginManifest = {
id: string
name: string
kind: "tool"
configSchema: OpenClawConfigSchema
uiHints?: Record<string, OpenClawUiHint>
skills?: string[]
}
export type OpenClawConfigSchema = {
type: "object"
properties: Record<string, OpenClawConfigProperty>
additionalProperties?: boolean
required?: string[]
}
export type OpenClawConfigProperty = {
type: string
description?: string
default?: unknown
}
export type OpenClawUiHint = {
label: string
sensitive?: boolean
placeholder?: string
}
export type OpenClawSkillFile = {
name: string
content: string
/** Subdirectory path inside skills/ (e.g. "agent-native-reviewer") */
dir: string
}
export type OpenClawCommandRegistration = {
name: string
description: string
acceptsArgs: boolean
/** The prompt body that becomes the command handler response */
body: string
}
export type OpenClawBundle = {
manifest: OpenClawPluginManifest
packageJson: Record<string, unknown>
entryPoint: string
skills: OpenClawSkillFile[]
/** Skill directories to copy verbatim (original Claude skills with references/) */
skillDirCopies: { sourceDir: string; name: string }[]
commands: OpenClawCommandRegistration[]
/** openclaw.json fragment for MCP servers */
openclawConfig?: Record<string, unknown>
}

View File

@@ -4,19 +4,37 @@ export type OpenCodeConfig = {
$schema?: string
model?: string
default_agent?: string
/** @deprecated OpenCode v1.1.1+ uses permission as the canonical control surface. */
tools?: Record<string, boolean>
permission?: Record<string, OpenCodePermission | Record<string, OpenCodePermission>>
agent?: Record<string, OpenCodeAgentConfig>
mcp?: Record<string, OpenCodeMcpServer>
skills?: OpenCodeSkillsConfig
}
export type OpenCodeAgentConfig = {
description?: string
mode?: "primary" | "subagent"
mode?: "primary" | "subagent" | "all"
model?: string
variant?: string
temperature?: number
top_p?: number
prompt?: string
disable?: boolean
hidden?: boolean
color?: string
steps?: number
/** @deprecated Use steps instead. */
maxSteps?: number
options?: Record<string, unknown>
/** @deprecated OpenCode v1.1.1+ uses permission as the canonical control surface. */
tools?: Record<string, boolean>
permission?: Record<string, OpenCodePermission>
permission?: Record<string, OpenCodePermission | Record<string, OpenCodePermission>>
}
export type OpenCodeSkillsConfig = {
paths?: string[]
urls?: string[]
}
export type OpenCodeMcpServer = {
@@ -44,6 +62,7 @@ export type OpenCodeCommandFile = {
}
export type OpenCodeBundle = {
pluginName?: string
config: OpenCodeConfig
agents: OpenCodeAgentFile[]
// Commands are written as individual .md files, not in opencode.json. See ADR-001.

View File

@@ -32,6 +32,7 @@ export type PiMcporterConfig = {
}
export type PiBundle = {
pluginName?: string
prompts: PiPrompt[]
skillDirs: PiSkillDir[]
generatedSkills: PiGeneratedSkill[]

View File

@@ -1,51 +0,0 @@
/**
 * Manifest for a generated Qwen Code extension. The `commands`/`skills`/
 * `agents` fields are single strings, presumably relative directory names
 * inside the extension — confirm against Qwen extension docs.
 */
export type QwenExtensionConfig = {
  name: string
  version: string
  mcpServers?: Record<string, QwenMcpServer>
  // Name of the context file (see QwenBundle.contextFile for its content).
  contextFileName?: string
  commands?: string
  skills?: string
  agents?: string
  settings?: QwenSetting[]
}

/**
 * One MCP server entry: either a local process (`command`/`args`/`env`/`cwd`)
 * or a remote endpoint (`httpUrl`/`url` plus optional `headers`).
 */
export type QwenMcpServer = {
  command?: string
  args?: string[]
  env?: Record<string, string>
  cwd?: string
  httpUrl?: string
  url?: string
  headers?: Record<string, string>
}

/** A user-facing setting backed by an environment variable. */
export type QwenSetting = {
  name: string
  description: string
  envVar: string
  // Presumably marks the value as a secret (mask in UI/logs) — confirm.
  sensitive?: boolean
}

/** Rendered agent file; `format` records whether `content` is YAML or markdown. */
export type QwenAgentFile = {
  name: string
  content: string
  format: "yaml" | "markdown"
}

/** A skill directory copied verbatim from `sourceDir` into the extension as `name`. */
export type QwenSkillDir = {
  sourceDir: string
  name: string
}

/** Rendered command file keyed by command name. */
export type QwenCommandFile = {
  name: string
  content: string
}

/** Everything the Qwen writer needs to emit one extension to disk. */
export type QwenBundle = {
  config: QwenExtensionConfig
  agents: QwenAgentFile[]
  commandFiles: QwenCommandFile[]
  skillDirs: QwenSkillDir[]
  // Content for the file named by config.contextFileName, when present.
  contextFile?: string
}

View File

@@ -1,35 +0,0 @@
/** A Windsurf workflow — presumably the converted form of a slash command. */
export type WindsurfWorkflow = {
  name: string
  description: string
  body: string
}

/** A skill synthesized during conversion: name plus full file content. */
export type WindsurfGeneratedSkill = {
  name: string
  content: string
}

/** A skill directory copied verbatim from `sourceDir` under `name`. */
export type WindsurfSkillDir = {
  name: string
  sourceDir: string
}

/**
 * One MCP server entry: either a local process (`command`/`args`/`env`) or
 * a remote endpoint (`serverUrl`/`url` plus optional `headers`).
 */
export type WindsurfMcpServerEntry = {
  command?: string
  args?: string[]
  env?: Record<string, string>
  serverUrl?: string
  url?: string
  headers?: Record<string, string>
}

/** Shape of Windsurf's MCP config file — confirm the exact filename against Windsurf docs. */
export type WindsurfMcpConfig = {
  mcpServers: Record<string, WindsurfMcpServerEntry>
}

/** Everything the Windsurf writer needs; `mcpConfig` is null when no MCP servers exist. */
export type WindsurfBundle = {
  agentSkills: WindsurfGeneratedSkill[]
  commandWorkflows: WindsurfWorkflow[]
  skillDirs: WindsurfSkillDir[]
  mcpConfig: WindsurfMcpConfig | null
}

View File

@@ -1,6 +1,7 @@
/**
 * Lookup tables used when rewriting invocation syntax for Codex: each map
 * goes from a normalized source name to its converted Codex target name.
 * `agentTargets` is optional; when an agent name is absent the transform
 * falls back to a skill reference (see transformContentForCodex).
 */
export type CodexInvocationTargets = {
  promptTargets: Record<string, string>
  skillTargets: Record<string, string>
  agentTargets?: Record<string, string>
}
export type CodexTransformOptions = {
@@ -27,20 +28,34 @@ export function transformContentForCodex(
let result = body
const promptTargets = targets?.promptTargets ?? {}
const skillTargets = targets?.skillTargets ?? {}
const agentTargets = targets?.agentTargets ?? {}
const unknownSlashBehavior = options.unknownSlashBehavior ?? "prompt"
const taskPattern = /^(\s*-?\s*)Task\s+([a-z][a-z0-9:-]*)\(([^)]*)\)/gm
result = result.replace(taskPattern, (_match, prefix: string, agentName: string, args: string) => {
const agentTarget = resolveAgentTarget(agentName, agentTargets)
const trimmedArgs = args.trim()
if (agentTarget) {
return trimmedArgs
? `${prefix}Spawn the custom agent \`${agentTarget}\` with task: ${trimmedArgs}`
: `${prefix}Spawn the custom agent \`${agentTarget}\``
}
// For namespaced calls like "compound-engineering:research:repo-research-analyst",
// use only the final segment as the skill name.
// use only the final segment as the skill name when no custom agent target exists.
const finalSegment = agentName.includes(":") ? agentName.split(":").pop()! : agentName
const skillName = normalizeCodexName(finalSegment)
const trimmedArgs = args.trim()
return trimmedArgs
? `${prefix}Use the $${skillName} skill to: ${trimmedArgs}`
: `${prefix}Use the $${skillName} skill`
})
const backtickedAgentPattern = /`([a-z][a-z0-9-]*(?::[a-z][a-z0-9-]*){1,2})`/gi
result = result.replace(backtickedAgentPattern, (match, agentName: string) => {
const agentTarget = resolveAgentTarget(agentName, agentTargets)
return agentTarget ? `custom agent \`${agentTarget}\`` : match
})
const slashCommandPattern = /(?<![:\w>}\]\)])\/([a-z][a-z0-9_:-]*?)(?=[\s,."')\]}`]|$)/gi
result = result.replace(slashCommandPattern, (match, commandName: string) => {
if (commandName.includes("/")) return match
@@ -65,6 +80,8 @@ export function transformContentForCodex(
const agentRefPattern = /@([a-z][a-z0-9-]*-(?:agent|reviewer|researcher|analyst|specialist|oracle|sentinel|guardian|strategist))/gi
result = result.replace(agentRefPattern, (_match, agentName: string) => {
const agentTarget = resolveAgentTarget(agentName, agentTargets)
if (agentTarget) return `custom agent \`${agentTarget}\``
const skillName = normalizeCodexName(agentName)
return `$${skillName} skill`
})
@@ -72,6 +89,21 @@ export function transformContentForCodex(
return result
}
/**
 * Map a (possibly colon-namespaced) agent name onto a registered Codex agent
 * target. Candidates are tried most-specific first: the whole name, then the
 * last two segments, then the final segment alone — each normalized through
 * normalizeCodexName. Returns null when nothing matches `agentTargets`.
 */
function resolveAgentTarget(value: string, agentTargets: Record<string, string>): string | null {
  const parts = value.split(":").filter(Boolean)
  const lookupKeys: string[] = [normalizeCodexName(value)]
  if (parts.length >= 2) lookupKeys.push(normalizeCodexName(parts.slice(-2).join(":")))
  if (parts.length >= 1) lookupKeys.push(normalizeCodexName(parts[parts.length - 1]))
  for (const key of lookupKeys) {
    if (!key) continue
    const hit = agentTargets[key]
    if (hit) return hit
  }
  return null
}
export function normalizeCodexName(value: string): string {
const trimmed = value.trim()
if (!trimmed) return "item"

View File

@@ -1,6 +1,7 @@
import os from "os"
import path from "path"
import { pathExists } from "./files"
import { syncTargets } from "../sync/registry"
import { resolveOpenCodeGlobalRoot } from "./opencode-config"
export type DetectedTool = {
name: string
@@ -8,12 +9,78 @@ export type DetectedTool = {
reason: string
}
/** Describes how to detect one installed tool: paths whose existence implies an install. */
type DetectableTool = {
  name: string
  // Returns candidate filesystem paths to probe, given the user's home
  // directory and the current working directory.
  detectPaths: (home: string, cwd: string) => string[]
}

/** The set of tools detection knows about, each with its probe paths. */
const detectableTools: DetectableTool[] = [
  {
    name: "opencode",
    detectPaths: (home, cwd) => {
      // Resolve the OpenCode global root through the shared helper so that
      // detection agrees with install/cleanup on `OPENCODE_CONFIG_DIR`. When
      // the env var is unset, the helper falls back to `os.homedir()`, which
      // may differ from the `home` arg threaded through for testability; in
      // that case prefer the explicit `home` param so existing callers that
      // override it keep working.
      const envDir = process.env.OPENCODE_CONFIG_DIR?.trim()
      const globalRoot = envDir
        ? resolveOpenCodeGlobalRoot()
        : path.join(home, ".config", "opencode")
      return [globalRoot, path.join(cwd, ".opencode")]
    },
  },
  {
    // Codex: global config dir only.
    name: "codex",
    detectPaths: (home) => [path.join(home, ".codex")],
  },
  {
    name: "pi",
    detectPaths: (home) => [path.join(home, ".pi")],
  },
  {
    // Factory Droid stores its config under ~/.factory.
    name: "droid",
    detectPaths: (home) => [path.join(home, ".factory")],
  },
  {
    // Copilot: global dir plus the workspace .github conventions.
    name: "copilot",
    detectPaths: (home, cwd) => [
      path.join(home, ".copilot"),
      path.join(cwd, ".github", "skills"),
      path.join(cwd, ".github", "agents"),
      path.join(cwd, ".github", "copilot-instructions.md"),
    ],
  },
  {
    // Gemini: workspace dir checked before the global one.
    name: "gemini",
    detectPaths: (home, cwd) => [
      path.join(cwd, ".gemini"),
      path.join(home, ".gemini"),
    ],
  },
  {
    name: "kiro",
    detectPaths: (home, cwd) => [
      path.join(home, ".kiro"),
      path.join(cwd, ".kiro"),
    ],
  },
  {
    name: "qwen",
    detectPaths: (home, cwd) => [
      path.join(home, ".qwen"),
      path.join(cwd, ".qwen"),
    ],
  },
]
export async function detectInstalledTools(
home: string = os.homedir(),
cwd: string = process.cwd(),
): Promise<DetectedTool[]> {
const results: DetectedTool[] = []
for (const target of syncTargets) {
for (const target of detectableTools) {
let detected = false
let reason = "not found"
for (const p of target.detectPaths(home, cwd)) {

View File

@@ -85,6 +85,43 @@ export function sanitizePathName(name: string): string {
return name.replace(/:/g, "-")
}
/**
 * Guard a manifest-supplied relative path before it is joined against a
 * managed root and handed to destructive fs operations (`fs.rm`/`fs.rename`).
 *
 * Install manifests (`install-manifest.json`) are read back from disk during
 * reinstall/cleanup, so a corrupted or attacker-controlled entry such as
 * `../../config.toml` or `/etc/passwd` could otherwise point cleanup outside
 * the managed tree. Rejected inputs:
 *
 * - non-string or empty values
 * - absolute paths (POSIX `/foo`, Windows `C:\foo`)
 * - any `..` segment, split on either separator (so `foo/../bar` too)
 * - anything that, fully resolved against `rootDir`, escapes `rootDir`
 *   (defense-in-depth for platform-specific separators or encoded traversal
 *   the segment check misses)
 */
export function isSafeManagedPath(rootDir: string, candidate: unknown): candidate is string {
  if (typeof candidate !== "string" || candidate === "") return false
  if (path.isAbsolute(candidate)) return false
  // Uniform cross-platform traversal check: split on both separators.
  for (const segment of candidate.split(/[\\/]/)) {
    if (segment === "..") return false
  }
  // Containment: the resolved candidate must remain inside the resolved root.
  const resolvedRoot = path.resolve(rootDir)
  const resolvedCandidate = path.resolve(resolvedRoot, candidate)
  return resolvedCandidate === resolvedRoot || resolvedCandidate.startsWith(resolvedRoot + path.sep)
}
/**
* Resolve a colon-separated command name into a filesystem path.
* e.g. resolveCommandPath("/commands", "ce:plan", ".md") -> "/commands/ce/plan.md"

View File

@@ -1,5 +1,5 @@
import path from "path"
import { pathExists, readJson, writeJsonSecure } from "../utils/files"
import { pathExists, readJson, writeJsonSecure } from "./files"
type JsonObject = Record<string, unknown>
@@ -19,7 +19,7 @@ export async function mergeJsonConfigAtKey(options: {
...existing,
[key]: {
...existingEntries,
...incoming, // incoming plugin entries overwrite same-named servers
...incoming,
},
}

View File

@@ -20,9 +20,30 @@ import { parseFrontmatter } from "./frontmatter"
/** Old skill directory names that no longer exist after the v3 rename. */
const STALE_SKILL_DIRS = [
// ce: -> ce- (dirs were already hyphenated by sanitizePathName, so these
// only collide if the old name was exactly the same after sanitization —
// which it was for all 8 workflow skills. No orphans from this group.)
// ce: -> ce-. Some targets sanitized these to ce-*; others left raw colon
// directories on filesystems that permit them.
"ce:brainstorm",
"ce:compound",
"ce:compound-refresh",
"ce:ideate",
"ce:plan",
"ce:plan-beta",
"ce:review",
"ce:review-beta",
"ce:work",
"ce:work-beta",
// workflows:* -> ce-*.
"workflows:brainstorm",
"workflows:compound",
"workflows:plan",
"workflows:review",
"workflows:work",
"workflows-brainstorm",
"workflows-compound",
"workflows-plan",
"workflows-review",
"workflows-work",
// git-* -> ce-*
"git-commit",
@@ -62,6 +83,8 @@ const STALE_SKILL_DIRS = [
// ce-review -> ce-code-review, ce-document-review -> ce-doc-review
"ce-review",
"ce-document-review",
"ce-plan-beta",
"ce-review-beta",
]
/** Old agent names (used as generated skill dirs or flat .md files). */
@@ -225,6 +248,14 @@ const LEGACY_ONLY_SKILL_DESCRIPTIONS: Record<string, string> = {
"This skill should be used when orchestrating multi-agent swarms using Claude Code's TeammateTool and Task system. It applies when coordinating multiple agents, running parallel code reviews, creating pipeline workflows with dependencies, building self-organizing task queues, or any task benefiting from divide-and-conquer patterns.",
"reproduce-bug":
"Systematically reproduce and investigate a bug from a GitHub issue. Use when the user provides a GitHub issue number or URL for a bug they want reproduced or investigated.",
"ce:plan-beta":
"[BETA] Transform feature descriptions or requirements into structured implementation plans grounded in repo patterns and research. Use when the user says 'plan this', 'create a plan', 'write a tech plan', 'plan the implementation', 'how should we build', 'what's the approach for', 'break this down', or when a brainstorm/requirements document is ready for technical planning. Best when requirements are at least roughly defined; for exploratory or ambiguous requests, prefer ce:brainstorm first.",
"ce-plan-beta":
"[BETA] Transform feature descriptions or requirements into structured implementation plans grounded in repo patterns and research. Use when the user says 'plan this', 'create a plan', 'write a tech plan', 'plan the implementation', 'how should we build', 'what's the approach for', 'break this down', or when a brainstorm/requirements document is ready for technical planning. Best when requirements are at least roughly defined; for exploratory or ambiguous requests, prefer ce:brainstorm first.",
"ce:review-beta":
"[BETA] Structured code review using tiered persona agents, confidence-gated findings, and a merge/dedup pipeline. Use when reviewing code changes before creating a PR.",
"ce-review-beta":
"[BETA] Structured code review using tiered persona agents, confidence-gated findings, and a merge/dedup pipeline. Use when reviewing code changes before creating a PR.",
}
/**
@@ -248,6 +279,19 @@ type LegacyFingerprints = {
let legacyFingerprintsPromise: Promise<LegacyFingerprints> | null = null
function currentSkillNameForLegacy(legacyName: string): string {
if (legacyName === "ce:review" || legacyName === "workflows:review" || legacyName === "workflows-review") {
return "ce-code-review"
}
if (legacyName.startsWith("ce:")) {
return legacyName.replace(/^ce:/, "ce-")
}
if (legacyName.startsWith("workflows:")) {
return `ce-${legacyName.slice("workflows:".length)}`
}
if (legacyName.startsWith("workflows-")) {
return `ce-${legacyName.slice("workflows-".length)}`
}
switch (legacyName) {
case "git-commit":
return "ce-commit"

View File

@@ -48,8 +48,8 @@ export function addProviderPrefix(model: string): string {
}
/**
* Normalize a model for targets that use provider-prefixed IDs
* (OpenCode, OpenClaw). Resolves bare aliases and adds provider prefix.
* Normalize a model for targets that use provider-prefixed IDs.
* Resolves bare aliases and adds provider prefix.
*
* "sonnet" -> "anthropic/claude-sonnet-4-6"
* "claude-sonnet-4-20250514" -> "anthropic/claude-sonnet-4-20250514"
@@ -66,4 +66,3 @@ export function normalizeModelWithProvider(model: string): string {
}
return addProviderPrefix(resolved)
}

View File

@@ -0,0 +1,25 @@
import os from "os"
import path from "path"
import { expandHome } from "./resolve-home"
/**
 * Resolve the OpenCode global-config root.
 *
 * Precedence: the `OPENCODE_CONFIG_DIR` environment variable first (NixOS,
 * Docker, non-default `XDG_CONFIG_HOME` setups), otherwise the XDG default
 * `~/.config/opencode`. See: https://opencode.ai/docs/config/
 *
 * Install and cleanup both route through this helper so that an install
 * performed under a custom `OPENCODE_CONFIG_DIR` is later cleaned up at the
 * same location.
 */
export function resolveOpenCodeGlobalRoot(): string {
  const configured = (process.env.OPENCODE_CONFIG_DIR ?? "").trim()
  return configured
    ? path.resolve(expandHome(configured))
    : path.join(os.homedir(), ".config", "opencode")
}

View File

@@ -1,50 +1,49 @@
import os from "os"
import path from "path"
import type { TargetScope } from "../targets"
import { resolveOpenCodeGlobalRoot } from "./opencode-config"
export function resolveTargetOutputRoot(options: {
targetName: string
outputRoot: string
codexHome: string
piHome: string
openclawHome?: string
qwenHome?: string
pluginName?: string
hasExplicitOutput: boolean
scope?: TargetScope
}): string {
const { targetName, outputRoot, codexHome, piHome, openclawHome, qwenHome, pluginName, hasExplicitOutput, scope } = options
const { targetName, outputRoot, codexHome, piHome, hasExplicitOutput } = options
if (targetName === "codex") return codexHome
if (targetName === "pi") return piHome
if (targetName === "droid") return path.join(os.homedir(), ".factory")
if (targetName === "cursor") {
const base = hasExplicitOutput ? outputRoot : process.cwd()
return path.join(base, ".cursor")
}
if (targetName === "gemini") {
const base = hasExplicitOutput ? outputRoot : process.cwd()
return path.join(base, ".gemini")
}
if (targetName === "copilot") {
const base = hasExplicitOutput ? outputRoot : process.cwd()
return path.join(base, ".github")
}
if (targetName === "kiro") {
const base = hasExplicitOutput ? outputRoot : process.cwd()
return path.join(base, ".kiro")
}
if (targetName === "windsurf") {
if (hasExplicitOutput) return outputRoot
if (scope === "global") return path.join(os.homedir(), ".codeium", "windsurf")
return path.join(process.cwd(), ".windsurf")
}
if (targetName === "openclaw") {
const home = openclawHome ?? path.join(os.homedir(), ".openclaw", "extensions")
return path.join(home, pluginName ?? "plugin")
}
if (targetName === "qwen") {
const home = qwenHome ?? path.join(os.homedir(), ".qwen", "extensions")
return path.join(home, pluginName ?? "plugin")
if (targetName === "opencode") {
// Without an explicit --output, default to the OpenCode global-config root
// (OPENCODE_CONFIG_DIR or ~/.config/opencode). With an explicit --output,
// honor it as a workspace root and let the writer nest under .opencode/.
if (!hasExplicitOutput) return resolveOpenCodeGlobalRoot()
return outputRoot
}
return outputRoot
}
/**
 * Decide the scope the OpenCode writer should use. An explicitly requested
 * `--scope` always wins. Otherwise, when the user gave no `--output`, return
 * "global" so the writer uses the flat global-config layout (no `.opencode/`
 * nesting); with an explicit `--output` the scope stays undefined.
 */
export function resolveOpenCodeWriteScope(
  hasExplicitOutput: boolean,
  requestedScope: TargetScope | undefined,
): TargetScope | undefined {
  if (requestedScope !== undefined) return requestedScope
  return hasExplicitOutput ? undefined : "global"
}

View File

@@ -1,82 +0,0 @@
import { describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import os from "os"
import path from "path"
import { loadClaudeHome } from "../src/parsers/claude-home"
describe("loadClaudeHome", () => {
  test("loads personal skills, commands, and MCP servers", async () => {
    // Build a throwaway Claude-home layout: one skill, a namespaced command
    // under commands/workflows/, a flat command, and an MCP server entry.
    const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "claude-home-"))
    const skillDir = path.join(tempHome, "skills", "reviewer")
    const commandsDir = path.join(tempHome, "commands")
    await fs.mkdir(skillDir, { recursive: true })
    await fs.writeFile(path.join(skillDir, "SKILL.md"), "---\nname: reviewer\n---\nReview things.\n")
    await fs.mkdir(path.join(commandsDir, "workflows"), { recursive: true })
    await fs.writeFile(
      path.join(commandsDir, "workflows", "plan.md"),
      "---\ndescription: Planning command\nargument-hint: \"[feature]\"\n---\nPlan the work.\n",
    )
    await fs.writeFile(
      path.join(commandsDir, "custom.md"),
      "---\nname: custom-command\ndescription: Custom command\nallowed-tools: Bash, Read\n---\nDo custom work.\n",
    )
    await fs.writeFile(
      path.join(tempHome, "settings.json"),
      JSON.stringify({
        mcpServers: {
          context7: { url: "https://mcp.context7.com/mcp" },
        },
      }),
    )
    const config = await loadClaudeHome(tempHome)
    expect(config.skills.map((skill) => skill.name)).toEqual(["reviewer"])
    // Nested command dirs yield colon-namespaced names ("workflows:plan");
    // flat files keep their frontmatter name.
    expect(config.commands?.map((command) => command.name)).toEqual([
      "custom-command",
      "workflows:plan",
    ])
    expect(config.commands?.find((command) => command.name === "workflows:plan")?.argumentHint).toBe("[feature]")
    expect(config.commands?.find((command) => command.name === "custom-command")?.allowedTools).toEqual(["Bash", "Read"])
    expect(config.mcpServers.context7?.url).toBe("https://mcp.context7.com/mcp")
  })

  test("keeps personal skill directory names stable even when frontmatter name differs", async () => {
    // The directory is "reviewer" but the frontmatter claims "ce-plan": the
    // loader keeps the directory name while still honoring the other fields.
    const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "claude-home-skill-name-"))
    const skillDir = path.join(tempHome, "skills", "reviewer")
    await fs.mkdir(skillDir, { recursive: true })
    await fs.writeFile(
      path.join(skillDir, "SKILL.md"),
      "---\nname: ce-plan\ndescription: Reviewer skill\nargument-hint: \"[topic]\"\n---\nReview things.\n",
    )
    const config = await loadClaudeHome(tempHome)
    expect(config.skills).toHaveLength(1)
    expect(config.skills[0]?.name).toBe("reviewer")
    expect(config.skills[0]?.description).toBe("Reviewer skill")
    expect(config.skills[0]?.argumentHint).toBe("[topic]")
  })

  test("keeps personal skills when frontmatter is malformed", async () => {
    // Unterminated YAML in the frontmatter: the skill still loads under its
    // directory name, with the metadata fields left unset.
    const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "claude-home-skill-yaml-"))
    const skillDir = path.join(tempHome, "skills", "reviewer")
    await fs.mkdir(skillDir, { recursive: true })
    await fs.writeFile(
      path.join(skillDir, "SKILL.md"),
      "---\nname: ce-plan\nfoo: [unterminated\n---\nReview things.\n",
    )
    const config = await loadClaudeHome(tempHome)
    expect(config.skills).toHaveLength(1)
    expect(config.skills[0]?.name).toBe("reviewer")
    expect(config.skills[0]?.description).toBeUndefined()
    expect(config.skills[0]?.argumentHint).toBeUndefined()
  })
})

View File

@@ -6,6 +6,7 @@ import { loadClaudePlugin } from "../src/parsers/claude"
import { filterSkillsByPlatform } from "../src/types/claude"
const fixtureRoot = path.join(import.meta.dir, "fixtures", "sample-plugin")
const compoundPluginRoot = path.join(import.meta.dir, "..", "plugins", "compound-engineering")
const mcpFixtureRoot = path.join(import.meta.dir, "fixtures", "mcp-file")
const customPathsRoot = path.join(import.meta.dir, "fixtures", "custom-paths")
const invalidCommandPathRoot = path.join(import.meta.dir, "fixtures", "invalid-command-path")
@@ -32,6 +33,14 @@ async function makeMinimalPluginRoot(): Promise<string> {
}
describe("loadClaudePlugin", () => {
test("current compound-engineering plugin ships skills and agents but no source commands", async () => {
const plugin = await loadClaudePlugin(compoundPluginRoot)
expect(plugin.commands).toHaveLength(0)
expect(plugin.skills.length).toBeGreaterThan(0)
expect(plugin.agents.length).toBeGreaterThan(0)
})
test("loads manifest, agents, commands, skills, hooks", async () => {
const plugin = await loadClaudePlugin(fixtureRoot)

File diff suppressed because it is too large Load Diff

View File

@@ -46,7 +46,7 @@ const fixturePlugin: ClaudePlugin = {
}
describe("convertClaudeToCodex", () => {
test("converts commands to prompts and agents to skills", () => {
test("converts commands to prompts and agents to custom agents", () => {
const bundle = convertClaudeToCodex(fixturePlugin, {
agentMode: "subagent",
inferTemperature: false,
@@ -64,7 +64,8 @@ describe("convertClaudeToCodex", () => {
expect(parsedPrompt.body).toContain("Plan the work.")
expect(bundle.skillDirs[0]?.name).toBe("existing-skill")
expect(bundle.generatedSkills).toHaveLength(2)
expect(bundle.generatedSkills).toHaveLength(1)
expect(bundle.agents).toHaveLength(1)
const commandSkill = bundle.generatedSkills.find((skill) => skill.name === "workflows-plan")
expect(commandSkill).toBeDefined()
@@ -73,16 +74,14 @@ describe("convertClaudeToCodex", () => {
expect(parsedCommandSkill.data.description).toBe("Planning command")
expect(parsedCommandSkill.body).toContain("Allowed tools")
const agentSkill = bundle.generatedSkills.find((skill) => skill.name === "security-reviewer")
expect(agentSkill).toBeDefined()
const parsedSkill = parseFrontmatter(agentSkill!.content)
expect(parsedSkill.data.name).toBe("security-reviewer")
expect(parsedSkill.data.description).toBe("Security-focused agent")
expect(parsedSkill.body).toContain("Capabilities")
expect(parsedSkill.body).toContain("Threat modeling")
const agent = bundle.agents.find((item) => item.name === "security-reviewer")
expect(agent).toBeDefined()
expect(agent!.description).toBe("Security-focused agent")
expect(agent!.instructions).toContain("Capabilities")
expect(agent!.instructions).toContain("Threat modeling")
})
test("drops model field (Codex skill frontmatter does not support model)", () => {
test("drops model field from Codex custom agents", () => {
const plugin: ClaudePlugin = {
...fixturePlugin,
agents: [
@@ -104,8 +103,9 @@ describe("convertClaudeToCodex", () => {
permissions: "none",
})
const skill = bundle.generatedSkills.find((s) => s.name === "fast-agent")
expect(parseFrontmatter(skill!.content).data.model).toBeUndefined()
const agent = bundle.agents.find((s) => s.name === "fast-agent")
expect(agent).toBeDefined()
expect("model" in agent!).toBe(false)
})
test("copies workflow skills as regular skills and omits workflows aliases", () => {
@@ -190,7 +190,7 @@ describe("convertClaudeToCodex", () => {
expect(bundle.mcpServers?.local?.args).toEqual(["hello"])
})
test("transforms Task agent calls to skill references", () => {
test("transforms known Task agent calls to custom agent spawns", () => {
const plugin: ClaudePlugin = {
...fixturePlugin,
commands: [
@@ -208,7 +208,26 @@ Task best-practices-researcher(topic)`,
sourcePath: "/tmp/plugin/commands/plan.md",
},
],
agents: [],
agents: [
{
name: "repo-research-analyst",
description: "Repo research",
body: "Research repositories.",
sourcePath: "/tmp/plugin/agents/repo-research-analyst.md",
},
{
name: "learnings-researcher",
description: "Learning research",
body: "Search learnings.",
sourcePath: "/tmp/plugin/agents/learnings-researcher.md",
},
{
name: "best-practices-researcher",
description: "Best practices",
body: "Search best practices.",
sourcePath: "/tmp/plugin/agents/best-practices-researcher.md",
},
],
skills: [],
}
@@ -222,17 +241,16 @@ Task best-practices-researcher(topic)`,
expect(commandSkill).toBeDefined()
const parsed = parseFrontmatter(commandSkill!.content)
// Task calls should be transformed to skill references
expect(parsed.body).toContain("Use the $repo-research-analyst skill to: feature_description")
expect(parsed.body).toContain("Use the $learnings-researcher skill to: feature_description")
expect(parsed.body).toContain("Use the $best-practices-researcher skill to: topic")
expect(parsed.body).toContain("Spawn the custom agent `repo-research-analyst` with task: feature_description")
expect(parsed.body).toContain("Spawn the custom agent `learnings-researcher` with task: feature_description")
expect(parsed.body).toContain("Spawn the custom agent `best-practices-researcher` with task: topic")
// Original Task syntax should not remain
expect(parsed.body).not.toContain("Task repo-research-analyst")
expect(parsed.body).not.toContain("Task learnings-researcher")
})
test("transforms namespaced Task agent calls to skill references using final segment", () => {
test("transforms namespaced Task agent calls to category-qualified custom agents", () => {
const plugin: ClaudePlugin = {
...fixturePlugin,
commands: [
@@ -241,16 +259,35 @@ Task best-practices-researcher(topic)`,
description: "Planning with namespaced agents",
body: `Run these agents in parallel:
- Task compound-engineering:research:repo-research-analyst(feature_description)
- Task compound-engineering:research:learnings-researcher(feature_description)
- Task compound-engineering:research:ce-repo-research-analyst(feature_description)
- Task compound-engineering:research:ce-learnings-researcher(feature_description)
Then consolidate findings.
Task compound-engineering:review:security-reviewer(code_diff)`,
Task compound-engineering:review:ce-security-reviewer(code_diff)`,
sourcePath: "/tmp/plugin/commands/plan.md",
},
],
agents: [],
agents: [
{
name: "ce-repo-research-analyst",
description: "Repo research",
body: "Research repositories.",
sourcePath: "/tmp/plugin/agents/research/ce-repo-research-analyst.agent.md",
},
{
name: "ce-learnings-researcher",
description: "Learning research",
body: "Search learnings.",
sourcePath: "/tmp/plugin/agents/research/ce-learnings-researcher.agent.md",
},
{
name: "ce-security-reviewer",
description: "Security review",
body: "Review security.",
sourcePath: "/tmp/plugin/agents/review/ce-security-reviewer.agent.md",
},
],
skills: [],
}
@@ -264,10 +301,9 @@ Task compound-engineering:review:security-reviewer(code_diff)`,
expect(commandSkill).toBeDefined()
const parsed = parseFrontmatter(commandSkill!.content)
// Namespaced Task calls should use only the final segment as the skill name
expect(parsed.body).toContain("Use the $repo-research-analyst skill to: feature_description")
expect(parsed.body).toContain("Use the $learnings-researcher skill to: feature_description")
expect(parsed.body).toContain("Use the $security-reviewer skill to: code_diff")
expect(parsed.body).toContain("Spawn the custom agent `research-ce-repo-research-analyst` with task: feature_description")
expect(parsed.body).toContain("Spawn the custom agent `research-ce-learnings-researcher` with task: feature_description")
expect(parsed.body).toContain("Spawn the custom agent `review-ce-security-reviewer` with task: code_diff")
// Original namespaced Task syntax should not remain
expect(parsed.body).not.toContain("Task compound-engineering:")
@@ -284,7 +320,14 @@ Task compound-engineering:review:security-reviewer(code_diff)`,
sourcePath: "/tmp/plugin/commands/review.md",
},
],
agents: [],
agents: [
{
name: "ce-code-simplicity-reviewer",
description: "Simplicity review",
body: "Review simplicity.",
sourcePath: "/tmp/plugin/agents/review/ce-code-simplicity-reviewer.agent.md",
},
],
skills: [],
}
@@ -297,7 +340,7 @@ Task compound-engineering:review:security-reviewer(code_diff)`,
const commandSkill = bundle.generatedSkills.find((s) => s.name === "review")
expect(commandSkill).toBeDefined()
const parsed = parseFrontmatter(commandSkill!.content)
expect(parsed.body).toContain("Use the $code-simplicity-reviewer skill")
expect(parsed.body).toContain("Spawn the custom agent `review-ce-code-simplicity-reviewer`")
expect(parsed.body).not.toContain("compound-engineering:")
expect(parsed.body).not.toContain("skill to:")
})
@@ -372,15 +415,14 @@ Don't confuse with file paths like /tmp/output.md or /dev/null.`,
permissions: "none",
})
const agentSkill = bundle.generatedSkills.find((s) => s.name === "session-historian")
expect(agentSkill).toBeDefined()
expect(agentSkill!.sidecarDirs).toEqual([
const agent = bundle.agents.find((s) => s.name === "research-session-historian")
expect(agent).toBeDefined()
expect(agent!.sidecarDirs).toEqual([
{ sourceDir: scriptDir, targetName: "session-history-scripts" },
])
const parsed = parseFrontmatter(agentSkill!.content)
expect(parsed.body).toContain("<script-dir>/discover-sessions.sh")
expect(parsed.body).not.toContain("<script-dir>/prompts:discover-sessions.sh")
expect(agent!.instructions).toContain("<script-dir>/discover-sessions.sh")
expect(agent!.instructions).not.toContain("<script-dir>/prompts:discover-sessions.sh")
})
test("transforms workflow skill slash commands to Codex skill references", () => {
@@ -509,7 +551,7 @@ Run \`/compound-engineering-setup\` to create a settings file.`,
expect(parsed.body).toContain("compound-engineering.local.md")
})
test("rewrites .claude/ paths in agent skill bodies", () => {
test("preserves tool-agnostic paths in Codex custom agent instructions", () => {
const plugin: ClaudePlugin = {
...fixturePlugin,
commands: [],
@@ -530,15 +572,12 @@ Run \`/compound-engineering-setup\` to create a settings file.`,
permissions: "none",
})
const agentSkill = bundle.generatedSkills.find((s) => s.name === "config-reader")
expect(agentSkill).toBeDefined()
const parsed = parseFrontmatter(agentSkill!.content)
// Tool-agnostic path in project root — no rewriting needed
expect(parsed.body).toContain("compound-engineering.local.md")
const agent = bundle.agents.find((s) => s.name === "config-reader")
expect(agent).toBeDefined()
expect(agent!.instructions).toContain("compound-engineering.local.md")
})
test("truncates generated skill descriptions to Codex limits and single line", () => {
test("truncates custom agent descriptions to Codex limits and single line", () => {
const longDescription = `Line one\nLine two ${"a".repeat(2000)}`
const plugin: ClaudePlugin = {
...fixturePlugin,
@@ -560,9 +599,7 @@ Run \`/compound-engineering-setup\` to create a settings file.`,
permissions: "none",
})
const generated = bundle.generatedSkills[0]
const parsed = parseFrontmatter(generated.content)
const description = String(parsed.data.description ?? "")
const description = bundle.agents[0].description
expect(description.length).toBeLessThanOrEqual(1024)
expect(description).not.toContain("\n")
expect(description.endsWith("...")).toBe(true)

View File

@@ -4,6 +4,8 @@ import path from "path"
import os from "os"
import { mergeCodexConfig, renderCodexConfig, writeCodexBundle } from "../src/targets/codex"
import type { CodexBundle } from "../src/types/codex"
import { loadClaudePlugin } from "../src/parsers/claude"
import { convertClaudeToCodex } from "../src/converters/claude-to-codex"
async function exists(filePath: string): Promise<boolean> {
try {
@@ -14,6 +16,15 @@ async function exists(filePath: string): Promise<boolean> {
}
}
async function entryExists(filePath: string): Promise<boolean> {
try {
await fs.lstat(filePath)
return true
} catch {
return false
}
}
describe("writeCodexBundle", () => {
test("writes prompts, skills, and config", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "codex-test-"))
@@ -26,6 +37,13 @@ describe("writeCodexBundle", () => {
},
],
generatedSkills: [{ name: "agent-skill", content: "Skill content" }],
agents: [
{
name: "research-ce-repo-research-analyst",
description: "Repo research",
instructions: "Research the repository.",
},
],
mcpServers: {
local: { command: "echo", args: ["hello"], env: { KEY: "VALUE" } },
remote: {
@@ -40,6 +58,11 @@ describe("writeCodexBundle", () => {
expect(await exists(path.join(tempRoot, ".codex", "prompts", "command-one.md"))).toBe(true)
expect(await exists(path.join(tempRoot, ".codex", "skills", "skill-one", "SKILL.md"))).toBe(true)
expect(await exists(path.join(tempRoot, ".codex", "skills", "agent-skill", "SKILL.md"))).toBe(true)
const agentPath = path.join(tempRoot, ".codex", "agents", "research-ce-repo-research-analyst.toml")
expect(await exists(agentPath)).toBe(true)
const agentToml = await fs.readFile(agentPath, "utf8")
expect(agentToml).toContain('name = "research-ce-repo-research-analyst"')
expect(agentToml).toContain('developer_instructions = "Research the repository."')
const configPath = path.join(tempRoot, ".codex", "config.toml")
expect(await exists(configPath)).toBe(true)
@@ -56,6 +79,38 @@ describe("writeCodexBundle", () => {
expect(config).toContain("http_headers")
})
// Two agent names that sanitize to the same TOML filename must fail loudly
// instead of letting one silently overwrite the other.
test("throws when two agents sanitize to the same Codex filename", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "codex-agent-collision-"))
  const bundle: CodexBundle = {
    prompts: [],
    skillDirs: [],
    generatedSkills: [],
    agents: [
      {
        // ":" is sanitized to "-", so this collides with the agent below.
        name: "research:ce-learnings-researcher",
        description: "First",
        instructions: "First agent body.",
      },
      {
        name: "research-ce-learnings-researcher",
        description: "Second",
        instructions: "Second agent body.",
      },
    ],
  }
  await expect(writeCodexBundle(tempRoot, bundle)).rejects.toThrow(
    /Codex agent filename collision/,
  )
  // Verify neither agent was silently dropped: the first agent should not have
  // been written before the collision was detected (guard runs before writes).
  const agentsRoot = path.join(tempRoot, ".codex", "agents")
  expect(
    await exists(path.join(agentsRoot, "research-ce-learnings-researcher.toml")),
  ).toBe(false)
})
test("writes directly into a .codex output root", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "codex-home-"))
const codexRoot = path.join(tempRoot, ".codex")
@@ -123,6 +178,182 @@ describe("writeCodexBundle", () => {
expect(await exists(path.join(promptsDir, "ce-plan.md"))).toBe(true)
})
// Re-install convergence under the plugin-namespaced layout: a second install
// must replace every artifact of the first and never create legacy .agents
// symlinks.
test("writes plugin skills under a namespaced Codex skills root without .agents symlinks", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "codex-managed-plugin-"))
  const codexRoot = path.join(tempRoot, ".codex")
  const bundle: CodexBundle = {
    pluginName: "compound-engineering",
    prompts: [{ name: "old-prompt", content: "Prompt content" }],
    skillDirs: [
      {
        name: "skill-one",
        sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
      },
    ],
    generatedSkills: [{ name: "old-command", content: "Old command" }],
    agents: [{ name: "old-agent", description: "Old agent", instructions: "Old agent body" }],
  }
  await writeCodexBundle(codexRoot, bundle)
  const managedSkillsRoot = path.join(codexRoot, "skills", "compound-engineering")
  const managedAgentsRoot = path.join(codexRoot, "agents", "compound-engineering")
  // First install: everything lands under the namespaced roots, plus a manifest;
  // nothing is linked under the repo-level .agents directory.
  expect(await exists(path.join(managedSkillsRoot, "skill-one", "SKILL.md"))).toBe(true)
  expect(await exists(path.join(managedSkillsRoot, "old-command", "SKILL.md"))).toBe(true)
  expect(await exists(path.join(managedAgentsRoot, "old-agent.toml"))).toBe(true)
  expect(await exists(path.join(tempRoot, ".agents", "skills", "skill-one"))).toBe(false)
  expect(await exists(path.join(tempRoot, ".agents", "skills", "old-agent"))).toBe(false)
  expect(await exists(path.join(codexRoot, "compound-engineering", "install-manifest.json"))).toBe(true)
  // Second install with a disjoint bundle: old artifacts are removed, new ones written.
  await writeCodexBundle(codexRoot, {
    pluginName: "compound-engineering",
    prompts: [{ name: "new-prompt", content: "Prompt content" }],
    skillDirs: [],
    generatedSkills: [{ name: "new-command", content: "New command" }],
    agents: [{ name: "new-agent", description: "New agent", instructions: "New agent body" }],
  })
  expect(await exists(path.join(managedSkillsRoot, "skill-one", "SKILL.md"))).toBe(false)
  expect(await exists(path.join(managedSkillsRoot, "old-command", "SKILL.md"))).toBe(false)
  expect(await exists(path.join(managedSkillsRoot, "new-command", "SKILL.md"))).toBe(true)
  expect(await exists(path.join(managedAgentsRoot, "old-agent.toml"))).toBe(false)
  expect(await exists(path.join(managedAgentsRoot, "new-agent.toml"))).toBe(true)
  expect(await exists(path.join(tempRoot, ".agents", "skills", "new-agent"))).toBe(false)
  expect(await exists(path.join(codexRoot, "prompts", "old-prompt.md"))).toBe(false)
  expect(await exists(path.join(codexRoot, "prompts", "new-prompt.md"))).toBe(true)
})
// Migration: symlinks under .agents/skills that point into the previously
// managed Codex skills tree are removed, while symlinks owned by the user
// (pointing elsewhere) survive untouched.
test("removes legacy .agents symlinks that point to managed Codex skills", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "codex-flat-symlink-"))
  const codexRoot = path.join(tempRoot, ".codex")
  const previousManagedSkillsRoot = path.join(codexRoot, "compound-engineering", "skills")
  const agentsSkillsDir = path.join(tempRoot, ".agents", "skills")
  // Simulate a prior install: managed skill dirs, a manifest, and symlinks
  // from .agents/skills into the managed tree.
  await fs.mkdir(path.join(previousManagedSkillsRoot, "old-agent"), { recursive: true })
  await fs.mkdir(path.join(previousManagedSkillsRoot, "reproduce-bug"), { recursive: true })
  await fs.writeFile(
    path.join(codexRoot, "compound-engineering", "install-manifest.json"),
    JSON.stringify({ version: 1, pluginName: "compound-engineering", skills: ["old-agent"], prompts: [] }),
  )
  await fs.mkdir(agentsSkillsDir, { recursive: true })
  await fs.symlink(previousManagedSkillsRoot, path.join(agentsSkillsDir, "compound-engineering"))
  await fs.symlink(
    path.join(previousManagedSkillsRoot, "old-agent"),
    path.join(agentsSkillsDir, "old-agent"),
  )
  await fs.symlink(
    path.join(previousManagedSkillsRoot, "reproduce-bug"),
    path.join(agentsSkillsDir, "reproduce-bug"),
  )
  // A user-owned symlink pointing outside the managed tree must be preserved.
  const unrelatedRoot = path.join(tempRoot, "other-skills", "skill-one")
  await fs.mkdir(unrelatedRoot, { recursive: true })
  await fs.symlink(unrelatedRoot, path.join(agentsSkillsDir, "skill-one"))
  await writeCodexBundle(codexRoot, {
    pluginName: "compound-engineering",
    prompts: [],
    skillDirs: [
      {
        name: "skill-one",
        sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
      },
    ],
    generatedSkills: [],
  })
  // entryExists (lstat) catches dangling symlinks that exists (access) would miss.
  expect(await entryExists(path.join(agentsSkillsDir, "compound-engineering"))).toBe(false)
  expect(await entryExists(path.join(agentsSkillsDir, "old-agent"))).toBe(false)
  expect(await entryExists(path.join(agentsSkillsDir, "reproduce-bug"))).toBe(false)
  expect(await fs.realpath(path.join(agentsSkillsDir, "skill-one"))).toBe(await fs.realpath(unrelatedRoot))
  expect(await exists(previousManagedSkillsRoot)).toBe(false)
})
// Migration: historical flat installs (skills/prompts written directly under
// ~/.codex/skills and ~/.codex/prompts, including raw-colon names) are moved
// into a timestamped legacy-backup directory on the first namespaced install.
test("moves legacy flat Codex CE artifacts to a namespaced backup", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "codex-legacy-skill-"))
  const codexRoot = path.join(tempRoot, ".codex")
  // Seed every historical flat-layout variant CE ever shipped.
  await fs.mkdir(path.join(codexRoot, "skills", "ce-plan"), { recursive: true })
  await fs.writeFile(path.join(codexRoot, "skills", "ce-plan", "SKILL.md"), "legacy current workflow skill")
  await fs.mkdir(path.join(codexRoot, "skills", "ce:plan"), { recursive: true })
  await fs.writeFile(path.join(codexRoot, "skills", "ce:plan", "SKILL.md"), "legacy raw colon workflow skill")
  await fs.mkdir(path.join(codexRoot, "skills", "ce:plan-beta"), { recursive: true })
  await fs.writeFile(path.join(codexRoot, "skills", "ce:plan-beta", "SKILL.md"), "legacy raw colon beta workflow skill")
  await fs.mkdir(path.join(codexRoot, "skills", "repo-research-analyst"), { recursive: true })
  await fs.writeFile(path.join(codexRoot, "skills", "repo-research-analyst", "SKILL.md"), "legacy current agent skill")
  await fs.mkdir(path.join(codexRoot, "skills", "reproduce-bug"), { recursive: true })
  await fs.writeFile(path.join(codexRoot, "skills", "reproduce-bug", "SKILL.md"), "legacy removed skill")
  await fs.mkdir(path.join(codexRoot, "skills", "bug-reproduction-validator"), { recursive: true })
  await fs.writeFile(path.join(codexRoot, "skills", "bug-reproduction-validator", "SKILL.md"), "legacy removed agent skill")
  await fs.mkdir(path.join(codexRoot, "prompts"), { recursive: true })
  await fs.writeFile(path.join(codexRoot, "prompts", "reproduce-bug.md"), "legacy removed prompt")
  await fs.writeFile(path.join(codexRoot, "prompts", "report-bug.md"), "legacy deleted command prompt")
  // Convert the real plugin so the install reflects current shipped content.
  const plugin = await loadClaudePlugin(path.join(import.meta.dir, "..", "plugins", "compound-engineering"))
  const bundle = convertClaudeToCodex(plugin, {
    agentMode: "subagent",
    inferTemperature: true,
    permissions: "none",
  })
  await writeCodexBundle(codexRoot, bundle)
  // Every seeded legacy artifact is gone from the flat paths...
  expect(await exists(path.join(codexRoot, "skills", "ce-plan"))).toBe(false)
  expect(await exists(path.join(codexRoot, "skills", "ce:plan"))).toBe(false)
  expect(await exists(path.join(codexRoot, "skills", "ce:plan-beta"))).toBe(false)
  expect(await exists(path.join(codexRoot, "skills", "repo-research-analyst"))).toBe(false)
  expect(await exists(path.join(codexRoot, "skills", "reproduce-bug"))).toBe(false)
  expect(await exists(path.join(codexRoot, "skills", "bug-reproduction-validator"))).toBe(false)
  expect(await exists(path.join(codexRoot, "prompts", "reproduce-bug.md"))).toBe(false)
  expect(await exists(path.join(codexRoot, "prompts", "report-bug.md"))).toBe(false)
  // ...and preserved under the namespaced backup rather than deleted.
  expect(await exists(path.join(codexRoot, "compound-engineering", "legacy-backup"))).toBe(true)
})
// Safety check for the legacy-migration allow-list: user-authored skills that
// merely share a name with a current CE skill must never be treated as legacy
// CE artifacts.
test("preserves unrelated user skills at flat ~/.codex/skills/<name>/ that share a name with a current CE skill", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "codex-user-skill-collide-"))
  const codexRoot = path.join(tempRoot, ".codex")
  // ce-demo-reel is the name of a current CE skill, but it has never been
  // shipped as a flat ~/.codex/skills/ce-demo-reel/ install (the historical
  // flat name was "demo-reel"). A user could plausibly have authored their
  // own ce-demo-reel skill at the flat path. The first install of CE must
  // not move it to backup.
  const userSkillDir = path.join(codexRoot, "skills", "ce-demo-reel")
  await fs.mkdir(userSkillDir, { recursive: true })
  const userSkillContent = "# user-authored skill, not from CE"
  await fs.writeFile(path.join(userSkillDir, "SKILL.md"), userSkillContent)
  // Same for ce-debug — current CE skill name, never in the historical
  // flat-path allow-list, so a same-named user skill must be preserved.
  const userDebugDir = path.join(codexRoot, "skills", "ce-debug")
  await fs.mkdir(userDebugDir, { recursive: true })
  await fs.writeFile(path.join(userDebugDir, "SKILL.md"), "# user debug skill")
  const plugin = await loadClaudePlugin(path.join(import.meta.dir, "..", "plugins", "compound-engineering"))
  const bundle = convertClaudeToCodex(plugin, {
    agentMode: "subagent",
    inferTemperature: true,
    permissions: "none",
  })
  await writeCodexBundle(codexRoot, bundle)
  // The user skills survive the install — same path, same content.
  expect(await exists(path.join(userSkillDir, "SKILL.md"))).toBe(true)
  expect(await fs.readFile(path.join(userSkillDir, "SKILL.md"), "utf8")).toBe(userSkillContent)
  expect(await exists(path.join(userDebugDir, "SKILL.md"))).toBe(true)
  // And they are not silently relocated to the legacy backup.
  const backupRoot = path.join(codexRoot, "compound-engineering", "legacy-backup")
  if (await exists(backupRoot)) {
    // Backups are grouped under timestamped directories; scan each one.
    const timestamps = await fs.readdir(backupRoot)
    for (const ts of timestamps) {
      const skillsBackup = path.join(backupRoot, ts, "skills")
      if (!(await exists(skillsBackup))) continue
      const backed = await fs.readdir(skillsBackup)
      expect(backed).not.toContain("ce-demo-reel")
      expect(backed).not.toContain("ce-debug")
    }
  }
})
test("preserves existing user config when writing MCP servers", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "codex-backup-"))
const codexRoot = path.join(tempRoot, ".codex")

View File

@@ -15,6 +15,24 @@ const compoundEngineeringRoot = path.join(
)
describe("convertClaudeToOpenCode", () => {
// Snapshot of the converter's contract for the real plugin: everything ships
// as skills + subagents — no command files, no OpenCode plugins, no tool map.
test("current compound-engineering output is skills and subagents, not commands", async () => {
  const plugin = await loadClaudePlugin(compoundEngineeringRoot)
  const bundle = convertClaudeToOpenCode(plugin, {
    agentMode: "subagent",
    inferTemperature: true,
    permissions: "none",
  })
  expect(bundle.agents.length).toBeGreaterThan(0)
  expect(bundle.skillDirs.length).toBeGreaterThan(0)
  expect(bundle.commandFiles).toHaveLength(0)
  expect(bundle.plugins).toHaveLength(0)
  expect(bundle.config.tools).toBeUndefined()
  // Every emitted agent must carry mode: subagent in its frontmatter.
  const parsedAgents = bundle.agents.map((agent) => parseFrontmatter(agent.content))
  expect(parsedAgents.every((agent) => agent.data.mode === "subagent")).toBe(true)
})
test("from-command mode: map allowedTools to global permission block", async () => {
const plugin = await loadClaudePlugin(fixtureRoot)
const bundle = convertClaudeToOpenCode(plugin, {
@@ -24,6 +42,7 @@ describe("convertClaudeToOpenCode", () => {
})
expect(bundle.config.command).toBeUndefined()
expect(bundle.config.tools).toBeUndefined()
expect(bundle.commandFiles.find((f) => f.name === "workflows:review")).toBeDefined()
expect(bundle.commandFiles.find((f) => f.name === "plan_review")).toBeDefined()
@@ -275,6 +294,7 @@ describe("convertClaudeToOpenCode", () => {
inferTemperature: false,
permissions: "broad",
})
expect(broadBundle.config.tools).toBeUndefined()
expect(broadBundle.config.permission).toEqual({
read: "allow",
write: "allow",

View File

@@ -1,395 +0,0 @@
import { describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import path from "path"
import os from "os"
import { writeCopilotBundle } from "../src/targets/copilot"
import type { CopilotBundle } from "../src/types/copilot"
/**
 * Resolves true when `filePath` is accessible (symlinks followed), false
 * otherwise. Never rejects — access failures are mapped to false.
 */
async function exists(filePath: string): Promise<boolean> {
  return fs.access(filePath).then(
    () => true,
    () => false,
  )
}
describe("writeCopilotBundle", () => {
// End-to-end happy path: agents land in .github/agents as .agent.md files,
// generated and copied skills land in .github/skills/<name>/SKILL.md, and the
// MCP map is serialized to .github/copilot-mcp-config.json.
test("writes agents, generated skills, copied skills, and MCP config", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-test-"))
  const bundle: CopilotBundle = {
    agents: [
      {
        name: "security-reviewer",
        content: "---\ndescription: Security\nuser-invocable: true\n---\n\nReview code.",
      },
    ],
    generatedSkills: [
      {
        name: "plan",
        content: "---\nname: plan\ndescription: Planning\n---\n\nPlan the work.",
      },
    ],
    skillDirs: [
      {
        name: "skill-one",
        sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
      },
    ],
    mcpConfig: {
      playwright: {
        type: "local",
        command: "npx",
        args: ["-y", "@anthropic/mcp-playwright"],
        tools: ["*"],
      },
    },
  }
  await writeCopilotBundle(tempRoot, bundle)
  expect(await exists(path.join(tempRoot, ".github", "agents", "security-reviewer.agent.md"))).toBe(true)
  expect(await exists(path.join(tempRoot, ".github", "skills", "plan", "SKILL.md"))).toBe(true)
  expect(await exists(path.join(tempRoot, ".github", "skills", "skill-one", "SKILL.md"))).toBe(true)
  expect(await exists(path.join(tempRoot, ".github", "copilot-mcp-config.json"))).toBe(true)
  // Spot-check that file bodies carry the bundle content through unchanged.
  const agentContent = await fs.readFile(
    path.join(tempRoot, ".github", "agents", "security-reviewer.agent.md"),
    "utf8",
  )
  expect(agentContent).toContain("Review code.")
  const skillContent = await fs.readFile(
    path.join(tempRoot, ".github", "skills", "plan", "SKILL.md"),
    "utf8",
  )
  expect(skillContent).toContain("Plan the work.")
  const mcpContent = JSON.parse(
    await fs.readFile(path.join(tempRoot, ".github", "copilot-mcp-config.json"), "utf8"),
  )
  expect(mcpContent.mcpServers.playwright.command).toBe("npx")
})
// Copilot requires the .agent.md suffix; a plain .md file would be ignored.
test("agents use .agent.md file extension", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-ext-"))
  const bundle: CopilotBundle = {
    agents: [{ name: "test-agent", content: "Agent content" }],
    generatedSkills: [],
    skillDirs: [],
  }
  await writeCopilotBundle(tempRoot, bundle)
  expect(await exists(path.join(tempRoot, ".github", "agents", "test-agent.agent.md"))).toBe(true)
  // Should NOT create a plain .md file
  expect(await exists(path.join(tempRoot, ".github", "agents", "test-agent.md"))).toBe(false)
})
// When the caller already passes a .github directory as the output root, the
// writer must not append another .github level.
test("writes directly into .github output root without double-nesting", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-home-"))
  const githubRoot = path.join(tempRoot, ".github")
  const bundle: CopilotBundle = {
    agents: [{ name: "reviewer", content: "Reviewer agent content" }],
    generatedSkills: [{ name: "plan", content: "Plan content" }],
    skillDirs: [],
  }
  await writeCopilotBundle(githubRoot, bundle)
  expect(await exists(path.join(githubRoot, "agents", "reviewer.agent.md"))).toBe(true)
  expect(await exists(path.join(githubRoot, "skills", "plan", "SKILL.md"))).toBe(true)
  // Should NOT double-nest under .github/.github
  expect(await exists(path.join(githubRoot, ".github"))).toBe(false)
})
// An all-empty bundle must not throw; the output root simply remains in place.
test("handles empty bundles gracefully", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-empty-"))
  const bundle: CopilotBundle = {
    agents: [],
    generatedSkills: [],
    skillDirs: [],
  }
  await writeCopilotBundle(tempRoot, bundle)
  expect(await exists(tempRoot)).toBe(true)
})
// Each agent in the bundle gets its own .agent.md file, one per name.
test("writes multiple agents as separate .agent.md files", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-multi-"))
  const githubRoot = path.join(tempRoot, ".github")
  const bundle: CopilotBundle = {
    agents: [
      { name: "security-sentinel", content: "Security rules" },
      { name: "performance-oracle", content: "Performance rules" },
      { name: "code-simplicity-reviewer", content: "Simplicity rules" },
    ],
    generatedSkills: [],
    skillDirs: [],
  }
  await writeCopilotBundle(githubRoot, bundle)
  expect(await exists(path.join(githubRoot, "agents", "security-sentinel.agent.md"))).toBe(true)
  expect(await exists(path.join(githubRoot, "agents", "performance-oracle.agent.md"))).toBe(true)
  expect(await exists(path.join(githubRoot, "agents", "code-simplicity-reviewer.agent.md"))).toBe(true)
})
// Overwriting an existing MCP config must leave a timestamped .bak copy so a
// user can recover their previous configuration.
test("backs up existing copilot-mcp-config.json before overwriting", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-backup-"))
  const githubRoot = path.join(tempRoot, ".github")
  await fs.mkdir(githubRoot, { recursive: true })
  // Write an existing config
  const mcpPath = path.join(githubRoot, "copilot-mcp-config.json")
  await fs.writeFile(mcpPath, JSON.stringify({ mcpServers: { old: { type: "local", command: "old-cmd", tools: ["*"] } } }))
  const bundle: CopilotBundle = {
    agents: [],
    generatedSkills: [],
    skillDirs: [],
    mcpConfig: {
      newServer: { type: "local", command: "new-cmd", tools: ["*"] },
    },
  }
  await writeCopilotBundle(githubRoot, bundle)
  // New config should have the new content
  const newContent = JSON.parse(await fs.readFile(mcpPath, "utf8"))
  expect(newContent.mcpServers.newServer.command).toBe("new-cmd")
  // A backup file should exist
  const files = await fs.readdir(githubRoot)
  const backupFiles = files.filter((f) => f.startsWith("copilot-mcp-config.json.bak."))
  expect(backupFiles.length).toBeGreaterThanOrEqual(1)
})
// Claude-style "Task <plugin>:<group>:<agent>(args)" invocations inside copied
// SKILL.md files must be rewritten into Copilot's "Use the <agent> skill"
// phrasing, with no plugin-namespaced Task calls left behind.
test("transforms Task calls in copied SKILL.md files", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-skill-transform-"))
  const sourceSkillDir = path.join(tempRoot, "source-skill")
  await fs.mkdir(sourceSkillDir, { recursive: true })
  await fs.writeFile(
    path.join(sourceSkillDir, "SKILL.md"),
    `---
name: ce-plan
description: Planning workflow
---
Run these research agents:
- Task compound-engineering:research:repo-research-analyst(feature_description)
- Task compound-engineering:research:learnings-researcher(feature_description)
- Task compound-engineering:review:code-simplicity-reviewer()
`,
  )
  const bundle: CopilotBundle = {
    agents: [],
    generatedSkills: [],
    skillDirs: [{ name: "ce-plan", sourceDir: sourceSkillDir }],
  }
  await writeCopilotBundle(tempRoot, bundle)
  const installedSkill = await fs.readFile(
    path.join(tempRoot, ".github", "skills", "ce-plan", "SKILL.md"),
    "utf8",
  )
  expect(installedSkill).toContain("Use the repo-research-analyst skill to: feature_description")
  expect(installedSkill).toContain("Use the learnings-researcher skill to: feature_description")
  expect(installedSkill).toContain("Use the code-simplicity-reviewer skill")
  expect(installedSkill).not.toContain("Task compound-engineering:")
})
// Re-installing with a different MCP set converges the config: servers the
// plugin previously managed but no longer ships are pruned.
test("removes stale plugin MCP servers on re-install", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-converge-"))
  const githubRoot = path.join(tempRoot, ".github")
  const bundle1: CopilotBundle = {
    agents: [],
    generatedSkills: [],
    skillDirs: [],
    mcpConfig: { old: { type: "local", command: "old-server", tools: ["*"] } },
  }
  const bundle2: CopilotBundle = {
    agents: [],
    generatedSkills: [],
    skillDirs: [],
    mcpConfig: { fresh: { type: "local", command: "new-server", tools: ["*"] } },
  }
  await writeCopilotBundle(tempRoot, bundle1)
  await writeCopilotBundle(tempRoot, bundle2)
  const result = JSON.parse(await fs.readFile(path.join(githubRoot, "copilot-mcp-config.json"), "utf8"))
  expect(result.mcpServers.fresh).toBeDefined()
  expect(result.mcpServers.old).toBeUndefined()
})
// When a re-install ships zero MCP servers, every previously managed server is
// removed and the tracking key is reset to an empty list.
test("cleans up all plugin MCP servers when bundle has none", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-zero-"))
  const githubRoot = path.join(tempRoot, ".github")
  const bundle1: CopilotBundle = {
    agents: [],
    generatedSkills: [],
    skillDirs: [],
    mcpConfig: { old: { type: "local", command: "old-server", tools: ["*"] } },
  }
  const bundle2: CopilotBundle = {
    agents: [],
    generatedSkills: [],
    skillDirs: [],
    // No mcpConfig
  }
  await writeCopilotBundle(tempRoot, bundle1)
  await writeCopilotBundle(tempRoot, bundle2)
  const result = JSON.parse(await fs.readFile(path.join(githubRoot, "copilot-mcp-config.json"), "utf8"))
  expect(result.mcpServers.old).toBeUndefined()
  expect(result._compound_managed_mcp).toEqual([])
})
// A pre-existing user config that never saw the plugin (no tracking key) must
// not have its servers pruned by a zero-MCP install.
test("does not prune untracked user config when plugin has zero MCP servers", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-untracked-"))
  const githubRoot = path.join(tempRoot, ".github")
  await fs.mkdir(githubRoot, { recursive: true })
  // Pre-existing user config with no tracking key (never had the plugin before)
  await fs.writeFile(
    path.join(githubRoot, "copilot-mcp-config.json"),
    JSON.stringify({
      mcpServers: { "user-tool": { type: "local", command: "my-tool", tools: ["*"] } },
    }),
  )
  // Plugin installs with zero MCP servers
  await writeCopilotBundle(githubRoot, {
    agents: [],
    generatedSkills: [],
    skillDirs: [],
  })
  const result = JSON.parse(await fs.readFile(path.join(githubRoot, "copilot-mcp-config.json"), "utf8"))
  expect(result.mcpServers["user-tool"]).toBeDefined()
  expect(result._compound_managed_mcp).toEqual([])
})
// Full lifecycle: install with MCP, user adds a server, install with no MCP,
// install with MCP again. User servers survive; only plugin-managed ones churn.
test("preserves user servers across zero-MCP-then-MCP round trip", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-roundtrip-"))
  const githubRoot = path.join(tempRoot, ".github")
  const mcpPath = path.join(githubRoot, "copilot-mcp-config.json")
  // 1. Install with plugin MCP
  await writeCopilotBundle(tempRoot, {
    agents: [], generatedSkills: [], skillDirs: [],
    mcpConfig: { plugin: { type: "local", command: "plugin-server", tools: ["*"] } },
  })
  // 2. User adds their own server
  const afterInstall = JSON.parse(await fs.readFile(mcpPath, "utf8"))
  afterInstall.mcpServers["user-tool"] = { type: "local", command: "my-tool", tools: ["*"] }
  await fs.writeFile(mcpPath, JSON.stringify(afterInstall))
  // 3. Install with zero plugin MCP
  await writeCopilotBundle(tempRoot, {
    agents: [], generatedSkills: [], skillDirs: [],
  })
  // 4. Install with plugin MCP again
  await writeCopilotBundle(tempRoot, {
    agents: [], generatedSkills: [], skillDirs: [],
    mcpConfig: { new_plugin: { type: "local", command: "new-plugin", tools: ["*"] } },
  })
  const result = JSON.parse(await fs.readFile(mcpPath, "utf8"))
  expect(result.mcpServers["user-tool"]).toBeDefined()
  expect(result.mcpServers.new_plugin).toBeDefined()
  expect(result.mcpServers.plugin).toBeUndefined()
})
// With the tracking key present, user-added servers coexist with plugin-managed
// servers across re-installs.
test("preserves user-added MCP servers across re-installs", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-user-mcp-"))
  const githubRoot = path.join(tempRoot, ".github")
  await fs.mkdir(githubRoot, { recursive: true })
  // User has their own MCP server alongside plugin-managed ones (tracking key present)
  await fs.writeFile(
    path.join(githubRoot, "copilot-mcp-config.json"),
    JSON.stringify({
      mcpServers: { "user-tool": { type: "local", command: "my-tool", tools: ["*"] } },
      _compound_managed_mcp: [],
    }),
  )
  const bundle: CopilotBundle = {
    agents: [],
    generatedSkills: [],
    skillDirs: [],
    mcpConfig: { plugin: { type: "local", command: "plugin-server", tools: ["*"] } },
  }
  await writeCopilotBundle(githubRoot, bundle)
  const result = JSON.parse(await fs.readFile(path.join(githubRoot, "copilot-mcp-config.json"), "utf8"))
  expect(result.mcpServers["user-tool"]).toBeDefined()
  expect(result.mcpServers.plugin).toBeDefined()
})
// Legacy configs written before the tracking key existed: all pre-existing
// servers are assumed plugin-managed and pruned, and the new tracking key
// records exactly the freshly written set.
test("prunes stale servers from legacy config without tracking key", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-legacy-"))
  const githubRoot = path.join(tempRoot, ".github")
  await fs.mkdir(githubRoot, { recursive: true })
  // Simulate old writer output: has mcpServers but no _compound_managed_mcp
  await fs.writeFile(
    path.join(githubRoot, "copilot-mcp-config.json"),
    JSON.stringify({
      mcpServers: {
        old: { type: "local", command: "old-server", tools: ["*"] },
        renamed: { type: "local", command: "renamed-server", tools: ["*"] },
      },
    }),
  )
  const bundle: CopilotBundle = {
    agents: [],
    generatedSkills: [],
    skillDirs: [],
    mcpConfig: { fresh: { type: "local", command: "new-server", tools: ["*"] } },
  }
  await writeCopilotBundle(githubRoot, bundle)
  const result = JSON.parse(await fs.readFile(path.join(githubRoot, "copilot-mcp-config.json"), "utf8"))
  expect(result.mcpServers.fresh).toBeDefined()
  expect(result.mcpServers.old).toBeUndefined()
  expect(result.mcpServers.renamed).toBeUndefined()
  expect(result._compound_managed_mcp).toEqual(["fresh"])
})
// Generated skills are materialized as <name>/SKILL.md directories with the
// bundle content written through verbatim.
test("creates skill directories with SKILL.md", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "copilot-genskill-"))
  const bundle: CopilotBundle = {
    agents: [],
    generatedSkills: [
      {
        name: "deploy",
        content: "---\nname: deploy\ndescription: Deploy skill\n---\n\nDeploy steps.",
      },
    ],
    skillDirs: [],
  }
  await writeCopilotBundle(tempRoot, bundle)
  const skillPath = path.join(tempRoot, ".github", "skills", "deploy", "SKILL.md")
  expect(await exists(skillPath)).toBe(true)
  const content = await fs.readFile(skillPath, "utf8")
  expect(content).toContain("Deploy steps.")
})
})

View File

@@ -1,4 +1,4 @@
import { describe, expect, test } from "bun:test"
import { afterEach, describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import path from "path"
import os from "os"
@@ -11,7 +11,6 @@ describe("detectInstalledTools", () => {
// Create directories for some tools
await fs.mkdir(path.join(tempHome, ".codex"), { recursive: true })
await fs.mkdir(path.join(tempHome, ".codeium", "windsurf"), { recursive: true })
await fs.mkdir(path.join(tempHome, ".gemini"), { recursive: true })
await fs.mkdir(path.join(tempHome, ".copilot"), { recursive: true })
@@ -21,10 +20,6 @@ describe("detectInstalledTools", () => {
expect(codex?.detected).toBe(true)
expect(codex?.reason).toContain(".codex")
const windsurf = results.find((t) => t.name === "windsurf")
expect(windsurf?.detected).toBe(true)
expect(windsurf?.reason).toContain(".codeium/windsurf")
const gemini = results.find((t) => t.name === "gemini")
expect(gemini?.detected).toBe(true)
expect(gemini?.reason).toContain(".gemini")
@@ -50,7 +45,7 @@ describe("detectInstalledTools", () => {
const results = await detectInstalledTools(tempHome, tempCwd)
expect(results.length).toBe(10)
expect(results.length).toBe(8)
for (const tool of results) {
expect(tool.detected).toBe(false)
expect(tool.reason).toBe("not found")
@@ -64,14 +59,49 @@ describe("detectInstalledTools", () => {
await fs.mkdir(path.join(tempHome, ".config", "opencode"), { recursive: true })
await fs.mkdir(path.join(tempHome, ".factory"), { recursive: true })
await fs.mkdir(path.join(tempHome, ".pi"), { recursive: true })
await fs.mkdir(path.join(tempHome, ".openclaw"), { recursive: true })
const results = await detectInstalledTools(tempHome, tempCwd)
expect(results.find((t) => t.name === "opencode")?.detected).toBe(true)
expect(results.find((t) => t.name === "droid")?.detected).toBe(true)
expect(results.find((t) => t.name === "pi")?.detected).toBe(true)
expect(results.find((t) => t.name === "openclaw")?.detected).toBe(true)
})
// OPENCODE_CONFIG_DIR overrides the default ~/.config/opencode detection path.
describe("opencode OPENCODE_CONFIG_DIR", () => {
  // Snapshot the env var so mutations inside these tests cannot leak into
  // the rest of the suite.
  const originalEnv = process.env.OPENCODE_CONFIG_DIR
  afterEach(() => {
    if (originalEnv === undefined) {
      delete process.env.OPENCODE_CONFIG_DIR
    } else {
      process.env.OPENCODE_CONFIG_DIR = originalEnv
    }
  })
  test("detects opencode at OPENCODE_CONFIG_DIR when set, even if ~/.config/opencode is absent", async () => {
    const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "detect-opencode-env-home-"))
    const tempCwd = await fs.mkdtemp(path.join(os.tmpdir(), "detect-opencode-env-cwd-"))
    const customRoot = await fs.mkdtemp(path.join(os.tmpdir(), "detect-opencode-env-root-"))
    // Ensure no ~/.config/opencode exists under the sandbox home.
    process.env.OPENCODE_CONFIG_DIR = customRoot
    const results = await detectInstalledTools(tempHome, tempCwd)
    const opencode = results.find((t) => t.name === "opencode")
    expect(opencode?.detected).toBe(true)
    expect(opencode?.reason).toContain(customRoot)
  })
  test("opencode is not detected when OPENCODE_CONFIG_DIR points at a missing directory", async () => {
    const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "detect-opencode-missing-home-"))
    const tempCwd = await fs.mkdtemp(path.join(os.tmpdir(), "detect-opencode-missing-cwd-"))
    // Unique path that is never created, so detection must report false.
    const missingRoot = path.join(os.tmpdir(), `detect-opencode-missing-${Date.now()}-${Math.random()}`)
    process.env.OPENCODE_CONFIG_DIR = missingRoot
    const results = await detectInstalledTools(tempHome, tempCwd)
    expect(results.find((t) => t.name === "opencode")?.detected).toBe(false)
  })
})
test("detects copilot from project-specific skills without generic .github false positives", async () => {

View File

@@ -1,138 +0,0 @@
import { describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import path from "path"
import os from "os"
import { writeDroidBundle } from "../src/targets/droid"
import type { DroidBundle } from "../src/types/droid"
/**
 * Resolves true when `filePath` is accessible (symlinks followed), false
 * otherwise. Never rejects — any access failure is reported as absence.
 */
async function exists(filePath: string): Promise<boolean> {
  let present = true
  try {
    await fs.access(filePath)
  } catch {
    present = false
  }
  return present
}
describe("writeDroidBundle", () => {
// Happy path: commands, droids, and copied skills land under .factory/ with
// their bundle content written through verbatim.
test("writes commands, droids, and skills", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "droid-test-"))
  const bundle: DroidBundle = {
    commands: [{ name: "plan", content: "Plan command content" }],
    droids: [{ name: "security-reviewer", content: "Droid content" }],
    skillDirs: [
      {
        name: "skill-one",
        sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
      },
    ],
  }
  await writeDroidBundle(tempRoot, bundle)
  expect(await exists(path.join(tempRoot, ".factory", "commands", "plan.md"))).toBe(true)
  expect(await exists(path.join(tempRoot, ".factory", "droids", "security-reviewer.md"))).toBe(true)
  expect(await exists(path.join(tempRoot, ".factory", "skills", "skill-one", "SKILL.md"))).toBe(true)
  // Spot-check file bodies, not just existence.
  const commandContent = await fs.readFile(
    path.join(tempRoot, ".factory", "commands", "plan.md"),
    "utf8",
  )
  expect(commandContent).toContain("Plan command content")
  const droidContent = await fs.readFile(
    path.join(tempRoot, ".factory", "droids", "security-reviewer.md"),
    "utf8",
  )
  expect(droidContent).toContain("Droid content")
})
// Claude-style "Task <plugin>:<group>:<agent>(args)" invocations inside copied
// SKILL.md files are rewritten to droid-local "Task <agent>: args" form, with
// no plugin-namespaced Task calls left behind.
test("transforms Task calls in copied SKILL.md files", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "droid-skill-transform-"))
  const sourceSkillDir = path.join(tempRoot, "source-skill")
  await fs.mkdir(sourceSkillDir, { recursive: true })
  await fs.writeFile(
    path.join(sourceSkillDir, "SKILL.md"),
    `---
name: ce-plan
description: Planning workflow
---
Run these research agents:
- Task compound-engineering:research:repo-research-analyst(feature_description)
- Task compound-engineering:research:learnings-researcher(feature_description)
- Task compound-engineering:review:code-simplicity-reviewer()
`,
  )
  const bundle: DroidBundle = {
    commands: [],
    droids: [],
    skillDirs: [{ name: "ce-plan", sourceDir: sourceSkillDir }],
  }
  await writeDroidBundle(tempRoot, bundle)
  const installedSkill = await fs.readFile(
    path.join(tempRoot, ".factory", "skills", "ce-plan", "SKILL.md"),
    "utf8",
  )
  expect(installedSkill).toContain("Task repo-research-analyst: feature_description")
  expect(installedSkill).toContain("Task learnings-researcher: feature_description")
  expect(installedSkill).toContain("Task code-simplicity-reviewer")
  expect(installedSkill).not.toContain("Task compound-engineering:")
})
test("writes directly into a .factory output root", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "droid-home-"))
const factoryRoot = path.join(tempRoot, ".factory")
const bundle: DroidBundle = {
commands: [{ name: "plan", content: "Plan content" }],
droids: [{ name: "reviewer", content: "Reviewer content" }],
skillDirs: [],
}
await writeDroidBundle(factoryRoot, bundle)
expect(await exists(path.join(factoryRoot, "commands", "plan.md"))).toBe(true)
expect(await exists(path.join(factoryRoot, "droids", "reviewer.md"))).toBe(true)
// Should not double-nest under .factory/.factory
expect(await exists(path.join(factoryRoot, ".factory"))).toBe(false)
})
test("handles empty bundles gracefully", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "droid-empty-"))
const bundle: DroidBundle = {
commands: [],
droids: [],
skillDirs: [],
}
await writeDroidBundle(tempRoot, bundle)
// Root should exist but no subdirectories created
expect(await exists(tempRoot)).toBe(true)
})
test("writes multiple commands as separate files", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "droid-multi-"))
const factoryRoot = path.join(tempRoot, ".factory")
const bundle: DroidBundle = {
commands: [
{ name: "plan", content: "Plan content" },
{ name: "work", content: "Work content" },
{ name: "brainstorm", content: "Brainstorm content" },
],
droids: [],
skillDirs: [],
}
await writeDroidBundle(factoryRoot, bundle)
expect(await exists(path.join(factoryRoot, "commands", "plan.md"))).toBe(true)
expect(await exists(path.join(factoryRoot, "commands", "work.md"))).toBe(true)
expect(await exists(path.join(factoryRoot, "commands", "brainstorm.md"))).toBe(true)
})
})

View File

@@ -42,18 +42,19 @@ const fixturePlugin: ClaudePlugin = {
}
describe("convertClaudeToGemini", () => {
test("converts agents to skills with SKILL.md frontmatter", () => {
test("converts agents to Gemini subagent Markdown", () => {
const bundle = convertClaudeToGemini(fixturePlugin, {
agentMode: "subagent",
inferTemperature: false,
permissions: "none",
})
const skill = bundle.generatedSkills.find((s) => s.name === "security-reviewer")
expect(skill).toBeDefined()
const parsed = parseFrontmatter(skill!.content)
const agent = bundle.agents?.find((a) => a.name === "security-reviewer")
expect(agent).toBeDefined()
const parsed = parseFrontmatter(agent!.content)
expect(parsed.data.name).toBe("security-reviewer")
expect(parsed.data.description).toBe("Security-focused agent")
expect(parsed.data.kind).toBe("local")
expect(parsed.body).toContain("Focus on vulnerabilities.")
})
@@ -64,9 +65,9 @@ describe("convertClaudeToGemini", () => {
permissions: "none",
})
const skill = bundle.generatedSkills.find((s) => s.name === "security-reviewer")
expect(skill).toBeDefined()
const parsed = parseFrontmatter(skill!.content)
const agent = bundle.agents?.find((a) => a.name === "security-reviewer")
expect(agent).toBeDefined()
const parsed = parseFrontmatter(agent!.content)
expect(parsed.body).toContain("## Capabilities")
expect(parsed.body).toContain("- Threat modeling")
expect(parsed.body).toContain("- OWASP")
@@ -92,8 +93,8 @@ describe("convertClaudeToGemini", () => {
permissions: "none",
})
const parsed = parseFrontmatter(bundle.generatedSkills[0].content)
expect(parsed.data.description).toBe("Use this skill for my-agent tasks")
const parsed = parseFrontmatter(bundle.agents![0].content)
expect(parsed.data.description).toBe("Use this agent for my-agent tasks")
})
test("agent model field silently dropped", () => {
@@ -103,8 +104,8 @@ describe("convertClaudeToGemini", () => {
permissions: "none",
})
const skill = bundle.generatedSkills.find((s) => s.name === "security-reviewer")
const parsed = parseFrontmatter(skill!.content)
const agent = bundle.agents?.find((a) => a.name === "security-reviewer")
const parsed = parseFrontmatter(agent!.content)
expect(parsed.data.model).toBeUndefined()
})
@@ -129,7 +130,7 @@ describe("convertClaudeToGemini", () => {
permissions: "none",
})
const parsed = parseFrontmatter(bundle.generatedSkills[0].content)
const parsed = parseFrontmatter(bundle.agents![0].content)
expect(parsed.body).toContain("Instructions converted from the Empty Agent agent.")
})
@@ -232,7 +233,7 @@ describe("convertClaudeToGemini", () => {
expect(bundle.mcpServers?.local?.args).toEqual(["hello"])
})
test("plugin with zero agents produces empty generatedSkills", () => {
test("plugin with zero agents produces empty agents", () => {
const plugin: ClaudePlugin = {
...fixturePlugin,
agents: [],
@@ -246,7 +247,7 @@ describe("convertClaudeToGemini", () => {
permissions: "none",
})
expect(bundle.generatedSkills).toHaveLength(0)
expect(bundle.agents).toHaveLength(0)
})
test("plugin with only skills works correctly", () => {
@@ -262,12 +263,12 @@ describe("convertClaudeToGemini", () => {
permissions: "none",
})
expect(bundle.generatedSkills).toHaveLength(0)
expect(bundle.agents).toHaveLength(0)
expect(bundle.skillDirs).toHaveLength(1)
expect(bundle.commands).toHaveLength(0)
})
test("agent name colliding with skill name gets deduplicated", () => {
test("agent name can match a skill name because Gemini agents and skills are separate roots", () => {
const plugin: ClaudePlugin = {
...fixturePlugin,
skills: [{ name: "security-reviewer", description: "Existing skill", sourceDir: "/tmp/skill", skillPath: "/tmp/skill/SKILL.md" }],
@@ -281,8 +282,7 @@ describe("convertClaudeToGemini", () => {
permissions: "none",
})
// Agent should be deduplicated since skill already has "security-reviewer"
expect(bundle.generatedSkills[0].name).toBe("security-reviewer-2")
expect(bundle.agents![0].name).toBe("security-reviewer")
expect(bundle.skillDirs[0].name).toBe("security-reviewer")
})
@@ -323,7 +323,7 @@ describe("transformContentForGemini", () => {
expect(result).not.toContain("~/.claude/")
})
test("transforms Task agent(args) to natural language skill reference", () => {
test("transforms Task agent(args) to Gemini subagent reference", () => {
const input = `Run these:
- Task repo-research-analyst(feature_description)
@@ -332,9 +332,9 @@ describe("transformContentForGemini", () => {
Task best-practices-researcher(topic)`
const result = transformContentForGemini(input)
expect(result).toContain("Use the repo-research-analyst skill to: feature_description")
expect(result).toContain("Use the learnings-researcher skill to: feature_description")
expect(result).toContain("Use the best-practices-researcher skill to: topic")
expect(result).toContain("Use the @repo-research-analyst subagent to: feature_description")
expect(result).toContain("Use the @learnings-researcher subagent to: feature_description")
expect(result).toContain("Use the @best-practices-researcher subagent to: topic")
expect(result).not.toContain("Task repo-research-analyst")
})
@@ -345,8 +345,8 @@ Task best-practices-researcher(topic)`
- Task compound-engineering:review:security-reviewer(code_diff)`
const result = transformContentForGemini(input)
expect(result).toContain("Use the repo-research-analyst skill to: feature_description")
expect(result).toContain("Use the security-reviewer skill to: code_diff")
expect(result).toContain("Use the @repo-research-analyst subagent to: feature_description")
expect(result).toContain("Use the @security-reviewer subagent to: code_diff")
expect(result).not.toContain("compound-engineering:")
})
@@ -354,15 +354,14 @@ Task best-practices-researcher(topic)`
const input = `- Task compound-engineering:review:code-simplicity-reviewer()`
const result = transformContentForGemini(input)
expect(result).toContain("Use the code-simplicity-reviewer skill")
expect(result).toContain("Use the @code-simplicity-reviewer subagent")
expect(result).not.toContain("compound-engineering:")
expect(result).not.toContain("skill to:")
expect(result).not.toContain("subagent to:")
})
test("transforms @agent references to skill references", () => {
test("transforms @agent references to subagent references", () => {
const result = transformContentForGemini("Ask @security-sentinel for a review.")
expect(result).toContain("the security-sentinel skill")
expect(result).not.toContain("@security-sentinel")
expect(result).toContain("@security-sentinel subagent")
})
})

View File

@@ -4,6 +4,8 @@ import path from "path"
import os from "os"
import { writeGeminiBundle } from "../src/targets/gemini"
import type { GeminiBundle } from "../src/types/gemini"
import { loadClaudePlugin } from "../src/parsers/claude"
import { convertClaudeToGemini } from "../src/converters/claude-to-gemini"
async function exists(filePath: string): Promise<boolean> {
try {
@@ -41,10 +43,12 @@ describe("writeGeminiBundle", () => {
expect(rewritten).toContain("Fresh generated skill.")
})
test("writes skills, commands, and settings.json", async () => {
test("writes agents, skills, commands, and settings.json", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "gemini-test-"))
const bundle: GeminiBundle = {
generatedSkills: [
pluginName: "compound-engineering",
generatedSkills: [],
agents: [
{
name: "security-reviewer",
content: "---\nname: security-reviewer\ndescription: Security\n---\n\nReview code.",
@@ -69,16 +73,17 @@ describe("writeGeminiBundle", () => {
await writeGeminiBundle(tempRoot, bundle)
expect(await exists(path.join(tempRoot, ".gemini", "skills", "security-reviewer", "SKILL.md"))).toBe(true)
expect(await exists(path.join(tempRoot, ".gemini", "agents", "security-reviewer.md"))).toBe(true)
expect(await exists(path.join(tempRoot, ".gemini", "skills", "skill-one", "SKILL.md"))).toBe(true)
expect(await exists(path.join(tempRoot, ".gemini", "commands", "plan.toml"))).toBe(true)
expect(await exists(path.join(tempRoot, ".gemini", "settings.json"))).toBe(true)
expect(await exists(path.join(tempRoot, ".gemini", "compound-engineering", "install-manifest.json"))).toBe(true)
const skillContent = await fs.readFile(
path.join(tempRoot, ".gemini", "skills", "security-reviewer", "SKILL.md"),
const agentContent = await fs.readFile(
path.join(tempRoot, ".gemini", "agents", "security-reviewer.md"),
"utf8",
)
expect(skillContent).toContain("Review code.")
expect(agentContent).toContain("Review code.")
const commandContent = await fs.readFile(
path.join(tempRoot, ".gemini", "commands", "plan.toml"),
@@ -124,9 +129,9 @@ Run these research agents:
"utf8",
)
expect(installedSkill).toContain("Use the repo-research-analyst skill to: feature_description")
expect(installedSkill).toContain("Use the learnings-researcher skill to: feature_description")
expect(installedSkill).toContain("Use the code-simplicity-reviewer skill")
expect(installedSkill).toContain("Use the @repo-research-analyst subagent to: feature_description")
expect(installedSkill).toContain("Use the @learnings-researcher subagent to: feature_description")
expect(installedSkill).toContain("Use the @code-simplicity-reviewer subagent")
expect(installedSkill).not.toContain("Task compound-engineering:")
})
@@ -152,9 +157,8 @@ Run these research agents:
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "gemini-home-"))
const geminiRoot = path.join(tempRoot, ".gemini")
const bundle: GeminiBundle = {
generatedSkills: [
{ name: "reviewer", content: "Reviewer skill content" },
],
generatedSkills: [],
agents: [{ name: "reviewer", content: "Reviewer agent content" }],
skillDirs: [],
commands: [
{ name: "plan", content: "Plan content" },
@@ -163,7 +167,7 @@ Run these research agents:
await writeGeminiBundle(geminiRoot, bundle)
expect(await exists(path.join(geminiRoot, "skills", "reviewer", "SKILL.md"))).toBe(true)
expect(await exists(path.join(geminiRoot, "agents", "reviewer.md"))).toBe(true)
expect(await exists(path.join(geminiRoot, "commands", "plan.toml"))).toBe(true)
// Should NOT double-nest under .gemini/.gemini
expect(await exists(path.join(geminiRoot, ".gemini"))).toBe(false)
@@ -242,4 +246,119 @@ Run these research agents:
// Should add new MCP server
expect(content.mcpServers.newServer.command).toBe("new-cmd")
})
// Reinstalling must remove managed artifacts (agents, skill dirs, nested
// command TOMLs) that were recorded in the previous install but are absent
// from the new bundle, while the new artifacts are written.
test("removes previously managed Gemini artifacts that disappear on reinstall", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "gemini-managed-cleanup-"))
await writeGeminiBundle(tempRoot, {
pluginName: "compound-engineering",
generatedSkills: [],
agents: [{ name: "old-agent", content: "---\nname: old-agent\n---\n\nBody" }],
skillDirs: [
{
name: "skill-one",
sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
},
],
commands: [{ name: "old/cmd", content: 'description = "Old"\nprompt = """\nold\n"""' }],
})
// Second install of the same plugin with a disjoint artifact set.
await writeGeminiBundle(tempRoot, {
pluginName: "compound-engineering",
generatedSkills: [],
agents: [{ name: "new-agent", content: "---\nname: new-agent\n---\n\nBody" }],
skillDirs: [],
commands: [{ name: "new/cmd", content: 'description = "New"\nprompt = """\nnew\n"""' }],
})
expect(await exists(path.join(tempRoot, ".gemini", "skills", "skill-one", "SKILL.md"))).toBe(false)
expect(await exists(path.join(tempRoot, ".gemini", "agents", "old-agent.md"))).toBe(false)
expect(await exists(path.join(tempRoot, ".gemini", "agents", "new-agent.md"))).toBe(true)
// Slash-named commands map to nested <dir>/<name>.toml files.
expect(await exists(path.join(tempRoot, ".gemini", "commands", "old", "cmd.toml"))).toBe(false)
expect(await exists(path.join(tempRoot, ".gemini", "commands", "new", "cmd.toml"))).toBe(true)
})
test("namespaces managed install manifests per plugin so installs do not collide", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "gemini-multi-plugin-"))
// Install plugin A first, with a skill and an agent
await writeGeminiBundle(tempRoot, {
pluginName: "compound-engineering",
generatedSkills: [],
agents: [{ name: "ce-agent", content: "---\nname: ce-agent\n---\n\nBody" }],
skillDirs: [
{
name: "ce-skill",
sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
},
],
commands: [],
})
// Install plugin B into the same Gemini root
await writeGeminiBundle(tempRoot, {
pluginName: "coding-tutor",
generatedSkills: [],
agents: [{ name: "tutor-agent", content: "---\nname: tutor-agent\n---\n\nBody" }],
skillDirs: [
{
name: "tutor-skill",
sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
},
],
commands: [],
})
// Both plugins must keep their own namespaced manifest
expect(await exists(path.join(tempRoot, ".gemini", "compound-engineering", "install-manifest.json"))).toBe(true)
expect(await exists(path.join(tempRoot, ".gemini", "coding-tutor", "install-manifest.json"))).toBe(true)
// Reinstall plugin A with no agents/skills — it must clean up only its own
// managed artifacts, leaving plugin B's intact (the bug the namespacing fix
// addresses: a shared manifest path would have lost B's manifest after A
// was installed, and a later A reinstall would skip B's stale-file cleanup).
await writeGeminiBundle(tempRoot, {
pluginName: "compound-engineering",
generatedSkills: [],
agents: [],
skillDirs: [],
commands: [],
})
expect(await exists(path.join(tempRoot, ".gemini", "agents", "ce-agent.md"))).toBe(false)
expect(await exists(path.join(tempRoot, ".gemini", "skills", "ce-skill"))).toBe(false)
expect(await exists(path.join(tempRoot, ".gemini", "agents", "tutor-agent.md"))).toBe(true)
expect(await exists(path.join(tempRoot, ".gemini", "skills", "tutor-skill"))).toBe(true)
expect(await exists(path.join(tempRoot, ".gemini", "coding-tutor", "install-manifest.json"))).toBe(true)
})
// Pre-manifest ("legacy") CE artifacts that a real plugin install no longer
// ships must be removed from skills/agents/commands and preserved under the
// plugin's namespaced legacy-backup directory.
test("moves legacy Gemini CE artifacts to a namespaced backup", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "gemini-legacy-artifacts-"))
const geminiRoot = path.join(tempRoot, ".gemini")
await fs.mkdir(path.join(geminiRoot, "skills", "reproduce-bug"), { recursive: true })
await fs.writeFile(path.join(geminiRoot, "skills", "reproduce-bug", "SKILL.md"), "legacy removed skill")
await fs.mkdir(path.join(geminiRoot, "skills", "bug-reproduction-validator"), { recursive: true })
await fs.writeFile(path.join(geminiRoot, "skills", "bug-reproduction-validator", "SKILL.md"), "legacy removed agent skill")
await fs.mkdir(path.join(geminiRoot, "agents"), { recursive: true })
await fs.writeFile(path.join(geminiRoot, "agents", "bug-reproduction-validator.md"), "legacy removed agent")
await fs.mkdir(path.join(geminiRoot, "commands"), { recursive: true })
await fs.writeFile(path.join(geminiRoot, "commands", "reproduce-bug.toml"), "legacy removed command")
await fs.writeFile(path.join(geminiRoot, "commands", "report-bug.toml"), "legacy deleted command")
// Use the real plugin so the install reflects the current artifact set.
const plugin = await loadClaudePlugin(path.join(import.meta.dir, "..", "plugins", "compound-engineering"))
const bundle = convertClaudeToGemini(plugin, {
agentMode: "subagent",
inferTemperature: true,
permissions: "none",
})
await writeGeminiBundle(geminiRoot, bundle)
expect(await exists(path.join(geminiRoot, "skills", "reproduce-bug"))).toBe(false)
expect(await exists(path.join(geminiRoot, "skills", "bug-reproduction-validator"))).toBe(false)
expect(await exists(path.join(geminiRoot, "agents", "bug-reproduction-validator.md"))).toBe(false)
expect(await exists(path.join(geminiRoot, "commands", "reproduce-bug.toml"))).toBe(false)
expect(await exists(path.join(geminiRoot, "commands", "report-bug.toml"))).toBe(false)
expect(await exists(path.join(geminiRoot, "compound-engineering", "legacy-backup"))).toBe(true)
})
})

View File

@@ -64,6 +64,34 @@ describe("writeKiroBundle", () => {
expect(await exists(path.join(kiroRoot, "agents", "prompts", "session-historian.md"))).toBe(false)
})
// Known historical CE artifact names (the removed reproduce-bug skill and the
// repo-research-analyst agent JSON/prompt files) must be cleared from the Kiro
// root during install, with a copy kept under <plugin>/legacy-backup, while
// the current bundle's skills are installed normally.
test("moves historical CE Kiro artifacts to backup during install", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "kiro-legacy-artifacts-"))
const kiroRoot = path.join(tempRoot, ".kiro")
const sourceSkillDir = path.join(tempRoot, "source-skill")
await fs.mkdir(sourceSkillDir, { recursive: true })
await fs.writeFile(
path.join(sourceSkillDir, "SKILL.md"),
"---\nname: ce-plan\ndescription: Plan\n---\n\nPlan.",
)
// Seed the root with legacy artifacts from older installs.
await fs.mkdir(path.join(kiroRoot, "skills", "reproduce-bug"), { recursive: true })
await fs.writeFile(path.join(kiroRoot, "skills", "reproduce-bug", "SKILL.md"), "legacy skill")
await fs.mkdir(path.join(kiroRoot, "agents", "prompts"), { recursive: true })
await fs.writeFile(path.join(kiroRoot, "agents", "repo-research-analyst.json"), "{}")
await fs.writeFile(path.join(kiroRoot, "agents", "prompts", "repo-research-analyst.md"), "legacy prompt")
await writeKiroBundle(kiroRoot, {
...emptyBundle,
pluginName: "compound-engineering",
skillDirs: [{ name: "ce-plan", sourceDir: sourceSkillDir }],
})
expect(await exists(path.join(kiroRoot, "skills", "reproduce-bug"))).toBe(false)
expect(await exists(path.join(kiroRoot, "agents", "repo-research-analyst.json"))).toBe(false)
expect(await exists(path.join(kiroRoot, "agents", "prompts", "repo-research-analyst.md"))).toBe(false)
expect(await exists(path.join(kiroRoot, "skills", "ce-plan", "SKILL.md"))).toBe(true)
expect(await exists(path.join(kiroRoot, "compound-engineering", "legacy-backup"))).toBe(true)
})
test("writes agents, skills, steering, and mcp.json", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "kiro-test-"))
const bundle: KiroBundle = {

View File

@@ -135,6 +135,38 @@ describe("cleanupStaleSkillDirs", () => {
expect(await exists(path.join(root, "ce-document-review"))).toBe(false)
})
// Skill directories still using the raw colon-namespaced form (ce:plan,
// workflows:review, ce:plan-beta) are stale; when their SKILL.md descriptions
// match known plugin skill descriptions, cleanup must remove all of them.
test("removes raw colon workflow skill directories", async () => {
const root = await fs.mkdtemp(path.join(os.tmpdir(), "cleanup-colon-workflows-"))
await createDir(
path.join(root, "ce:plan"),
skillContent(
"ce:plan",
// Description pulled from the plugin's current ce-plan skill.
await pluginDescription("plugins/compound-engineering/skills/ce-plan/SKILL.md"),
),
)
await createDir(
path.join(root, "workflows:review"),
skillContent(
"workflows:review",
await pluginDescription("plugins/compound-engineering/skills/ce-code-review/SKILL.md"),
),
)
await createDir(
path.join(root, "ce:plan-beta"),
skillContent(
"ce:plan-beta",
// Inlined legacy [BETA] description that no longer exists in the plugin.
"[BETA] Transform feature descriptions or requirements into structured implementation plans grounded in repo patterns and research. Use when the user says 'plan this', 'create a plan', 'write a tech plan', 'plan the implementation', 'how should we build', 'what's the approach for', 'break this down', or when a brainstorm/requirements document is ready for technical planning. Best when requirements are at least roughly defined; for exploratory or ambiguous requests, prefer ce:brainstorm first.",
),
)
const removed = await cleanupStaleSkillDirs(root)
expect(removed).toBe(3)
expect(await exists(path.join(root, "ce:plan"))).toBe(false)
expect(await exists(path.join(root, "workflows:review"))).toBe(false)
expect(await exists(path.join(root, "ce:plan-beta"))).toBe(false)
})
test("returns 0 when directory does not exist", async () => {
const removed = await cleanupStaleSkillDirs("/tmp/nonexistent-cleanup-dir-12345")
expect(removed).toBe(0)

View File

@@ -0,0 +1,423 @@
import { afterEach, beforeEach, describe, expect, test } from "bun:test"
import fs from "fs/promises"
import os from "os"
import path from "path"
import { isSafeManagedPath } from "../src/utils/files"
import {
readManagedInstallManifest,
writeManagedInstallManifest,
cleanupRemovedManagedDirectories,
cleanupRemovedManagedFiles,
} from "../src/targets/managed-artifacts"
import { readCodexInstallManifest } from "../src/targets/codex"
import {
cleanupRemovedPiExtensions,
cleanupRemovedPiPrompts,
cleanupRemovedPiSkills,
readPiInstallManifest,
} from "../src/targets/pi"
// Validation matrix for isSafeManagedPath: only plain relative entries that
// stay inside the managed root count as safe; everything else is rejected.
describe("isSafeManagedPath", () => {
const root = "/tmp/managed-root"
test("accepts simple relative names", () => {
const safeEntries = ["skill-name", "foo.md", "foo/bar", "foo/bar/baz.toml"]
for (const entry of safeEntries) {
expect(isSafeManagedPath(root, entry)).toBe(true)
}
})
test("rejects non-string values", () => {
const nonStrings = [undefined, null, 42, {}] as unknown[]
for (const value of nonStrings) {
expect(isSafeManagedPath(root, value)).toBe(false)
}
})
test("rejects empty strings", () => {
expect(isSafeManagedPath(root, "")).toBe(false)
})
test("rejects absolute POSIX paths", () => {
for (const entry of ["/etc/passwd", "/tmp/anything"]) {
expect(isSafeManagedPath(root, entry)).toBe(false)
}
})
test("rejects path traversal segments", () => {
const traversals = [
"..",
"../escape",
"../../../etc/passwd",
"foo/../bar",
"foo/../../escape",
]
for (const entry of traversals) {
expect(isSafeManagedPath(root, entry)).toBe(false)
}
})
test("rejects windows-style absolute paths", () => {
// path.isAbsolute recognizes drive letters on win32 only; on posix
// the backslash form is treated as a literal filename, but the
// traversal split catches mixed separators.
for (const entry of ["..\\escape", "foo\\..\\..\\escape"]) {
expect(isSafeManagedPath(root, entry)).toBe(false)
}
})
test("rejects entries that resolve outside root", () => {
// Even without `..` segments, the final containment check catches
// anything that would resolve outside the root.
expect(isSafeManagedPath(root, "..")).toBe(false)
})
})
// The manifest reader must sanitize entries at read time — dropping traversal
// and absolute paths — and refuse manifests recorded for a different plugin.
describe("readManagedInstallManifest filters unsafe entries", () => {
let tempRoot: string
beforeEach(async () => {
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "managed-manifest-"))
})
afterEach(async () => {
await fs.rm(tempRoot, { recursive: true, force: true })
})
test("drops traversal and absolute entries, keeps safe ones", async () => {
const managedDir = path.join(tempRoot, "managed")
await fs.mkdir(managedDir, { recursive: true })
// Manifest mixes hostile entries in among safe ones.
const manifest = {
version: 1,
pluginName: "compound-engineering",
groups: {
skills: [
"safe-skill",
"../../../etc/passwd",
"/etc/passwd",
"foo/../bar",
"foo/../../escape",
"another-safe",
],
commands: ["ok.md"],
},
}
await fs.writeFile(path.join(managedDir, "install-manifest.json"), JSON.stringify(manifest))
const result = await readManagedInstallManifest(managedDir, "compound-engineering")
expect(result).not.toBeNull()
// Only the safe entries survive, in their original order.
expect(result!.groups.skills).toEqual(["safe-skill", "another-safe"])
expect(result!.groups.commands).toEqual(["ok.md"])
})
test("returns null for wrong pluginName", async () => {
const managedDir = path.join(tempRoot, "managed")
await fs.mkdir(managedDir, { recursive: true })
const manifest = {
version: 1,
pluginName: "other-plugin",
groups: { skills: ["safe"] },
}
await fs.writeFile(path.join(managedDir, "install-manifest.json"), JSON.stringify(manifest))
const result = await readManagedInstallManifest(managedDir, "compound-engineering")
expect(result).toBeNull()
})
})
// Even if a hostile manifest bypasses readManagedInstallManifest's filtering,
// the cleanup helpers themselves must refuse entries that would resolve
// outside the managed root — while still removing legitimately stale files.
describe("cleanupRemovedManagedFiles does not escape root (defense in depth)", () => {
let tempRoot: string
beforeEach(async () => {
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "managed-cleanup-"))
})
afterEach(async () => {
await fs.rm(tempRoot, { recursive: true, force: true })
})
test("skips unsafe entries even when fed directly (bypass read-time filter)", async () => {
const rootDir = path.join(tempRoot, "root")
await fs.mkdir(rootDir, { recursive: true })
// A file outside the managed root that must survive cleanup.
const outsideFile = path.join(tempRoot, "outside.txt")
await fs.writeFile(outsideFile, "keep me")
// Simulate a manifest object assembled without going through
// readManagedInstallManifest's filter.
const hostileManifest = {
version: 1 as const,
pluginName: "compound-engineering",
groups: {
prompts: ["../outside.txt", "/etc/passwd"],
},
}
await cleanupRemovedManagedFiles(rootDir, hostileManifest, "prompts", [])
expect(await fs.readFile(outsideFile, "utf8")).toBe("keep me")
})
test("skips unsafe directory entries", async () => {
const rootDir = path.join(tempRoot, "root")
await fs.mkdir(rootDir, { recursive: true })
// A directory outside the managed root that must survive cleanup.
const outsideDir = path.join(tempRoot, "outside")
await fs.mkdir(outsideDir)
await fs.writeFile(path.join(outsideDir, "file.txt"), "keep me")
const hostileManifest = {
version: 1 as const,
pluginName: "compound-engineering",
groups: {
skills: ["../outside"],
},
}
await cleanupRemovedManagedDirectories(rootDir, hostileManifest, "skills", [])
expect(await fs.readFile(path.join(outsideDir, "file.txt"), "utf8")).toBe("keep me")
})
test("still cleans up safe entries correctly", async () => {
const rootDir = path.join(tempRoot, "root")
await fs.mkdir(rootDir, { recursive: true })
const safeFile = path.join(rootDir, "safe-prompt.md")
await fs.writeFile(safeFile, "remove me")
await writeManagedInstallManifest(rootDir, {
version: 1,
pluginName: "compound-engineering",
groups: { prompts: ["safe-prompt.md"] },
})
const manifest = await readManagedInstallManifest(rootDir, "compound-engineering")
expect(manifest).not.toBeNull()
// Simulate a follow-up install where "safe-prompt.md" is no longer
// in the current bundle — cleanup should remove it.
await cleanupRemovedManagedFiles(rootDir, manifest, "prompts", [])
let exists = true
try {
await fs.stat(safeFile)
} catch {
exists = false
}
expect(exists).toBe(false)
})
})
// The Codex manifest reader applies the same read-time sanitization across
// its three flat groups: skills, prompts, and agents.
describe("readCodexInstallManifest filters unsafe entries", () => {
let tempRoot: string
beforeEach(async () => {
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "codex-manifest-"))
})
afterEach(async () => {
await fs.rm(tempRoot, { recursive: true, force: true })
})
test("drops traversal/absolute entries from skills, prompts, agents", async () => {
const codexRoot = path.join(tempRoot, ".codex")
// Codex manifests live under <root>/<pluginName>/install-manifest.json.
const pluginDir = path.join(codexRoot, "compound-engineering")
await fs.mkdir(pluginDir, { recursive: true })
const manifest = {
version: 1,
pluginName: "compound-engineering",
skills: ["safe-skill", "../../../etc/passwd", "/etc/passwd"],
prompts: ["ok.md", "../../evil.md", "foo/../../escape.md"],
agents: ["safe-agent.toml", "/tmp/abs.toml", "../escape.toml"],
}
await fs.writeFile(path.join(pluginDir, "install-manifest.json"), JSON.stringify(manifest))
const result = await readCodexInstallManifest(codexRoot, "compound-engineering")
expect(result).not.toBeNull()
// Only safe entries remain in each group.
expect(result!.skills).toEqual(["safe-skill"])
expect(result!.prompts).toEqual(["ok.md"])
expect(result!.agents).toEqual(["safe-agent.toml"])
})
test("keeps all entries when all are safe", async () => {
const codexRoot = path.join(tempRoot, ".codex")
const pluginDir = path.join(codexRoot, "compound-engineering")
await fs.mkdir(pluginDir, { recursive: true })
const manifest = {
version: 1,
pluginName: "compound-engineering",
skills: ["a", "b", "c"],
prompts: ["p.md"],
agents: ["agent.toml"],
}
await fs.writeFile(path.join(pluginDir, "install-manifest.json"), JSON.stringify(manifest))
const result = await readCodexInstallManifest(codexRoot, "compound-engineering")
expect(result).not.toBeNull()
expect(result!.skills).toEqual(["a", "b", "c"])
expect(result!.prompts).toEqual(["p.md"])
expect(result!.agents).toEqual(["agent.toml"])
})
})
// The Pi manifest reader takes an explicit paths object describing the Pi
// install layout; like the other readers, it drops traversal and absolute
// entries from skills, prompts, and extensions at read time.
describe("readPiInstallManifest filters unsafe entries", () => {
let tempRoot: string
beforeEach(async () => {
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "pi-manifest-"))
})
afterEach(async () => {
await fs.rm(tempRoot, { recursive: true, force: true })
})
test("drops traversal/absolute entries from skills, prompts, extensions", async () => {
const piRoot = path.join(tempRoot, ".pi")
const managedDir = path.join(piRoot, "compound-engineering")
await fs.mkdir(managedDir, { recursive: true })
// Layout descriptor the Pi reader uses to locate each artifact group.
const paths = {
managedDir,
skillsDir: path.join(piRoot, "skills"),
promptsDir: path.join(piRoot, "prompts"),
extensionsDir: path.join(piRoot, "extensions"),
mcporterConfigPath: path.join(managedDir, "mcporter.json"),
agentsPath: path.join(piRoot, "AGENTS.md"),
}
const manifest = {
version: 1,
pluginName: "compound-engineering",
skills: ["safe-skill", "../../../etc/passwd", "/etc/passwd", "foo/../../escape"],
prompts: ["ok.md", "../../evil.md", "foo/../bar.md"],
extensions: ["safe.ext", "/tmp/abs.ext", "..\\escape.ext"],
}
await fs.writeFile(path.join(managedDir, "install-manifest.json"), JSON.stringify(manifest))
const result = await readPiInstallManifest(managedDir, "compound-engineering", paths)
expect(result).not.toBeNull()
// Only safe entries remain in each group.
expect(result!.skills).toEqual(["safe-skill"])
expect(result!.prompts).toEqual(["ok.md"])
expect(result!.extensions).toEqual(["safe.ext"])
})
test("keeps all entries when all are safe", async () => {
const piRoot = path.join(tempRoot, ".pi")
const managedDir = path.join(piRoot, "compound-engineering")
await fs.mkdir(managedDir, { recursive: true })
const paths = {
managedDir,
skillsDir: path.join(piRoot, "skills"),
promptsDir: path.join(piRoot, "prompts"),
extensionsDir: path.join(piRoot, "extensions"),
mcporterConfigPath: path.join(managedDir, "mcporter.json"),
agentsPath: path.join(piRoot, "AGENTS.md"),
}
const manifest = {
version: 1,
pluginName: "compound-engineering",
skills: ["a", "b", "c"],
prompts: ["p.md"],
extensions: ["ext.js"],
}
await fs.writeFile(path.join(managedDir, "install-manifest.json"), JSON.stringify(manifest))
const result = await readPiInstallManifest(managedDir, "compound-engineering", paths)
expect(result).not.toBeNull()
expect(result!.skills).toEqual(["a", "b", "c"])
expect(result!.prompts).toEqual(["p.md"])
expect(result!.extensions).toEqual(["ext.js"])
})
})
// Even if a hostile manifest somehow bypasses the read-time filter, the
// cleanup helpers themselves must refuse to delete anything outside their
// own root directory (second layer of the same traversal defense).
describe("Pi cleanup helpers do not escape root (defense in depth)", () => {
let tempRoot: string
beforeEach(async () => {
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "pi-cleanup-"))
})
afterEach(async () => {
await fs.rm(tempRoot, { recursive: true, force: true })
})
test("cleanupRemovedPiSkills skips unsafe entries fed directly", async () => {
const skillsDir = path.join(tempRoot, "skills")
await fs.mkdir(skillsDir, { recursive: true })
// Sibling directory outside skillsDir that the "../outside-skill" entry targets.
const outsideDir = path.join(tempRoot, "outside-skill")
await fs.mkdir(outsideDir)
await fs.writeFile(path.join(outsideDir, "file.txt"), "keep me")
const hostileManifest = {
version: 1 as const,
pluginName: "compound-engineering",
skills: ["../outside-skill", "/etc/passwd"],
prompts: [],
extensions: [],
}
await cleanupRemovedPiSkills(skillsDir, hostileManifest, [])
// The traversal target must be untouched after cleanup.
expect(await fs.readFile(path.join(outsideDir, "file.txt"), "utf8")).toBe("keep me")
})
test("cleanupRemovedPiPrompts skips unsafe entries fed directly", async () => {
const promptsDir = path.join(tempRoot, "prompts")
await fs.mkdir(promptsDir, { recursive: true })
const outsideFile = path.join(tempRoot, "outside.txt")
await fs.writeFile(outsideFile, "keep me")
const hostileManifest = {
version: 1 as const,
pluginName: "compound-engineering",
skills: [],
prompts: ["../outside.txt", "/etc/passwd"],
extensions: [],
}
await cleanupRemovedPiPrompts(promptsDir, hostileManifest, [])
expect(await fs.readFile(outsideFile, "utf8")).toBe("keep me")
})
test("cleanupRemovedPiExtensions skips unsafe entries fed directly", async () => {
const extensionsDir = path.join(tempRoot, "extensions")
await fs.mkdir(extensionsDir, { recursive: true })
const outsideFile = path.join(tempRoot, "outside-ext")
await fs.writeFile(outsideFile, "keep me")
const hostileManifest = {
version: 1 as const,
pluginName: "compound-engineering",
skills: [],
prompts: [],
extensions: ["../outside-ext", "/etc/passwd", "foo/../../escape"],
}
await cleanupRemovedPiExtensions(extensionsDir, hostileManifest, [])
expect(await fs.readFile(outsideFile, "utf8")).toBe("keep me")
})
test("still cleans up safe entries correctly", async () => {
// Safety filtering must not break the happy path: entries recorded in the
// manifest but absent from the new install (empty keep-lists here) are removed.
const skillsDir = path.join(tempRoot, "skills")
const promptsDir = path.join(tempRoot, "prompts")
const extensionsDir = path.join(tempRoot, "extensions")
await fs.mkdir(skillsDir, { recursive: true })
await fs.mkdir(promptsDir, { recursive: true })
await fs.mkdir(extensionsDir, { recursive: true })
const staleSkillDir = path.join(skillsDir, "stale-skill")
await fs.mkdir(staleSkillDir)
await fs.writeFile(path.join(staleSkillDir, "SKILL.md"), "old")
const stalePrompt = path.join(promptsDir, "stale.md")
await fs.writeFile(stalePrompt, "old")
const staleExt = path.join(extensionsDir, "stale.ext")
await fs.writeFile(staleExt, "old")
const manifest = {
version: 1 as const,
pluginName: "compound-engineering",
skills: ["stale-skill"],
prompts: ["stale.md"],
extensions: ["stale.ext"],
}
await cleanupRemovedPiSkills(skillsDir, manifest, [])
await cleanupRemovedPiPrompts(promptsDir, manifest, [])
await cleanupRemovedPiExtensions(extensionsDir, manifest, [])
// Every stale artifact must be gone (fs.stat throwing means "deleted").
for (const p of [staleSkillDir, stalePrompt, staleExt]) {
let exists = true
try {
await fs.stat(p)
} catch {
exists = false
}
expect(exists).toBe(false)
}
})
})

View File

@@ -1,269 +0,0 @@
import { describe, expect, test } from "bun:test"
import { convertClaudeToOpenClaw } from "../src/converters/claude-to-openclaw"
import { parseFrontmatter } from "../src/utils/frontmatter"
import type { ClaudePlugin } from "../src/types/claude"
// Representative Claude plugin covering every piece the OpenClaw converter
// must handle: agents, commands (including one with disableModelInvocation),
// pre-existing skills, and both stdio and HTTP MCP servers.
const fixturePlugin: ClaudePlugin = {
root: "/tmp/plugin",
manifest: { name: "compound-engineering", version: "1.0.0", description: "A plugin" },
agents: [
{
name: "security-reviewer",
description: "Security-focused agent",
capabilities: ["Threat modeling", "OWASP"],
model: "claude-sonnet-4-20250514",
// Body mentions ~/.claude/settings so path-rewrite tests can assert on it.
body: "Focus on vulnerabilities in ~/.claude/settings.",
sourcePath: "/tmp/plugin/agents/security-reviewer.md",
},
],
commands: [
{
name: "workflows:plan",
description: "Planning command",
argumentHint: "[FOCUS]",
model: "inherit",
allowedTools: ["Read"],
body: "Plan the work. See ~/.claude/settings for config.",
sourcePath: "/tmp/plugin/commands/workflows/plan.md",
},
{
// Marked disableModelInvocation: the converter must exclude this one.
name: "disabled-cmd",
description: "Disabled command",
model: "inherit",
allowedTools: [],
body: "Should be excluded.",
disableModelInvocation: true,
sourcePath: "/tmp/plugin/commands/disabled-cmd.md",
},
],
skills: [
{
name: "existing-skill",
description: "Existing skill",
sourceDir: "/tmp/plugin/skills/existing-skill",
skillPath: "/tmp/plugin/skills/existing-skill/SKILL.md",
},
],
hooks: undefined,
mcpServers: {
// One stdio server and one HTTP server to exercise both config shapes.
local: { command: "npx", args: ["-y", "some-mcp-server"] },
remote: { url: "https://mcp.example.com/api", headers: { Authorization: "Bearer token" } },
},
}
// Converter options shared by every test below unless a case overrides them.
const defaultOptions = {
agentMode: "subagent" as const,
inferTemperature: false,
permissions: "none" as const,
}
// Pure-conversion tests: Claude plugin in, OpenClaw bundle out. No filesystem
// writes here — writer behavior is covered in the openclaw writer suite.
describe("convertClaudeToOpenClaw", () => {
test("converts agents to skill files with SKILL.md content", () => {
const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions)
const skill = bundle.skills.find((s) => s.name === "security-reviewer")
expect(skill).toBeDefined()
// Agent-derived skills get an "agent-" directory prefix.
expect(skill!.dir).toBe("agent-security-reviewer")
const parsed = parseFrontmatter(skill!.content)
expect(parsed.data.name).toBe("security-reviewer")
expect(parsed.data.description).toBe("Security-focused agent")
expect(parsed.data.model).toBe("anthropic/claude-sonnet-4-20250514")
expect(parsed.body).toContain("Focus on vulnerabilities")
})
test("resolves bare model aliases to provider-prefixed IDs", () => {
// A bare alias like "sonnet" must resolve to a concrete anthropic/ model ID.
const plugin: ClaudePlugin = {
...fixturePlugin,
agents: [
{
name: "fast-agent",
description: "Fast agent",
model: "sonnet",
body: "Do things quickly.",
sourcePath: "/tmp/plugin/agents/fast.md",
},
],
}
const bundle = convertClaudeToOpenClaw(plugin, defaultOptions)
const skill = bundle.skills.find((s) => s.name === "fast-agent")
const parsed = parseFrontmatter(skill!.content)
expect(parsed.data.model).toBe("anthropic/claude-sonnet-4-6")
})
test("prefixes minimax models with minimax/ provider", () => {
const plugin: ClaudePlugin = {
...fixturePlugin,
agents: [
{
name: "minimax-agent",
description: "MiniMax agent",
model: "minimax-m2.7",
body: "Use MiniMax model.",
sourcePath: "/tmp/plugin/agents/minimax.md",
},
],
}
const bundle = convertClaudeToOpenClaw(plugin, defaultOptions)
const skill = bundle.skills.find((s) => s.name === "minimax-agent")
const parsed = parseFrontmatter(skill!.content)
expect(parsed.data.model).toBe("minimax/minimax-m2.7")
})
test("converts commands to skill files (excluding disableModelInvocation)", () => {
const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions)
const cmdSkill = bundle.skills.find((s) => s.name === "workflows:plan")
expect(cmdSkill).toBeDefined()
// NOTE(review): this expects the skill dir to keep the colon
// ("cmd-workflows:plan") while the manifest tests below expect the
// sanitized "cmd-workflows-plan" path — confirm dirs and manifest paths
// agree on colon sanitization.
expect(cmdSkill!.dir).toBe("cmd-workflows:plan")
const disabledSkill = bundle.skills.find((s) => s.name === "disabled-cmd")
expect(disabledSkill).toBeUndefined()
})
test("commands list excludes disableModelInvocation commands", () => {
const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions)
const cmd = bundle.commands.find((c) => c.name === "workflows-plan")
expect(cmd).toBeDefined()
expect(cmd!.description).toBe("Planning command")
// argumentHint in the source command implies the registration accepts args.
expect(cmd!.acceptsArgs).toBe(true)
const disabled = bundle.commands.find((c) => c.name === "disabled-cmd")
expect(disabled).toBeUndefined()
})
test("command colons are replaced with dashes in command registrations", () => {
const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions)
const cmd = bundle.commands.find((c) => c.name === "workflows-plan")
expect(cmd).toBeDefined()
expect(cmd!.name).not.toContain(":")
})
test("manifest includes plugin id, display name, and skills list", () => {
const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions)
expect(bundle.manifest.id).toBe("compound-engineering")
// Display name is the title-cased plugin id.
expect(bundle.manifest.name).toBe("Compound Engineering")
expect(bundle.manifest.kind).toBe("tool")
expect(bundle.manifest.configSchema).toEqual({
type: "object",
properties: {},
})
expect(bundle.manifest.skills).toContain("skills/agent-security-reviewer")
expect(bundle.manifest.skills).toContain("skills/cmd-workflows-plan")
expect(bundle.manifest.skills).toContain("skills/existing-skill")
})
test("package.json uses plugin name and version", () => {
const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions)
expect(bundle.packageJson.name).toBe("openclaw-compound-engineering")
expect(bundle.packageJson.version).toBe("1.0.0")
expect(bundle.packageJson.type).toBe("module")
})
test("skillDirCopies includes original skill directories", () => {
// Pre-existing skills are copied from their source dir rather than generated.
const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions)
const copy = bundle.skillDirCopies.find((s) => s.name === "existing-skill")
expect(copy).toBeDefined()
expect(copy!.sourceDir).toBe("/tmp/plugin/skills/existing-skill")
})
test("stdio MCP servers included in openclaw config", () => {
const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions)
expect(bundle.openclawConfig).toBeDefined()
const mcp = (bundle.openclawConfig!.mcpServers as Record<string, unknown>)
expect(mcp.local).toBeDefined()
expect((mcp.local as any).type).toBe("stdio")
expect((mcp.local as any).command).toBe("npx")
})
test("HTTP MCP servers included as http type in openclaw config", () => {
const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions)
const mcp = (bundle.openclawConfig!.mcpServers as Record<string, unknown>)
expect(mcp.remote).toBeDefined()
expect((mcp.remote as any).type).toBe("http")
expect((mcp.remote as any).url).toBe("https://mcp.example.com/api")
})
test("paths are rewritten from .claude/ to .openclaw/ in skill content", () => {
const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions)
const agentSkill = bundle.skills.find((s) => s.name === "security-reviewer")
expect(agentSkill!.content).toContain("~/.openclaw/settings")
expect(agentSkill!.content).not.toContain("~/.claude/settings")
const cmdSkill = bundle.skills.find((s) => s.name === "workflows:plan")
expect(cmdSkill!.content).toContain("~/.openclaw/settings")
expect(cmdSkill!.content).not.toContain("~/.claude/settings")
})
test("generateEntryPoint uses JSON.stringify for safe string escaping", () => {
// Descriptions may contain quotes, backslashes, and newlines; the generated
// entry point must escape them so it stays valid source code.
const plugin: ClaudePlugin = {
...fixturePlugin,
commands: [
{
name: "tricky-cmd",
description: 'Has "quotes" and \\backslashes\\ and\nnewlines',
model: "inherit",
allowedTools: [],
body: "body",
sourcePath: "/tmp/cmd.md",
},
],
}
const bundle = convertClaudeToOpenClaw(plugin, defaultOptions)
// Entry point must be valid JS/TS — JSON.stringify handles all special chars
expect(bundle.entryPoint).toContain('"tricky-cmd"')
expect(bundle.entryPoint).toContain('\\"quotes\\"')
expect(bundle.entryPoint).toContain("\\\\backslashes\\\\")
expect(bundle.entryPoint).toContain("\\n")
// No raw unescaped newline inside a string literal
const lines = bundle.entryPoint.split("\n")
const nameLine = lines.find((l) => l.includes("tricky-cmd") && l.includes("name:"))
expect(nameLine).toBeDefined()
})
test("generateEntryPoint inlines command bodies for sync registration", () => {
const bundle = convertClaudeToOpenClaw(fixturePlugin, defaultOptions)
// Bodies must be inlined (with rewritten paths), not loaded lazily at runtime.
expect(bundle.entryPoint).not.toContain("const skills: Record<string, string> = {}")
expect(bundle.entryPoint).toContain('text: "Plan the work. See ~/.openclaw/settings for config."')
expect(bundle.entryPoint).toContain("export default function register(api)")
})
test("plugin without MCP servers has no openclawConfig", () => {
const plugin: ClaudePlugin = { ...fixturePlugin, mcpServers: undefined }
const bundle = convertClaudeToOpenClaw(plugin, defaultOptions)
expect(bundle.openclawConfig).toBeUndefined()
})
test("manifest skill paths use sanitized names matching filesystem output", () => {
const plugin: ClaudePlugin = {
...fixturePlugin,
skills: [
{
name: "ce-plan",
description: "Planning skill",
sourceDir: "/tmp/plugin/skills/ce-plan",
skillPath: "/tmp/plugin/skills/ce-plan/SKILL.md",
},
],
}
const bundle = convertClaudeToOpenClaw(plugin, defaultOptions)
// Manifest paths must not contain colons
for (const skillPath of bundle.manifest.skills) {
expect(skillPath).not.toContain(":")
}
expect(bundle.manifest.skills).toContain("skills/ce-plan")
expect(bundle.manifest.skills).toContain("skills/cmd-workflows-plan")
})
})

View File

@@ -1,104 +0,0 @@
import { describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import os from "os"
import path from "path"
import { writeOpenClawBundle } from "../src/targets/openclaw"
import { parseFrontmatter } from "../src/utils/frontmatter"
import type { OpenClawBundle } from "../src/types/openclaw"
/**
 * Resolve true when the path can be stat'ed, false otherwise (e.g. missing).
 */
async function exists(targetPath: string): Promise<boolean> {
  return fs.stat(targetPath).then(
    () => true,
    () => false,
  )
}
/**
 * Read a plugin markdown file (path relative to the repo root) and return its
 * frontmatter `description`, throwing when the field is missing or non-string.
 */
async function pluginDescription(relativePath: string): Promise<string> {
  const absolutePath = path.join(import.meta.dir, "..", relativePath)
  const { data } = parseFrontmatter(await fs.readFile(absolutePath, "utf8"), relativePath)
  const description = data.description
  if (typeof description !== "string") {
    throw new Error(`Missing description in ${relativePath}`)
  }
  return description
}
/**
 * Build the SKILL.md frontmatter layout that legacy installs wrote for
 * converted agents (description JSON-quoted, body is just a heading).
 */
function legacyAgentSkillContent(name: string, description: string): string {
  const lines = [
    "---",
    `name: ${name}`,
    `description: ${JSON.stringify(description)}`,
    "---",
    "",
    `# ${name}`,
    "",
  ]
  return lines.join("\n")
}
describe("writeOpenClawBundle", () => {
test("writes openclaw.plugin.json with a configSchema", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-writer-"))
// Minimal bundle: only the manifest matters for this assertion.
const bundle: OpenClawBundle = {
manifest: {
id: "compound-engineering",
name: "Compound Engineering",
kind: "tool",
configSchema: {
type: "object",
properties: {},
},
skills: [],
},
packageJson: {
name: "openclaw-compound-engineering",
version: "1.0.0",
},
entryPoint: "export default async function register() {}",
skills: [],
skillDirCopies: [],
commands: [],
}
await writeOpenClawBundle(tempRoot, bundle)
const manifest = JSON.parse(
await fs.readFile(path.join(tempRoot, "openclaw.plugin.json"), "utf8"),
)
// configSchema must round-trip to disk unchanged.
expect(manifest.configSchema).toEqual({
type: "object",
properties: {},
})
})
test("removes stale legacy OpenClaw agent skill directories before writing", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-writer-cleanup-"))
// Seed a skill dir from a previous install whose content matches the real
// plugin agent's description — the writer should recognize and remove it.
const staleDir = path.join(tempRoot, "skills", "agent-adversarial-reviewer")
await fs.mkdir(staleDir, { recursive: true })
await fs.writeFile(
path.join(staleDir, "SKILL.md"),
legacyAgentSkillContent(
"adversarial-reviewer",
await pluginDescription("plugins/compound-engineering/agents/review/ce-adversarial-reviewer.agent.md"),
),
)
// Writing a bundle with no skills must still trigger the legacy cleanup.
const bundle: OpenClawBundle = {
manifest: {
id: "compound-engineering",
name: "Compound Engineering",
kind: "tool",
configSchema: {
type: "object",
properties: {},
},
skills: [],
},
packageJson: {
name: "openclaw-compound-engineering",
version: "1.0.0",
},
entryPoint: "export default async function register() {}",
skills: [],
skillDirCopies: [],
commands: [],
}
await writeOpenClawBundle(tempRoot, bundle)
expect(await exists(staleDir)).toBe(false)
})
})

View File

@@ -3,8 +3,10 @@ import { promises as fs } from "fs"
import path from "path"
import os from "os"
import { writeOpenCodeBundle } from "../src/targets/opencode"
import { mergeJsonConfigAtKey } from "../src/sync/json-config"
import { mergeJsonConfigAtKey } from "../src/utils/json-config"
import type { OpenCodeBundle } from "../src/types/opencode"
import { loadClaudePlugin } from "../src/parsers/claude"
import { convertClaudeToOpenCode } from "../src/converters/claude-to-opencode"
async function exists(filePath: string): Promise<boolean> {
try {
@@ -19,6 +21,7 @@ describe("writeOpenCodeBundle", () => {
test("writes config, agents, plugins, and skills", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-test-"))
const bundle: OpenCodeBundle = {
pluginName: "compound-engineering",
config: { $schema: "https://opencode.ai/config.json" },
agents: [{ name: "agent-one", content: "Agent content" }],
plugins: [{ name: "hook.ts", content: "export {}" }],
@@ -37,6 +40,7 @@ describe("writeOpenCodeBundle", () => {
expect(await exists(path.join(tempRoot, ".opencode", "agents", "agent-one.md"))).toBe(true)
expect(await exists(path.join(tempRoot, ".opencode", "plugins", "hook.ts"))).toBe(true)
expect(await exists(path.join(tempRoot, ".opencode", "skills", "skill-one", "SKILL.md"))).toBe(true)
expect(await exists(path.join(tempRoot, ".opencode", "compound-engineering", "install-manifest.json"))).toBe(true)
})
test("writes directly into a .opencode output root", async () => {
@@ -89,6 +93,32 @@ describe("writeOpenCodeBundle", () => {
expect(await exists(path.join(outputRoot, ".opencode"))).toBe(false)
})
test("scope='global' forces flat layout for OPENCODE_CONFIG_DIR-style roots with non-conventional basenames", async () => {
// Simulates OPENCODE_CONFIG_DIR pointing to a directory whose basename is
// neither "opencode" nor ".opencode" (e.g. NixOS, Docker, custom XDG_CONFIG_HOME).
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-env-dir-"))
const outputRoot = path.join(tempRoot, "custom-opencode-config")
// NOTE(review): unlike the other bundles in this suite, this one omits
// pluginName — presumably the field is optional; confirm install-manifest
// handling for unnamed bundles is intended here.
const bundle: OpenCodeBundle = {
config: { $schema: "https://opencode.ai/config.json" },
agents: [{ name: "agent-one", content: "Agent content" }],
plugins: [],
commandFiles: [],
skillDirs: [
{
name: "skill-one",
sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
},
],
}
await writeOpenCodeBundle(outputRoot, bundle, "global")
// Flat layout: artifacts sit directly under the root, no nested .opencode.
expect(await exists(path.join(outputRoot, "opencode.json"))).toBe(true)
expect(await exists(path.join(outputRoot, "agents", "agent-one.md"))).toBe(true)
expect(await exists(path.join(outputRoot, "skills", "skill-one", "SKILL.md"))).toBe(true)
expect(await exists(path.join(outputRoot, ".opencode"))).toBe(false)
})
test("merges plugin config into existing opencode.json without destroying user keys", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-backup-"))
const outputRoot = path.join(tempRoot, ".opencode")
@@ -325,6 +355,246 @@ describe("writeOpenCodeBundle", () => {
const backupContent = await fs.readFile(path.join(commandsDir, backupFileName!), "utf8")
expect(backupContent).toBe("old content\n")
})
test("removes previously managed OpenCode artifacts that disappear on reinstall", async () => {
// First install records a manifest; the second drops several artifacts, and
// the writer must delete exactly those stale files while keeping new ones.
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-managed-cleanup-"))
const outputRoot = path.join(tempRoot, ".opencode")
await writeOpenCodeBundle(outputRoot, {
pluginName: "compound-engineering",
config: { $schema: "https://opencode.ai/config.json" },
agents: [{ name: "old-agent", content: "Agent content" }],
plugins: [{ name: "hook.ts", content: "export {}" }],
commandFiles: [{ name: "old:cmd", content: "old" }],
skillDirs: [
{
name: "skill-one",
sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
},
],
})
await writeOpenCodeBundle(outputRoot, {
pluginName: "compound-engineering",
config: { $schema: "https://opencode.ai/config.json" },
agents: [{ name: "new-agent", content: "Agent content" }],
plugins: [],
commandFiles: [{ name: "new:cmd", content: "new" }],
skillDirs: [],
})
// Command colons map to nested directories ("old:cmd" -> commands/old/cmd.md).
expect(await exists(path.join(outputRoot, "agents", "old-agent.md"))).toBe(false)
expect(await exists(path.join(outputRoot, "agents", "new-agent.md"))).toBe(true)
expect(await exists(path.join(outputRoot, "plugins", "hook.ts"))).toBe(false)
expect(await exists(path.join(outputRoot, "commands", "old", "cmd.md"))).toBe(false)
expect(await exists(path.join(outputRoot, "commands", "new", "cmd.md"))).toBe(true)
expect(await exists(path.join(outputRoot, "skills", "skill-one", "SKILL.md"))).toBe(false)
})
test("namespaces managed install manifests per plugin so installs do not collide", async () => {
// Two plugins share one OpenCode root; each must keep its own manifest and
// cleanup must stay scoped to the plugin being reinstalled.
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-multi-plugin-"))
const outputRoot = path.join(tempRoot, ".opencode")
// Install plugin A first, with a skill and an agent
await writeOpenCodeBundle(outputRoot, {
pluginName: "compound-engineering",
config: { $schema: "https://opencode.ai/config.json" },
agents: [{ name: "ce-agent", content: "ce agent" }],
plugins: [],
commandFiles: [],
skillDirs: [
{
name: "ce-skill",
sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
},
],
})
// Install plugin B into the same OpenCode root
await writeOpenCodeBundle(outputRoot, {
pluginName: "coding-tutor",
config: { $schema: "https://opencode.ai/config.json" },
agents: [{ name: "tutor-agent", content: "tutor agent" }],
plugins: [],
commandFiles: [],
skillDirs: [
{
name: "tutor-skill",
sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
},
],
})
// Both plugins must keep their own namespaced manifest
expect(await exists(path.join(outputRoot, "compound-engineering", "install-manifest.json"))).toBe(true)
expect(await exists(path.join(outputRoot, "coding-tutor", "install-manifest.json"))).toBe(true)
// Reinstall plugin A with no agents/skills — it must clean up only its own
// managed artifacts, leaving plugin B's intact (the bug the namespacing fix
// addresses: a shared manifest path would have lost B's manifest after A was
// installed, and a later A reinstall would skip B's stale-file cleanup).
await writeOpenCodeBundle(outputRoot, {
pluginName: "compound-engineering",
config: { $schema: "https://opencode.ai/config.json" },
agents: [],
plugins: [],
commandFiles: [],
skillDirs: [],
})
expect(await exists(path.join(outputRoot, "agents", "ce-agent.md"))).toBe(false)
expect(await exists(path.join(outputRoot, "skills", "ce-skill"))).toBe(false)
expect(await exists(path.join(outputRoot, "agents", "tutor-agent.md"))).toBe(true)
expect(await exists(path.join(outputRoot, "skills", "tutor-skill"))).toBe(true)
expect(await exists(path.join(outputRoot, "coding-tutor", "install-manifest.json"))).toBe(true)
})
test("moves legacy OpenCode CE artifacts to a namespaced backup", async () => {
// Seed artifacts from removed/renamed CE components of older releases; a
// real-plugin install must archive them instead of leaving them active.
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-legacy-artifacts-"))
const outputRoot = path.join(tempRoot, ".opencode")
await fs.mkdir(path.join(outputRoot, "skills", "reproduce-bug"), { recursive: true })
await fs.writeFile(path.join(outputRoot, "skills", "reproduce-bug", "SKILL.md"), "legacy removed skill")
await fs.mkdir(path.join(outputRoot, "agents"), { recursive: true })
await fs.writeFile(path.join(outputRoot, "agents", "bug-reproduction-validator.md"), "legacy removed agent")
await fs.mkdir(path.join(outputRoot, "commands"), { recursive: true })
await fs.writeFile(path.join(outputRoot, "commands", "reproduce-bug.md"), "legacy removed command")
await fs.writeFile(path.join(outputRoot, "commands", "report-bug.md"), "legacy deleted command")
// Convert the actual in-repo plugin so the legacy-name list is exercised
// against real current component names.
const plugin = await loadClaudePlugin(path.join(import.meta.dir, "..", "plugins", "compound-engineering"))
const bundle = convertClaudeToOpenCode(plugin, {
agentMode: "subagent",
inferTemperature: true,
permissions: "none",
})
await writeOpenCodeBundle(outputRoot, bundle)
expect(await exists(path.join(outputRoot, "skills", "reproduce-bug"))).toBe(false)
expect(await exists(path.join(outputRoot, "agents", "bug-reproduction-validator.md"))).toBe(false)
expect(await exists(path.join(outputRoot, "commands", "reproduce-bug.md"))).toBe(false)
expect(await exists(path.join(outputRoot, "commands", "report-bug.md"))).toBe(false)
expect(await exists(path.join(outputRoot, "compound-engineering", "legacy-backup"))).toBe(true)
})
test("upgrades from pre-namespacing legacy shared manifest for non-CE plugins", async () => {
// Pre-namespacing, ALL plugins wrote their install manifest to the same
// shared path: `<root>/compound-engineering/install-manifest.json`. After
// the namespacing fix, a plugin like `coding-tutor` reads from its own
// scoped path (`<root>/coding-tutor/install-manifest.json`), which does
// not exist on the first reinstall after upgrade. Without a fallback, the
// manifest resolves to null and the writer skips cleanup, leaving stale
// files from the pre-namespacing install in place. This test exercises
// the fallback read of the legacy shared manifest.
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-legacy-manifest-"))
const outputRoot = path.join(tempRoot, ".opencode")
// Seed the legacy shared manifest at the OLD path, recording artifacts
// that the previous coding-tutor install placed in the root.
await fs.mkdir(path.join(outputRoot, "compound-engineering"), { recursive: true })
await fs.writeFile(
path.join(outputRoot, "compound-engineering", "install-manifest.json"),
JSON.stringify({
version: 1,
pluginName: "coding-tutor",
groups: {
agents: ["stale-tutor-agent.md"],
commands: ["stale-tutor-cmd.md"],
plugins: [],
skills: ["stale-tutor-skill"],
},
}),
)
// Seed the stale artifacts on disk as they'd exist from the prior install.
await fs.mkdir(path.join(outputRoot, "agents"), { recursive: true })
await fs.writeFile(path.join(outputRoot, "agents", "stale-tutor-agent.md"), "stale")
await fs.mkdir(path.join(outputRoot, "commands"), { recursive: true })
await fs.writeFile(path.join(outputRoot, "commands", "stale-tutor-cmd.md"), "stale")
await fs.mkdir(path.join(outputRoot, "skills", "stale-tutor-skill"), { recursive: true })
await fs.writeFile(
path.join(outputRoot, "skills", "stale-tutor-skill", "SKILL.md"),
"stale",
)
// Reinstall coding-tutor with a new, non-overlapping set of artifacts.
await writeOpenCodeBundle(outputRoot, {
pluginName: "coding-tutor",
config: { $schema: "https://opencode.ai/config.json" },
agents: [{ name: "fresh-tutor-agent", content: "fresh" }],
plugins: [],
commandFiles: [],
skillDirs: [
{
name: "fresh-tutor-skill",
sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
},
],
})
// Stale artifacts from the legacy manifest must be cleaned up.
expect(await exists(path.join(outputRoot, "agents", "stale-tutor-agent.md"))).toBe(false)
expect(await exists(path.join(outputRoot, "commands", "stale-tutor-cmd.md"))).toBe(false)
expect(await exists(path.join(outputRoot, "skills", "stale-tutor-skill"))).toBe(false)
// Fresh artifacts must be written under the plugin-scoped manifest path.
expect(await exists(path.join(outputRoot, "agents", "fresh-tutor-agent.md"))).toBe(true)
expect(await exists(path.join(outputRoot, "skills", "fresh-tutor-skill", "SKILL.md"))).toBe(true)
expect(await exists(path.join(outputRoot, "coding-tutor", "install-manifest.json"))).toBe(true)
// The legacy shared manifest must be archived so it doesn't keep
// misleading a future install (and must no longer exist at the old path).
expect(await exists(path.join(outputRoot, "compound-engineering", "install-manifest.json"))).toBe(false)
expect(await exists(path.join(outputRoot, "coding-tutor", "legacy-backup"))).toBe(true)
})
test("leaves legacy shared manifest alone when it belongs to a different plugin", async () => {
// Reinforces the cross-plugin safety: a legacy manifest owned by plugin
// A must not be consumed or cleaned up by plugin B's first namespaced
// install. Plugin A's own next install is responsible for migrating it.
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "opencode-legacy-other-plugin-"))
const outputRoot = path.join(tempRoot, ".opencode")
await fs.mkdir(path.join(outputRoot, "compound-engineering"), { recursive: true })
// Legacy shared manifest owned by a third plugin, not the one we install.
const legacyManifest = {
version: 1,
pluginName: "some-other-plugin",
groups: {
agents: ["other-plugin-agent.md"],
commands: [],
plugins: [],
skills: [],
},
}
await fs.writeFile(
path.join(outputRoot, "compound-engineering", "install-manifest.json"),
JSON.stringify(legacyManifest),
)
await fs.mkdir(path.join(outputRoot, "agents"), { recursive: true })
await fs.writeFile(path.join(outputRoot, "agents", "other-plugin-agent.md"), "other")
await writeOpenCodeBundle(outputRoot, {
pluginName: "coding-tutor",
config: { $schema: "https://opencode.ai/config.json" },
agents: [{ name: "tutor-agent", content: "tutor" }],
plugins: [],
commandFiles: [],
skillDirs: [],
})
// Other plugin's artifact is left alone.
expect(await exists(path.join(outputRoot, "agents", "other-plugin-agent.md"))).toBe(true)
// Other plugin's legacy manifest is left at the legacy path.
expect(
await exists(path.join(outputRoot, "compound-engineering", "install-manifest.json")),
).toBe(true)
// And its contents must be byte-preserved, still naming the original owner.
const preserved = JSON.parse(
await fs.readFile(
path.join(outputRoot, "compound-engineering", "install-manifest.json"),
"utf8",
),
)
expect(preserved.pluginName).toBe("some-other-plugin")
})
})
describe("mergeJsonConfigAtKey", () => {

View File

@@ -5,6 +5,8 @@ import os from "os"
import { writePiBundle } from "../src/targets/pi"
import { parseFrontmatter } from "../src/utils/frontmatter"
import type { PiBundle } from "../src/types/pi"
import { loadClaudePlugin } from "../src/parsers/claude"
import { convertClaudeToPi } from "../src/converters/claude-to-pi"
async function exists(filePath: string): Promise<boolean> {
try {
@@ -59,6 +61,7 @@ describe("writePiBundle", () => {
const outputRoot = path.join(tempRoot, ".pi")
const bundle: PiBundle = {
pluginName: "compound-engineering",
prompts: [{ name: "workflows-plan", content: "Prompt content" }],
skillDirs: [
{
@@ -82,6 +85,7 @@ describe("writePiBundle", () => {
expect(await exists(path.join(outputRoot, "skills", "repo-research-analyst", "SKILL.md"))).toBe(true)
expect(await exists(path.join(outputRoot, "extensions", "compound-engineering-compat.ts"))).toBe(true)
expect(await exists(path.join(outputRoot, "compound-engineering", "mcporter.json"))).toBe(true)
expect(await exists(path.join(outputRoot, "compound-engineering", "install-manifest.json"))).toBe(true)
const agentsPath = path.join(outputRoot, "AGENTS.md")
const agentsContent = await fs.readFile(agentsPath, "utf8")
@@ -175,4 +179,125 @@ Run these research agents:
const currentConfig = JSON.parse(await fs.readFile(configPath, "utf8")) as { mcpServers: Record<string, unknown> }
expect(currentConfig.mcpServers.linear).toBeDefined()
})
test("removes previously managed Pi artifacts that disappear on reinstall", async () => {
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "pi-managed-cleanup-"))
const outputRoot = path.join(tempRoot, ".pi")
await writePiBundle(outputRoot, {
pluginName: "compound-engineering",
prompts: [{ name: "old-prompt", content: "Prompt content" }],
skillDirs: [
{
name: "skill-one",
sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
},
],
generatedSkills: [{ name: "old-agent", content: "---\nname: old-agent\n---\n\nBody" }],
extensions: [{ name: "compound-engineering-compat.ts", content: "export default function first() {}" }],
})
await writePiBundle(outputRoot, {
pluginName: "compound-engineering",
prompts: [{ name: "new-prompt", content: "Prompt content" }],
skillDirs: [],
generatedSkills: [{ name: "new-agent", content: "---\nname: new-agent\n---\n\nBody" }],
extensions: [],
})
expect(await exists(path.join(outputRoot, "prompts", "old-prompt.md"))).toBe(false)
expect(await exists(path.join(outputRoot, "prompts", "new-prompt.md"))).toBe(true)
expect(await exists(path.join(outputRoot, "skills", "skill-one", "SKILL.md"))).toBe(false)
expect(await exists(path.join(outputRoot, "skills", "old-agent", "SKILL.md"))).toBe(false)
expect(await exists(path.join(outputRoot, "skills", "new-agent", "SKILL.md"))).toBe(true)
expect(await exists(path.join(outputRoot, "extensions", "compound-engineering-compat.ts"))).toBe(false)
})
// Two different plugins share one Pi root; each must track what it installed in
// its own per-plugin manifest so one plugin's reinstall cannot clobber or forget
// the other plugin's files.
test("namespaces managed install manifests per plugin so installs do not collide", async () => {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "pi-multi-plugin-"))
  const outputRoot = path.join(tempRoot, ".pi")
  // Install plugin A first, with a prompt, skill, generated skill, and extension
  await writePiBundle(outputRoot, {
    pluginName: "compound-engineering",
    prompts: [{ name: "ce-prompt", content: "CE prompt" }],
    skillDirs: [
      {
        name: "ce-skill",
        sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
      },
    ],
    generatedSkills: [{ name: "ce-gen-skill", content: "---\nname: ce-gen-skill\n---\n\nBody" }],
    extensions: [{ name: "ce-ext.ts", content: "export default function () {}" }],
  })
  // Install plugin B into the same Pi root
  await writePiBundle(outputRoot, {
    pluginName: "coding-tutor",
    prompts: [{ name: "tutor-prompt", content: "Tutor prompt" }],
    skillDirs: [
      {
        name: "tutor-skill",
        sourceDir: path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one"),
      },
    ],
    generatedSkills: [{ name: "tutor-gen-skill", content: "---\nname: tutor-gen-skill\n---\n\nBody" }],
    extensions: [{ name: "tutor-ext.ts", content: "export default function () {}" }],
  })
  // Both plugins must keep their own namespaced manifest
  expect(await exists(path.join(outputRoot, "compound-engineering", "install-manifest.json"))).toBe(true)
  expect(await exists(path.join(outputRoot, "coding-tutor", "install-manifest.json"))).toBe(true)
  // Reinstall plugin A with no artifacts — it must clean up only its own
  // managed artifacts, leaving plugin B's intact (the bug the namespacing fix
  // addresses: a shared manifest path would have lost B's manifest after A
  // was installed, and a later A reinstall would skip B's stale-file cleanup).
  await writePiBundle(outputRoot, {
    pluginName: "compound-engineering",
    prompts: [],
    skillDirs: [],
    generatedSkills: [],
    extensions: [],
  })
  // Plugin A's previously managed artifacts are gone...
  expect(await exists(path.join(outputRoot, "prompts", "ce-prompt.md"))).toBe(false)
  expect(await exists(path.join(outputRoot, "skills", "ce-skill"))).toBe(false)
  expect(await exists(path.join(outputRoot, "skills", "ce-gen-skill"))).toBe(false)
  expect(await exists(path.join(outputRoot, "extensions", "ce-ext.ts"))).toBe(false)
  // ...while plugin B's artifacts and manifest survive untouched.
  expect(await exists(path.join(outputRoot, "prompts", "tutor-prompt.md"))).toBe(true)
  expect(await exists(path.join(outputRoot, "skills", "tutor-skill"))).toBe(true)
  expect(await exists(path.join(outputRoot, "skills", "tutor-gen-skill"))).toBe(true)
  expect(await exists(path.join(outputRoot, "extensions", "tutor-ext.ts"))).toBe(true)
  expect(await exists(path.join(outputRoot, "coding-tutor", "install-manifest.json"))).toBe(true)
})
test("moves legacy flat Pi CE artifacts to a namespaced backup", async () => {
  const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "pi-legacy-artifacts-"))
  const piRoot = path.join(tmpDir, ".pi")
  // Seed flat-layout files as an older CE release would have left them.
  const seedFile = async (contents: string, ...segments: string[]) => {
    const filePath = path.join(piRoot, ...segments)
    await fs.mkdir(path.dirname(filePath), { recursive: true })
    await fs.writeFile(filePath, contents)
  }
  await seedFile("legacy removed skill", "skills", "reproduce-bug", "SKILL.md")
  await seedFile("legacy removed agent skill", "skills", "bug-reproduction-validator", "SKILL.md")
  await seedFile("legacy removed prompt", "prompts", "reproduce-bug.md")
  await seedFile("legacy deleted command prompt", "prompts", "report-bug.md")
  // Install the real compound-engineering plugin over the legacy layout.
  const plugin = await loadClaudePlugin(path.join(import.meta.dir, "..", "plugins", "compound-engineering"))
  const bundle = convertClaudeToPi(plugin, {
    agentMode: "subagent",
    inferTemperature: true,
    permissions: "none",
  })
  await writePiBundle(piRoot, bundle)
  // Legacy flat artifacts are moved out of the way...
  expect(await exists(path.join(piRoot, "skills", "reproduce-bug"))).toBe(false)
  expect(await exists(path.join(piRoot, "skills", "bug-reproduction-validator"))).toBe(false)
  expect(await exists(path.join(piRoot, "prompts", "reproduce-bug.md"))).toBe(false)
  expect(await exists(path.join(piRoot, "prompts", "report-bug.md"))).toBe(false)
  // ...current artifacts are written, and the legacy files land in a
  // per-plugin backup directory rather than being deleted outright.
  expect(await exists(path.join(piRoot, "skills", "ce-plan", "SKILL.md"))).toBe(true)
  expect(await exists(path.join(piRoot, "skills", "ce-repo-research-analyst", "SKILL.md"))).toBe(true)
  expect(await exists(path.join(piRoot, "compound-engineering", "legacy-backup"))).toBe(true)
})
})

View File

@@ -0,0 +1,166 @@
import { describe, expect, test } from "bun:test"
import path from "path"
import { loadClaudePlugin } from "../src/parsers/claude"
import { convertClaudeToCodex } from "../src/converters/claude-to-codex"
import { convertClaudeToPi } from "../src/converters/claude-to-pi"
import { convertClaudeToKiro } from "../src/converters/claude-to-kiro"
import { getLegacyCodexArtifacts, getLegacyKiroArtifacts, getLegacyPiArtifacts, getLegacyWindsurfArtifacts } from "../src/data/plugin-legacy-artifacts"
// Legacy-artifact detection: each target's cleanup must be driven by an explicit
// historical allow-list so that user-authored files whose names happen to match
// current plugin artifacts are never swept into backup.
describe("plugin legacy artifacts", () => {
  test("Codex legacy detection is restricted to the explicit historical allow-list", async () => {
    const plugin = await loadClaudePlugin(path.join(import.meta.dir, "..", "plugins", "compound-engineering"))
    const bundle = convertClaudeToCodex(plugin, {
      agentMode: "subagent",
      inferTemperature: true,
      permissions: "none",
    })
    const artifacts = getLegacyCodexArtifacts(bundle)
    // Historical CE skills (renamed/removed since) are detected. These are
    // explicitly enumerated in EXTRA_LEGACY_ARTIFACTS_BY_PLUGIN.
    expect(artifacts.skills).toContain("ce-plan")
    expect(artifacts.skills).toContain("ce:plan")
    expect(artifacts.skills).toContain("ce:plan-beta")
    expect(artifacts.skills).toContain("ce-review")
    expect(artifacts.skills).toContain("ce:review-beta")
    expect(artifacts.skills).toContain("ce-document-review")
    expect(artifacts.skills).toContain("demo-reel")
    expect(artifacts.skills).toContain("ce:polish-beta")
    expect(artifacts.skills).toContain("ce:release-notes")
    expect(artifacts.skills).toContain("ce-update")
    expect(artifacts.skills).toContain("creating-agent-skills")
    expect(artifacts.skills).toContain("repo-research-analyst")
    expect(artifacts.skills).toContain("bug-reproduction-validator")
    expect(artifacts.skills).toContain("report-bug")
    expect(artifacts.skills).toContain("reproduce-bug")
    expect(artifacts.skills).toContain("resolve_pr_parallel")
    // Current CE skill names that were never on the historical allow-list MUST
    // NOT be flagged as legacy candidates. Otherwise a first install would
    // sweep an unrelated user skill at ~/.codex/skills/<name>/ into backup
    // simply because its name collides with a current CE skill.
    expect(artifacts.skills).not.toContain("ce-demo-reel")
    // Synthesized agent name variants (e.g. ce-<final-segment>) are not on
    // the historical allow-list either, so they should not be probed against
    // unrelated user skills at flat ~/.codex/skills/<name>/ paths.
    expect(artifacts.skills).not.toContain("ce-repo-research-analyst")
    expect(artifacts.skills).not.toContain("research-ce-repo-research-analyst")
    // Historical prompt files are likewise enumerated for cleanup.
    expect(artifacts.prompts).toContain("codify.md")
    expect(artifacts.prompts).toContain("compound-plan.md")
    expect(artifacts.prompts).toContain("plan.md")
    expect(artifacts.prompts).toContain("report-bug.md")
    expect(artifacts.prompts).toContain("workflows-review.md")
    expect(artifacts.prompts).toContain("technical_review.md")
  })
  test("Codex legacy detection ignores current bundle skills/agents not in the historical allow-list", () => {
    const artifacts = getLegacyCodexArtifacts({
      pluginName: "compound-engineering",
      prompts: [],
      skillDirs: [
        // A current skill name that was NEVER shipped historically. A user
        // could plausibly have an unrelated skill at ~/.codex/skills/my-novel-skill/
        // and a first install of CE must not touch it.
        { name: "my-novel-skill", sourceDir: "/tmp/unused" },
      ],
      generatedSkills: [
        { name: "another-novel-skill", content: "" },
      ],
      agents: [
        { name: "my-novel-agent", description: "x", instructions: "y" },
      ],
    })
    expect(artifacts.skills).not.toContain("my-novel-skill")
    expect(artifacts.skills).not.toContain("another-novel-skill")
    expect(artifacts.skills).not.toContain("my-novel-agent")
    expect(artifacts.skills).not.toContain("ce-my-novel-agent")
  })
  test("Codex legacy detection returns nothing for plugins without an allow-list", () => {
    // Third-party plugins have no historical allow-list entry, so nothing is
    // ever treated as a legacy cleanup candidate on their behalf.
    const artifacts = getLegacyCodexArtifacts({
      pluginName: "some-third-party-plugin",
      prompts: [{ name: "anything", content: "" }],
      skillDirs: [{ name: "shared-name", sourceDir: "/tmp/x" }],
      generatedSkills: [],
      agents: [{ name: "shared-name", description: "x", instructions: "y" }],
    })
    expect(artifacts.skills).toEqual([])
    expect(artifacts.prompts).toEqual([])
  })
  test("includes current and historical CE artifacts for Pi cleanup", async () => {
    const plugin = await loadClaudePlugin(path.join(import.meta.dir, "..", "plugins", "compound-engineering"))
    const bundle = convertClaudeToPi(plugin, {
      agentMode: "subagent",
      inferTemperature: true,
      permissions: "none",
    })
    const artifacts = getLegacyPiArtifacts(bundle)
    expect(artifacts.skills).toContain("bug-reproduction-validator")
    expect(artifacts.skills).toContain("creating-agent-skills")
    expect(artifacts.skills).toContain("repo-research-analyst")
    expect(artifacts.skills).toContain("reproduce-bug")
    expect(artifacts.skills).toContain("resolve_pr_parallel")
    // Colon-namespaced and ce-prefixed current names are not legacy for Pi.
    expect(artifacts.skills).not.toContain("ce:plan")
    expect(artifacts.skills).not.toContain("ce-plan")
    expect(artifacts.prompts).toContain("codify.md")
    expect(artifacts.prompts).toContain("compound-plan.md")
    expect(artifacts.prompts).toContain("plan.md")
    expect(artifacts.prompts).toContain("report-bug.md")
    expect(artifacts.prompts).toContain("workflows-review.md")
    expect(artifacts.prompts).toContain("technical_review.md")
  })
  test("includes historical CE artifacts for Kiro install cleanup", async () => {
    const plugin = await loadClaudePlugin(path.join(import.meta.dir, "..", "plugins", "compound-engineering"))
    const bundle = convertClaudeToKiro(plugin, {
      agentMode: "subagent",
      inferTemperature: true,
      permissions: "none",
    })
    const artifacts = getLegacyKiroArtifacts(bundle)
    expect(artifacts.skills).toContain("reproduce-bug")
    expect(artifacts.skills).toContain("repo-research-analyst")
    expect(artifacts.skills).toContain("creating-agent-skills")
    expect(artifacts.skills).toContain("compound-plan")
    expect(artifacts.skills).toContain("plan")
    expect(artifacts.skills).toContain("resolve_pr_parallel")
    expect(artifacts.skills).not.toContain("ce-plan")
    // Kiro also tracks legacy agents separately from skills.
    expect(artifacts.agents).toContain("repo-research-analyst")
    expect(artifacts.agents).not.toContain("ce-repo-research-analyst")
  })
  test("includes only historical CE artifacts for deprecated Windsurf cleanup", async () => {
    const plugin = await loadClaudePlugin(path.join(import.meta.dir, "..", "plugins", "compound-engineering"))
    const artifacts = getLegacyWindsurfArtifacts(plugin)
    expect(artifacts.skills).toContain("ce-review")
    expect(artifacts.skills).toContain("creating-agent-skills")
    expect(artifacts.skills).toContain("reproduce-bug")
    expect(artifacts.skills).toContain("resolve_pr_parallel")
    expect(artifacts.skills).toContain("repo-research-analyst")
    expect(artifacts.workflows).toContain("codify.md")
    expect(artifacts.workflows).toContain("compound-plan.md")
    expect(artifacts.workflows).toContain("plan.md")
    expect(artifacts.workflows).toContain("workflows-plan.md")
    expect(artifacts.workflows).toContain("ce-plan.md")
    expect(artifacts.workflows).toContain("technical_review.md")
    // Names present in the current CE bundle but NOT on the historical
    // allow-list must never be cleanup candidates, so user-authored files at
    // those paths survive `cleanup --target windsurf`.
    expect(artifacts.skills).not.toContain("ce-debug")
  })
})

View File

@@ -1,268 +0,0 @@
import { describe, expect, test } from "bun:test"
import { convertClaudeToQwen } from "../src/converters/claude-to-qwen"
import { parseFrontmatter } from "../src/utils/frontmatter"
import type { ClaudePlugin } from "../src/types/claude"
// Shared in-memory Claude plugin fixture exercising the Qwen converter's main
// inputs: two agents (one with an explicit model, one inheriting), two commands
// (one disabled for model invocation), a skill, and both stdio and remote MCP
// servers. Agent/command bodies mention ~/.claude/settings so the path-rewrite
// tests can assert the .claude -> .qwen substitution.
const fixturePlugin: ClaudePlugin = {
  root: "/tmp/plugin",
  manifest: { name: "compound-engineering", version: "1.2.0", description: "A plugin for engineers" },
  agents: [
    {
      name: "security-sentinel",
      description: "Security-focused agent",
      capabilities: ["Threat modeling", "OWASP"],
      model: "claude-sonnet-4-20250514",
      body: "Focus on vulnerabilities in ~/.claude/settings.",
      sourcePath: "/tmp/plugin/agents/security-sentinel.md",
    },
    {
      // "inherit" model: the converter should emit no model field at all.
      name: "brainstorm-agent",
      description: "Creative brainstormer",
      model: "inherit",
      body: "Generate ideas.",
      sourcePath: "/tmp/plugin/agents/brainstorm-agent.md",
    },
  ],
  commands: [
    {
      name: "workflows:plan",
      description: "Planning command",
      argumentHint: "[FOCUS]",
      model: "inherit",
      allowedTools: ["Read"],
      body: "Plan the work. Config at ~/.claude/settings.",
      sourcePath: "/tmp/plugin/commands/workflows/plan.md",
    },
    {
      // disableModelInvocation: expected to be excluded from the Qwen bundle.
      name: "disabled-cmd",
      description: "Disabled",
      model: "inherit",
      allowedTools: [],
      body: "Should be excluded.",
      disableModelInvocation: true,
      sourcePath: "/tmp/plugin/commands/disabled-cmd.md",
    },
  ],
  skills: [
    {
      name: "existing-skill",
      description: "Existing skill",
      sourceDir: "/tmp/plugin/skills/existing-skill",
      skillPath: "/tmp/plugin/skills/existing-skill/SKILL.md",
    },
  ],
  hooks: undefined,
  mcpServers: {
    // stdio server with a placeholder env var (surfaced as a sensitive setting).
    local: { command: "npx", args: ["-y", "some-mcp"], env: { API_KEY: "${YOUR_API_KEY}" } },
    // remote server: the Qwen converter is expected to skip it (see tests below).
    remote: { url: "https://mcp.example.com/api", headers: { Authorization: "Bearer token" } },
  },
}
// Baseline converter options; individual tests override inferTemperature.
const defaultOptions = {
  agentMode: "subagent" as const,
  inferTemperature: false,
}
// Conversion of a Claude plugin into a Qwen Code extension bundle: agent and
// command frontmatter, model-ID normalization, MCP handling, path rewriting,
// and the generated context file.
describe("convertClaudeToQwen", () => {
  test("converts agents to yaml format with frontmatter", () => {
    const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions)
    const agent = bundle.agents.find((a) => a.name === "security-sentinel")
    expect(agent).toBeDefined()
    expect(agent!.format).toBe("yaml")
    const parsed = parseFrontmatter(agent!.content)
    expect(parsed.data.name).toBe("security-sentinel")
    expect(parsed.data.description).toBe("Security-focused agent")
    // Claude model IDs gain the anthropic/ provider prefix.
    expect(parsed.data.model).toBe("anthropic/claude-sonnet-4-20250514")
    expect(parsed.body).toContain("Focus on vulnerabilities")
  })
  test("agent with inherit model has no model field in frontmatter", () => {
    const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions)
    const agent = bundle.agents.find((a) => a.name === "brainstorm-agent")
    expect(agent).toBeDefined()
    const parsed = parseFrontmatter(agent!.content)
    expect(parsed.data.model).toBeUndefined()
  })
  test("inferTemperature injects temperature based on agent name/description", () => {
    const bundle = convertClaudeToQwen(fixturePlugin, { ...defaultOptions, inferTemperature: true })
    const sentinel = bundle.agents.find((a) => a.name === "security-sentinel")
    const parsed = parseFrontmatter(sentinel!.content)
    expect(parsed.data.temperature).toBe(0.1) // review/security → 0.1
    const brainstorm = bundle.agents.find((a) => a.name === "brainstorm-agent")
    const bParsed = parseFrontmatter(brainstorm!.content)
    expect(bParsed.data.temperature).toBe(0.6) // brainstorm → 0.6
  })
  test("inferTemperature returns undefined for unrecognized agents (no temperature set)", () => {
    const plugin: ClaudePlugin = {
      ...fixturePlugin,
      agents: [{ name: "my-helper", description: "Generic helper", model: "inherit", body: "help", sourcePath: "/tmp/a.md" }],
    }
    const bundle = convertClaudeToQwen(plugin, { ...defaultOptions, inferTemperature: true })
    const agent = bundle.agents[0]
    const parsed = parseFrontmatter(agent.content)
    expect(parsed.data.temperature).toBeUndefined()
  })
  test("converts commands to command files excluding disableModelInvocation", () => {
    const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions)
    const planCmd = bundle.commandFiles.find((c) => c.name === "workflows:plan")
    expect(planCmd).toBeDefined()
    const parsed = parseFrontmatter(planCmd!.content)
    expect(parsed.data.description).toBe("Planning command")
    expect(parsed.data.allowedTools).toEqual(["Read"])
    // disableModelInvocation commands are dropped entirely.
    const disabled = bundle.commandFiles.find((c) => c.name === "disabled-cmd")
    expect(disabled).toBeUndefined()
  })
  test("config uses plugin manifest name and version", () => {
    const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions)
    expect(bundle.config.name).toBe("compound-engineering")
    expect(bundle.config.version).toBe("1.2.0")
    expect(bundle.config.commands).toBe("commands")
    expect(bundle.config.skills).toBe("skills")
    expect(bundle.config.agents).toBe("agents")
  })
  test("stdio MCP servers are included in config", () => {
    const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions)
    expect(bundle.config.mcpServers).toBeDefined()
    const local = bundle.config.mcpServers!.local
    expect(local.command).toBe("npx")
    expect(local.args).toEqual(["-y", "some-mcp"])
    // No cwd field
    expect((local as any).cwd).toBeUndefined()
  })
  test("remote MCP servers are skipped with a warning (not converted to curl)", () => {
    const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions)
    // Only local (stdio) server should be present
    expect(bundle.config.mcpServers).toBeDefined()
    expect(bundle.config.mcpServers!.remote).toBeUndefined()
    expect(bundle.config.mcpServers!.local).toBeDefined()
  })
  test("placeholder env vars are extracted as settings", () => {
    // ${YOUR_API_KEY} in the fixture's MCP env becomes a sensitive setting.
    const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions)
    expect(bundle.config.settings).toBeDefined()
    const apiKeySetting = bundle.config.settings!.find((s) => s.envVar === "API_KEY")
    expect(apiKeySetting).toBeDefined()
    expect(apiKeySetting!.sensitive).toBe(true)
    expect(apiKeySetting!.name).toBe("Api Key")
  })
  test("plugin with no MCP servers has no mcpServers in config", () => {
    const plugin: ClaudePlugin = { ...fixturePlugin, mcpServers: undefined }
    const bundle = convertClaudeToQwen(plugin, defaultOptions)
    expect(bundle.config.mcpServers).toBeUndefined()
  })
  test("context file uses plugin.manifest.name and manifest.description", () => {
    const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions)
    expect(bundle.contextFile).toContain("# compound-engineering")
    expect(bundle.contextFile).toContain("A plugin for engineers")
    expect(bundle.contextFile).toContain("## Agents")
    expect(bundle.contextFile).toContain("security-sentinel")
    expect(bundle.contextFile).toContain("## Commands")
    expect(bundle.contextFile).toContain("/workflows:plan")
    // Disabled commands excluded
    expect(bundle.contextFile).not.toContain("disabled-cmd")
    expect(bundle.contextFile).toContain("## Skills")
    expect(bundle.contextFile).toContain("existing-skill")
  })
  test("paths are rewritten from .claude/ to .qwen/ in agent and command content", () => {
    const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions)
    const agent = bundle.agents.find((a) => a.name === "security-sentinel")
    expect(agent!.content).toContain("~/.qwen/settings")
    expect(agent!.content).not.toContain("~/.claude/settings")
    const cmd = bundle.commandFiles.find((c) => c.name === "workflows:plan")
    expect(cmd!.content).toContain("~/.qwen/settings")
    expect(cmd!.content).not.toContain("~/.claude/settings")
  })
  test("opencode paths are NOT rewritten (only claude paths)", () => {
    const plugin: ClaudePlugin = {
      ...fixturePlugin,
      agents: [
        {
          name: "test-agent",
          description: "test",
          model: "inherit",
          body: "See .opencode/config and ~/.config/opencode/settings",
          sourcePath: "/tmp/a.md",
        },
      ],
    }
    const bundle = convertClaudeToQwen(plugin, defaultOptions)
    const agent = bundle.agents[0]
    // opencode paths should NOT be rewritten
    expect(agent.content).toContain(".opencode/config")
    expect(agent.content).not.toContain(".qwen/config")
  })
  test("skillDirs passes through original skills", () => {
    const bundle = convertClaudeToQwen(fixturePlugin, defaultOptions)
    const skill = bundle.skillDirs.find((s) => s.name === "existing-skill")
    expect(skill).toBeDefined()
    expect(skill!.sourceDir).toBe("/tmp/plugin/skills/existing-skill")
  })
  test("normalizes bare aliases to provider-prefixed model IDs", () => {
    const plugin: ClaudePlugin = {
      ...fixturePlugin,
      agents: [{ name: "a", description: "d", model: "sonnet", body: "b", sourcePath: "/tmp/a.md" }],
    }
    const bundle = convertClaudeToQwen(plugin, defaultOptions)
    const parsed = parseFrontmatter(bundle.agents[0].content)
    expect(parsed.data.model).toBe("anthropic/claude-sonnet-4-6")
  })
  test("prefixes claude models with anthropic/", () => {
    const plugin: ClaudePlugin = {
      ...fixturePlugin,
      agents: [{ name: "a", description: "d", model: "claude-opus-4-5", body: "b", sourcePath: "/tmp/a.md" }],
    }
    const bundle = convertClaudeToQwen(plugin, defaultOptions)
    const parsed = parseFrontmatter(bundle.agents[0].content)
    expect(parsed.data.model).toBe("anthropic/claude-opus-4-5")
  })
  test("prefixes qwen models with qwen/ provider", () => {
    const plugin: ClaudePlugin = {
      ...fixturePlugin,
      agents: [{ name: "a", description: "d", model: "qwen-max", body: "b", sourcePath: "/tmp/a.md" }],
    }
    const bundle = convertClaudeToQwen(plugin, defaultOptions)
    const parsed = parseFrontmatter(bundle.agents[0].content)
    expect(parsed.data.model).toBe("qwen/qwen-max")
  })
  test("prefixes minimax models with minimax/ provider", () => {
    const plugin: ClaudePlugin = {
      ...fixturePlugin,
      agents: [{ name: "a", description: "d", model: "minimax-m2.7", body: "b", sourcePath: "/tmp/a.md" }],
    }
    const bundle = convertClaudeToQwen(plugin, defaultOptions)
    const parsed = parseFrontmatter(bundle.agents[0].content)
    expect(parsed.data.model).toBe("minimax/minimax-m2.7")
  })
  test("passes through already-namespaced models unchanged", () => {
    const plugin: ClaudePlugin = {
      ...fixturePlugin,
      agents: [{ name: "a", description: "d", model: "google/gemini-2.0", body: "b", sourcePath: "/tmp/a.md" }],
    }
    const bundle = convertClaudeToQwen(plugin, defaultOptions)
    const parsed = parseFrontmatter(bundle.agents[0].content)
    expect(parsed.data.model).toBe("google/gemini-2.0")
  })
})

View File

@@ -1,204 +0,0 @@
import { describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import os from "os"
import path from "path"
import { writeQwenBundle } from "../src/targets/qwen"
import type { QwenBundle } from "../src/types/qwen"
/**
 * Builds a minimal QwenBundle fixture with no agents, commands, or skills.
 * Pass `mcpServers` to simulate plugin-managed MCP servers; omit it for a
 * bundle that manages none.
 */
function makeBundle(mcpServers?: Record<string, { command: string }>): QwenBundle {
  const config = {
    name: "test-plugin",
    version: "1.0.0",
    commands: "commands",
    skills: "skills",
    agents: "agents",
    mcpServers,
  }
  return { config, agents: [], commandFiles: [], skillDirs: [] }
}
const LEGACY_LINT_DESCRIPTION = "Use this agent when you need to run linting and code quality checks on Ruby and ERB files. Run before pushing to origin."
// Idempotent/convergent installs into a Qwen extension directory: plugin-managed
// MCP servers and top-level config keys are tracked via _compound_managed_* keys
// in qwen-extension.json so reinstalls prune stale plugin entries while user
// additions survive.
describe("writeQwenBundle", () => {
  test("cleans legacy agents before writing new agent files", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "qwen-agent-cleanup-order-"))
    const bundle: QwenBundle = {
      ...makeBundle(),
      agents: [
        {
          name: "lint",
          format: "markdown",
          content: `---\nname: lint\ndescription: ${JSON.stringify(LEGACY_LINT_DESCRIPTION)}\n---\n\nReplacement agent\n`,
        },
      ],
    }
    await writeQwenBundle(tempRoot, bundle)
    // The replacement must survive, i.e. legacy cleanup runs before the write.
    const lintPath = path.join(tempRoot, "agents", "lint.md")
    expect(await fs.readFile(lintPath, "utf8")).toContain("Replacement agent")
  })
  test("removes stale plugin MCP servers on re-install", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "qwen-converge-"))
    await writeQwenBundle(tempRoot, makeBundle({ old: { command: "old-server" } }))
    await writeQwenBundle(tempRoot, makeBundle({ fresh: { command: "new-server" } }))
    const result = JSON.parse(await fs.readFile(path.join(tempRoot, "qwen-extension.json"), "utf8"))
    expect(result.mcpServers.fresh).toBeDefined()
    expect(result.mcpServers.old).toBeUndefined()
  })
  test("preserves user-added MCP servers across re-installs", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "qwen-user-mcp-"))
    // User has their own MCP server alongside plugin-managed ones (tracking key present)
    await fs.writeFile(
      path.join(tempRoot, "qwen-extension.json"),
      JSON.stringify({
        name: "user-project",
        mcpServers: { "user-tool": { command: "my-tool" } },
        _compound_managed_mcp: [],
      }),
    )
    await writeQwenBundle(tempRoot, makeBundle({ plugin: { command: "plugin-server" } }))
    const result = JSON.parse(await fs.readFile(path.join(tempRoot, "qwen-extension.json"), "utf8"))
    expect(result.mcpServers["user-tool"]).toBeDefined()
    expect(result.mcpServers.plugin).toBeDefined()
  })
  test("preserves unknown top-level keys from existing config", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "qwen-preserve-"))
    await fs.writeFile(
      path.join(tempRoot, "qwen-extension.json"),
      JSON.stringify({ name: "user-project", customField: "should-survive" }),
    )
    await writeQwenBundle(tempRoot, makeBundle({ plugin: { command: "p" } }))
    const result = JSON.parse(await fs.readFile(path.join(tempRoot, "qwen-extension.json"), "utf8"))
    expect(result.customField).toBe("should-survive")
    // Tracking key should be written so future installs can prune stale plugin keys
    expect(result._compound_managed_keys).toBeInstanceOf(Array)
    expect(result._compound_managed_keys).not.toContain("customField")
  })
  test("prunes stale servers from legacy config without tracking key", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "qwen-legacy-"))
    // Simulate old writer output: has mcpServers but no _compound_managed_mcp
    await fs.writeFile(
      path.join(tempRoot, "qwen-extension.json"),
      JSON.stringify({
        name: "old-project",
        mcpServers: { old: { command: "old-server" }, renamed: { command: "renamed-server" } },
      }),
    )
    await writeQwenBundle(tempRoot, makeBundle({ fresh: { command: "new-server" } }))
    const result = JSON.parse(await fs.readFile(path.join(tempRoot, "qwen-extension.json"), "utf8"))
    expect(result.mcpServers.fresh).toBeDefined()
    expect(result.mcpServers.old).toBeUndefined()
    expect(result.mcpServers.renamed).toBeUndefined()
    expect(result._compound_managed_mcp).toEqual(["fresh"])
  })
  test("does not prune untracked user config when plugin has zero MCP servers", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "qwen-untracked-"))
    // Pre-existing user config with no tracking key (never had the plugin before)
    await fs.writeFile(
      path.join(tempRoot, "qwen-extension.json"),
      JSON.stringify({
        name: "user-project",
        mcpServers: { "user-tool": { command: "my-tool" } },
      }),
    )
    // Plugin installs with zero MCP servers
    await writeQwenBundle(tempRoot, makeBundle())
    const result = JSON.parse(await fs.readFile(path.join(tempRoot, "qwen-extension.json"), "utf8"))
    expect(result.mcpServers["user-tool"]).toBeDefined()
    expect(result._compound_managed_mcp).toEqual([])
  })
  test("cleans up all plugin MCP servers when bundle has none", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "qwen-zero-"))
    await writeQwenBundle(tempRoot, makeBundle({ old: { command: "old-server" } }))
    await writeQwenBundle(tempRoot, makeBundle())
    const result = JSON.parse(await fs.readFile(path.join(tempRoot, "qwen-extension.json"), "utf8"))
    expect(result.mcpServers).toBeUndefined()
    expect(result._compound_managed_mcp).toEqual([])
  })
  test("preserves user servers across zero-MCP-then-MCP round trip", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "qwen-roundtrip-"))
    // 1. Install with plugin MCP
    await writeQwenBundle(tempRoot, makeBundle({ plugin: { command: "plugin-server" } }))
    // 2. User adds their own server (with tracking key present)
    const configPath = path.join(tempRoot, "qwen-extension.json")
    const afterInstall = JSON.parse(await fs.readFile(configPath, "utf8"))
    afterInstall.mcpServers["user-tool"] = { command: "my-tool" }
    await fs.writeFile(configPath, JSON.stringify(afterInstall))
    // 3. Install with zero plugin MCP
    await writeQwenBundle(tempRoot, makeBundle())
    // 4. Install with plugin MCP again
    await writeQwenBundle(tempRoot, makeBundle({ new_plugin: { command: "new-plugin" } }))
    const result = JSON.parse(await fs.readFile(configPath, "utf8"))
    expect(result.mcpServers["user-tool"]).toBeDefined()
    expect(result.mcpServers.new_plugin).toBeDefined()
    expect(result.mcpServers.plugin).toBeUndefined()
  })
  test("prunes stale top-level plugin keys when incoming config drops them", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "qwen-stale-keys-"))
    // First install with settings
    const bundleWithSettings: QwenBundle = {
      config: {
        name: "test-plugin",
        version: "1.0.0",
        commands: "commands",
        skills: "skills",
        agents: "agents",
        settings: [{ name: "api-key", description: "API key", envVar: "API_KEY", sensitive: true }],
      },
      agents: [],
      commandFiles: [],
      skillDirs: [],
    }
    await writeQwenBundle(tempRoot, bundleWithSettings)
    // User adds their own top-level key
    const configPath = path.join(tempRoot, "qwen-extension.json")
    const afterInstall = JSON.parse(await fs.readFile(configPath, "utf8"))
    afterInstall.userCustom = "should-survive"
    await fs.writeFile(configPath, JSON.stringify(afterInstall))
    // Second install without settings
    await writeQwenBundle(tempRoot, makeBundle())
    const result = JSON.parse(await fs.readFile(configPath, "utf8"))
    // Plugin-managed "settings" is pruned; the user's key survives.
    expect(result.settings).toBeUndefined()
    expect(result.userCustom).toBe("should-survive")
    expect(result.name).toBe("test-plugin")
  })
})

View File

@@ -1,7 +1,7 @@
import { describe, expect, test } from "bun:test"
import { afterEach, describe, expect, test } from "bun:test"
import os from "os"
import path from "path"
import { resolveTargetOutputRoot } from "../src/utils/resolve-output"
import { resolveOpenCodeWriteScope, resolveTargetOutputRoot } from "../src/utils/resolve-output"
const baseOptions = {
outputRoot: "/tmp/output",
@@ -21,111 +21,54 @@ describe("resolveTargetOutputRoot", () => {
expect(result).toBe(baseOptions.piHome)
})
test("droid returns ~/.factory", () => {
const result = resolveTargetOutputRoot({ ...baseOptions, targetName: "droid" })
expect(result).toBe(path.join(os.homedir(), ".factory"))
})
test("cursor with no explicit output uses cwd", () => {
const result = resolveTargetOutputRoot({ ...baseOptions, targetName: "cursor" })
expect(result).toBe(path.join(process.cwd(), ".cursor"))
})
test("cursor with explicit output uses outputRoot", () => {
test("opencode with explicit output returns outputRoot as-is", () => {
const result = resolveTargetOutputRoot({
...baseOptions,
targetName: "cursor",
hasExplicitOutput: true,
})
expect(result).toBe(path.join("/tmp/output", ".cursor"))
})
test("windsurf default scope (global) resolves to ~/.codeium/windsurf/", () => {
const result = resolveTargetOutputRoot({
...baseOptions,
targetName: "windsurf",
scope: "global",
})
expect(result).toBe(path.join(os.homedir(), ".codeium", "windsurf"))
})
test("windsurf workspace scope resolves to cwd/.windsurf/", () => {
const result = resolveTargetOutputRoot({
...baseOptions,
targetName: "windsurf",
scope: "workspace",
})
expect(result).toBe(path.join(process.cwd(), ".windsurf"))
})
test("windsurf with explicit output overrides global scope", () => {
const result = resolveTargetOutputRoot({
...baseOptions,
targetName: "windsurf",
hasExplicitOutput: true,
scope: "global",
targetName: "opencode",
})
expect(result).toBe("/tmp/output")
})
test("windsurf with explicit output overrides workspace scope", () => {
const result = resolveTargetOutputRoot({
...baseOptions,
targetName: "windsurf",
hasExplicitOutput: true,
scope: "workspace",
})
expect(result).toBe("/tmp/output")
describe("opencode without explicit output", () => {
const originalEnv = process.env.OPENCODE_CONFIG_DIR
afterEach(() => {
if (originalEnv === undefined) {
delete process.env.OPENCODE_CONFIG_DIR
} else {
process.env.OPENCODE_CONFIG_DIR = originalEnv
}
})
test("windsurf with no scope and no explicit output uses cwd/.windsurf/", () => {
const result = resolveTargetOutputRoot({
...baseOptions,
targetName: "windsurf",
})
expect(result).toBe(path.join(process.cwd(), ".windsurf"))
})
test("opencode returns outputRoot as-is", () => {
test("falls back to ~/.config/opencode when OPENCODE_CONFIG_DIR is unset", () => {
delete process.env.OPENCODE_CONFIG_DIR
const result = resolveTargetOutputRoot({ ...baseOptions, targetName: "opencode" })
expect(result).toBe("/tmp/output")
expect(result).toBe(path.join(os.homedir(), ".config", "opencode"))
})
test("openclaw uses openclawHome + pluginName", () => {
const result = resolveTargetOutputRoot({
...baseOptions,
targetName: "openclaw",
openclawHome: "/custom/openclaw/extensions",
pluginName: "my-plugin",
test("respects OPENCODE_CONFIG_DIR when set", () => {
process.env.OPENCODE_CONFIG_DIR = "/custom/opencode"
const result = resolveTargetOutputRoot({ ...baseOptions, targetName: "opencode" })
expect(result).toBe("/custom/opencode")
})
})
expect(result).toBe("/custom/openclaw/extensions/my-plugin")
})
test("openclaw falls back to default home when not provided", () => {
const result = resolveTargetOutputRoot({
...baseOptions,
targetName: "openclaw",
pluginName: "my-plugin",
})
expect(result).toBe(path.join(os.homedir(), ".openclaw", "extensions", "my-plugin"))
describe("resolveOpenCodeWriteScope", () => {
test("returns 'global' when no explicit output and no requested scope", () => {
expect(resolveOpenCodeWriteScope(false, undefined)).toBe("global")
})
test("qwen uses qwenHome + pluginName", () => {
const result = resolveTargetOutputRoot({
...baseOptions,
targetName: "qwen",
qwenHome: "/custom/qwen/extensions",
pluginName: "my-plugin",
})
expect(result).toBe("/custom/qwen/extensions/my-plugin")
test("returns undefined when explicit output is given and no requested scope", () => {
expect(resolveOpenCodeWriteScope(true, undefined)).toBeUndefined()
})
test("qwen falls back to default home when not provided", () => {
const result = resolveTargetOutputRoot({
...baseOptions,
targetName: "qwen",
pluginName: "my-plugin",
test("honors explicit requested scope even without explicit output", () => {
expect(resolveOpenCodeWriteScope(false, "workspace")).toBe("workspace")
})
expect(result).toBe(path.join(os.homedir(), ".qwen", "extensions", "my-plugin"))
test("honors explicit requested scope when explicit output is given", () => {
expect(resolveOpenCodeWriteScope(true, "global")).toBe("global")
})
})

View File

@@ -1,91 +0,0 @@
import { describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import os from "os"
import path from "path"
import type { ClaudeHomeConfig } from "../src/parsers/claude-home"
import { syncToCodex } from "../src/sync/codex"
// Tests for the Codex sync target: skills are symlinked into <root>/skills and
// MCP servers are rendered into config.toml inside a managed marker block so
// user-authored TOML outside that block is never touched.
describe("syncToCodex", () => {
  test("writes stdio and remote MCP servers into a managed block without clobbering user config", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-codex-"))
    const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one")
    const configPath = path.join(tempRoot, "config.toml")
    // Seed a pre-existing user config: two user sections that must survive,
    // plus a stale managed block using the OLD marker label that must be
    // replaced wholesale.
    await fs.writeFile(
      configPath,
      [
        "[custom]",
        "enabled = true",
        "",
        "# BEGIN compound-plugin Claude Code MCP",
        "[mcp_servers.old]",
        "command = \"old\"",
        "# END compound-plugin Claude Code MCP",
        "",
        "[post]",
        "value = 2",
        "",
      ].join("\n"),
    )
    const config: ClaudeHomeConfig = {
      skills: [
        {
          name: "skill-one",
          sourceDir: fixtureSkillDir,
          skillPath: path.join(fixtureSkillDir, "SKILL.md"),
        },
      ],
      mcpServers: {
        // One stdio server and one remote server to exercise both shapes.
        local: { command: "echo", args: ["hello"], env: { KEY: "VALUE" } },
        remote: { url: "https://example.com/mcp", headers: { Authorization: "Bearer token" } },
      },
    }
    await syncToCodex(config, tempRoot)
    // Skills are linked in place, not copied.
    const skillPath = path.join(tempRoot, "skills", "skill-one")
    expect((await fs.lstat(skillPath)).isSymbolicLink()).toBe(true)
    const content = await fs.readFile(configPath, "utf8")
    // User sections outside the managed block are preserved verbatim.
    expect(content).toContain("[custom]")
    expect(content).toContain("[post]")
    // The stale server from the old managed block is gone…
    expect(content).not.toContain("[mcp_servers.old]")
    // …and both freshly-synced servers are present.
    expect(content).toContain("[mcp_servers.local]")
    expect(content).toContain("command = \"echo\"")
    expect(content).toContain("[mcp_servers.remote]")
    expect(content).toContain("url = \"https://example.com/mcp\"")
    expect(content).toContain("http_headers")
    // Old markers should be replaced with new ones
    expect(content).not.toContain("# BEGIN compound-plugin Claude Code MCP")
    expect(content.match(/# BEGIN Compound Engineering plugin MCP/g)?.length).toBe(1)
    // The file can hold secrets (headers/env), so it must be owner-only.
    const perms = (await fs.stat(configPath)).mode & 0o777
    expect(perms).toBe(0o600)
  })
  test("cleans up stale managed block when syncing with zero MCP servers", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-codex-zero-"))
    const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one")
    const configPath = path.join(tempRoot, "config.toml")
    // First sync with MCP servers
    const configWithServers: ClaudeHomeConfig = {
      skills: [{ name: "skill-one", sourceDir: fixtureSkillDir, skillPath: path.join(fixtureSkillDir, "SKILL.md") }],
      mcpServers: { old: { command: "old-server" } },
    }
    await syncToCodex(configWithServers, tempRoot)
    expect(await fs.readFile(configPath, "utf8")).toContain("[mcp_servers.old]")
    // Second sync with zero MCP servers
    const configEmpty: ClaudeHomeConfig = {
      skills: [{ name: "skill-one", sourceDir: fixtureSkillDir, skillPath: path.join(fixtureSkillDir, "SKILL.md") }],
      mcpServers: {},
    }
    await syncToCodex(configEmpty, tempRoot)
    const content = await fs.readFile(configPath, "utf8")
    // Syncing an empty server set removes the managed block entirely,
    // including its BEGIN/END markers.
    expect(content).not.toContain("[mcp_servers.old]")
    expect(content).not.toContain("# BEGIN")
  })
})

View File

@@ -1,204 +0,0 @@
import { describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import path from "path"
import os from "os"
import { syncToCopilot } from "../src/sync/copilot"
import type { ClaudeHomeConfig } from "../src/parsers/claude-home"
// Tests for the GitHub Copilot sync target: skills are symlinked under the
// output root, personal commands are converted into generated skills, and MCP
// servers are merged into mcp-config.json with Copilot-specific transforms
// (env-var prefixing, transport-type inference).
describe("syncToCopilot", () => {
  test("symlinks skills to .github/skills/", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-"))
    const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one")
    const config: ClaudeHomeConfig = {
      skills: [
        {
          name: "skill-one",
          sourceDir: fixtureSkillDir,
          skillPath: path.join(fixtureSkillDir, "SKILL.md"),
        },
      ],
      mcpServers: {},
    }
    await syncToCopilot(config, tempRoot)
    // Skill directory is linked back to the source, not copied.
    const linkedSkillPath = path.join(tempRoot, "skills", "skill-one")
    const linkedStat = await fs.lstat(linkedSkillPath)
    expect(linkedStat.isSymbolicLink()).toBe(true)
  })
  test("converts personal commands into Copilot skills", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-cmd-"))
    const config: ClaudeHomeConfig = {
      skills: [],
      commands: [
        {
          name: "workflows:plan",
          description: "Planning command",
          argumentHint: "[goal]",
          body: "Plan the work carefully.",
          sourcePath: "/tmp/workflows/plan.md",
        },
      ],
      mcpServers: {},
    }
    await syncToCopilot(config, tempRoot)
    // The ":"-namespaced command name becomes a "-"-joined skill directory
    // containing a generated SKILL.md.
    const skillContent = await fs.readFile(
      path.join(tempRoot, "skills", "workflows-plan", "SKILL.md"),
      "utf8",
    )
    expect(skillContent).toContain("name: workflows-plan")
    expect(skillContent).toContain("Planning command")
    expect(skillContent).toContain("## Arguments")
  })
  test("skips skills with invalid names", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-invalid-"))
    // A name containing ".." could escape the skills directory; the sync must
    // reject it rather than create the link.
    const config: ClaudeHomeConfig = {
      skills: [
        {
          name: "../escape-attempt",
          sourceDir: "/tmp/bad-skill",
          skillPath: "/tmp/bad-skill/SKILL.md",
        },
      ],
      mcpServers: {},
    }
    await syncToCopilot(config, tempRoot)
    const skillsDir = path.join(tempRoot, "skills")
    // The skills dir may not even exist; treat "missing" as "empty".
    const entries = await fs.readdir(skillsDir).catch(() => [])
    expect(entries).toHaveLength(0)
  })
  test("merges MCP config with existing file", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-merge-"))
    const mcpPath = path.join(tempRoot, "mcp-config.json")
    // Pre-existing user-defined server that must survive the merge untouched.
    await fs.writeFile(
      mcpPath,
      JSON.stringify({
        mcpServers: {
          existing: { type: "local", command: "node", args: ["server.js"], tools: ["*"] },
        },
      }, null, 2),
    )
    const config: ClaudeHomeConfig = {
      skills: [],
      mcpServers: {
        context7: { url: "https://mcp.context7.com/mcp" },
      },
    }
    await syncToCopilot(config, tempRoot)
    const merged = JSON.parse(await fs.readFile(mcpPath, "utf8")) as {
      mcpServers: Record<string, { command?: string; url?: string; type: string }>
    }
    expect(merged.mcpServers.existing?.command).toBe("node")
    expect(merged.mcpServers.context7?.url).toBe("https://mcp.context7.com/mcp")
    // URL-only servers default to the "http" transport type.
    expect(merged.mcpServers.context7?.type).toBe("http")
  })
  test("transforms MCP env var names to COPILOT_MCP_ prefix", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-env-"))
    const config: ClaudeHomeConfig = {
      skills: [],
      mcpServers: {
        server: {
          command: "echo",
          args: ["hello"],
          // One bare name and one already-prefixed name: only the former
          // should gain the COPILOT_MCP_ prefix.
          env: { API_KEY: "secret", COPILOT_MCP_TOKEN: "already-prefixed" },
        },
      },
    }
    await syncToCopilot(config, tempRoot)
    const mcpPath = path.join(tempRoot, "mcp-config.json")
    const mcpConfig = JSON.parse(await fs.readFile(mcpPath, "utf8")) as {
      mcpServers: Record<string, { env?: Record<string, string> }>
    }
    expect(mcpConfig.mcpServers.server?.env).toEqual({
      COPILOT_MCP_API_KEY: "secret",
      COPILOT_MCP_TOKEN: "already-prefixed",
    })
  })
  test("writes MCP config with restricted permissions", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-perms-"))
    const config: ClaudeHomeConfig = {
      skills: [],
      mcpServers: {
        server: { command: "echo", args: ["hello"] },
      },
    }
    await syncToCopilot(config, tempRoot)
    const mcpPath = path.join(tempRoot, "mcp-config.json")
    const stat = await fs.stat(mcpPath)
    // Check owner read+write permission (0o600 = 33216 in decimal, masked to file perms)
    const perms = stat.mode & 0o777
    expect(perms).toBe(0o600)
  })
  test("does not write MCP config when no MCP servers", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-nomcp-"))
    const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one")
    const config: ClaudeHomeConfig = {
      skills: [
        {
          name: "skill-one",
          sourceDir: fixtureSkillDir,
          skillPath: path.join(fixtureSkillDir, "SKILL.md"),
        },
      ],
      mcpServers: {},
    }
    await syncToCopilot(config, tempRoot)
    // No servers → no mcp-config.json at all (not an empty one).
    const mcpExists = await fs.access(path.join(tempRoot, "mcp-config.json")).then(() => true).catch(() => false)
    expect(mcpExists).toBe(false)
  })
  test("preserves explicit SSE transport for legacy remote servers", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-copilot-sse-"))
    const config: ClaudeHomeConfig = {
      skills: [],
      mcpServers: {
        legacy: {
          // Explicit "sse" must be kept rather than rewritten to "http".
          type: "sse",
          url: "https://example.com/sse",
        },
      },
    }
    await syncToCopilot(config, tempRoot)
    const mcpPath = path.join(tempRoot, "mcp-config.json")
    const mcpConfig = JSON.parse(await fs.readFile(mcpPath, "utf8")) as {
      mcpServers: Record<string, { type?: string; url?: string }>
    }
    expect(mcpConfig.mcpServers.legacy).toEqual({
      type: "sse",
      tools: ["*"],
      url: "https://example.com/sse",
    })
  })
})

View File

@@ -1,97 +0,0 @@
import { describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import path from "path"
import os from "os"
import { syncToDroid } from "../src/sync/droid"
import type { ClaudeHomeConfig } from "../src/parsers/claude-home"
// Behavioral tests for the Factory Droid sync target: skills become symlinks
// under <root>/skills and MCP servers are merged into <root>/mcp.json.
describe("syncToDroid", () => {
  test("symlinks skills to factory skills dir and writes mcp.json", async () => {
    const workDir = await fs.mkdtemp(path.join(os.tmpdir(), "sync-droid-"))
    const sampleSkill = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one")
    const claudeConfig: ClaudeHomeConfig = {
      skills: [
        { name: "skill-one", sourceDir: sampleSkill, skillPath: path.join(sampleSkill, "SKILL.md") },
      ],
      mcpServers: { context7: { url: "https://mcp.context7.com/mcp" } },
    }

    await syncToDroid(claudeConfig, workDir)

    // The skill must be a symlink back to the source, not a copy.
    const skillLink = await fs.lstat(path.join(workDir, "skills", "skill-one"))
    expect(skillLink.isSymbolicLink()).toBe(true)

    const raw = await fs.readFile(path.join(workDir, "mcp.json"), "utf8")
    const written = JSON.parse(raw) as {
      mcpServers: Record<string, { type: string; url?: string; disabled: boolean }>
    }
    const entry = written.mcpServers.context7
    expect(entry?.type).toBe("http")
    expect(entry?.url).toBe("https://mcp.context7.com/mcp")
    expect(entry?.disabled).toBe(false)
  })
  test("merges existing mcp.json and overwrites same-named servers from Claude", async () => {
    const workDir = await fs.mkdtemp(path.join(os.tmpdir(), "sync-droid-merge-"))
    // Seed a config with a user setting and two servers; "shared" collides
    // with the incoming Claude server and must be replaced.
    const seeded = {
      theme: "dark",
      mcpServers: {
        shared: { type: "http", url: "https://old.example.com", disabled: true },
        existing: { type: "stdio", command: "node", disabled: false },
      },
    }
    await fs.writeFile(path.join(workDir, "mcp.json"), JSON.stringify(seeded, null, 2))

    await syncToDroid(
      { skills: [], mcpServers: { shared: { url: "https://new.example.com" } } },
      workDir,
    )

    const merged = JSON.parse(await fs.readFile(path.join(workDir, "mcp.json"), "utf8")) as {
      theme: string
      mcpServers: Record<string, { type: string; url?: string; command?: string; disabled: boolean }>
    }
    // Unrelated settings and servers survive; the colliding one is updated
    // and re-enabled.
    expect(merged.theme).toBe("dark")
    expect(merged.mcpServers.existing?.command).toBe("node")
    expect(merged.mcpServers.shared?.url).toBe("https://new.example.com")
    expect(merged.mcpServers.shared?.disabled).toBe(false)
  })
  test("skips skills with invalid names", async () => {
    const workDir = await fs.mkdtemp(path.join(os.tmpdir(), "sync-droid-invalid-"))
    const sampleSkill = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one")
    // A ".."-bearing name could escape the skills dir; it must be rejected.
    const claudeConfig: ClaudeHomeConfig = {
      skills: [
        { name: "../escape", sourceDir: sampleSkill, skillPath: path.join(sampleSkill, "SKILL.md") },
      ],
      mcpServers: {},
    }

    await syncToDroid(claudeConfig, workDir)

    expect(await fs.readdir(path.join(workDir, "skills"))).toHaveLength(0)
  })
})

View File

@@ -1,160 +0,0 @@
import { describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import path from "path"
import os from "os"
import { syncToGemini } from "../src/sync/gemini"
import type { ClaudeHomeConfig } from "../src/parsers/claude-home"
// Tests for the Gemini CLI sync target: skills are symlinked, MCP servers are
// merged into settings.json, and personal commands become TOML prompt files.
describe("syncToGemini", () => {
  test("symlinks skills and writes settings.json", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-gemini-"))
    const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one")
    const config: ClaudeHomeConfig = {
      skills: [
        {
          name: "skill-one",
          sourceDir: fixtureSkillDir,
          skillPath: path.join(fixtureSkillDir, "SKILL.md"),
        },
      ],
      mcpServers: {
        // Cover both a remote (url) and a stdio (command) server.
        context7: { url: "https://mcp.context7.com/mcp" },
        local: { command: "echo", args: ["hello"], env: { FOO: "bar" } },
      },
    }
    await syncToGemini(config, tempRoot)
    // Check skill symlink
    const linkedSkillPath = path.join(tempRoot, "skills", "skill-one")
    const linkedStat = await fs.lstat(linkedSkillPath)
    expect(linkedStat.isSymbolicLink()).toBe(true)
    // Check settings.json: both servers are carried over with their fields.
    const settingsPath = path.join(tempRoot, "settings.json")
    const settings = JSON.parse(await fs.readFile(settingsPath, "utf8")) as {
      mcpServers: Record<string, { url?: string; command?: string; args?: string[]; env?: Record<string, string> }>
    }
    expect(settings.mcpServers.context7?.url).toBe("https://mcp.context7.com/mcp")
    expect(settings.mcpServers.local?.command).toBe("echo")
    expect(settings.mcpServers.local?.args).toEqual(["hello"])
    expect(settings.mcpServers.local?.env).toEqual({ FOO: "bar" })
  })
  test("merges existing settings.json", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-gemini-merge-"))
    const settingsPath = path.join(tempRoot, "settings.json")
    // Seed user settings with an unrelated key and one pre-existing server.
    await fs.writeFile(
      settingsPath,
      JSON.stringify({
        theme: "dark",
        mcpServers: { existing: { command: "node", args: ["server.js"] } },
      }, null, 2),
    )
    const config: ClaudeHomeConfig = {
      skills: [],
      mcpServers: {
        context7: { url: "https://mcp.context7.com/mcp" },
      },
    }
    await syncToGemini(config, tempRoot)
    const merged = JSON.parse(await fs.readFile(settingsPath, "utf8")) as {
      theme: string
      mcpServers: Record<string, { command?: string; url?: string }>
    }
    // Preserves existing settings
    expect(merged.theme).toBe("dark")
    // Preserves existing MCP servers
    expect(merged.mcpServers.existing?.command).toBe("node")
    // Adds new MCP servers
    expect(merged.mcpServers.context7?.url).toBe("https://mcp.context7.com/mcp")
  })
  test("writes personal commands as Gemini TOML prompts", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-gemini-cmd-"))
    const config: ClaudeHomeConfig = {
      skills: [],
      commands: [
        {
          name: "workflows:plan",
          description: "Planning command",
          argumentHint: "[goal]",
          body: "Plan the work carefully.",
          sourcePath: "/tmp/workflows/plan.md",
        },
      ],
      mcpServers: {},
    }
    await syncToGemini(config, tempRoot)
    // The ":"-namespaced command maps to a nested commands/<ns>/<name>.toml.
    const content = await fs.readFile(
      path.join(tempRoot, "commands", "workflows", "plan.toml"),
      "utf8",
    )
    expect(content).toContain("Planning command")
    expect(content).toContain("User request: {{args}}")
  })
  test("does not write settings.json when no MCP servers", async () => {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "sync-gemini-nomcp-"))
    const fixtureSkillDir = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one")
    const config: ClaudeHomeConfig = {
      skills: [
        {
          name: "skill-one",
          sourceDir: fixtureSkillDir,
          skillPath: path.join(fixtureSkillDir, "SKILL.md"),
        },
      ],
      mcpServers: {},
    }
    await syncToGemini(config, tempRoot)
    // Skills should still be symlinked
    const linkedSkillPath = path.join(tempRoot, "skills", "skill-one")
    const linkedStat = await fs.lstat(linkedSkillPath)
    expect(linkedStat.isSymbolicLink()).toBe(true)
    // But settings.json should not exist
    const settingsExists = await fs.access(path.join(tempRoot, "settings.json")).then(() => true).catch(() => false)
    expect(settingsExists).toBe(false)
  })
  test("skips mirrored ~/.agents skills when syncing to ~/.gemini and removes stale duplicate symlinks", async () => {
    const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "sync-gemini-home-"))
    const geminiRoot = path.join(tempHome, ".gemini")
    const agentsSkillDir = path.join(tempHome, ".agents", "skills", "skill-one")
    // Fix: the original wrapped the directory in a redundant single-argument
    // path.join() call; pass the path directly.
    await fs.mkdir(agentsSkillDir, { recursive: true })
    await fs.writeFile(path.join(agentsSkillDir, "SKILL.md"), "# Skill One\n", "utf8")
    // Plant a stale duplicate symlink inside .gemini pointing at the mirrored
    // ~/.agents skill; the sync should remove it rather than re-link.
    await fs.mkdir(path.join(geminiRoot, "skills"), { recursive: true })
    await fs.symlink(agentsSkillDir, path.join(geminiRoot, "skills", "skill-one"))
    const config: ClaudeHomeConfig = {
      skills: [
        {
          name: "skill-one",
          sourceDir: agentsSkillDir,
          skillPath: path.join(agentsSkillDir, "SKILL.md"),
        },
      ],
      mcpServers: {},
    }
    await syncToGemini(config, geminiRoot)
    const duplicateExists = await fs.access(path.join(geminiRoot, "skills", "skill-one")).then(() => true).catch(() => false)
    expect(duplicateExists).toBe(false)
  })
})

View File

@@ -1,83 +0,0 @@
import { describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import os from "os"
import path from "path"
import type { ClaudeHomeConfig } from "../src/parsers/claude-home"
import { syncToKiro } from "../src/sync/kiro"
// Behavioral tests for the Kiro CLI sync target: skills become symlinks and
// MCP servers are merged into <root>/settings/mcp.json.
describe("syncToKiro", () => {
  test("writes user-scope settings/mcp.json with local and remote servers", async () => {
    const workDir = await fs.mkdtemp(path.join(os.tmpdir(), "sync-kiro-"))
    const sampleSkill = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one")
    const claudeConfig: ClaudeHomeConfig = {
      skills: [
        { name: "skill-one", sourceDir: sampleSkill, skillPath: path.join(sampleSkill, "SKILL.md") },
      ],
      mcpServers: {
        local: { command: "echo", args: ["hello"], env: { TOKEN: "secret" } },
        remote: { url: "https://example.com/mcp", headers: { Authorization: "Bearer token" } },
      },
    }

    await syncToKiro(claudeConfig, workDir)

    // Skill is linked, not copied.
    const skillStat = await fs.lstat(path.join(workDir, "skills", "skill-one"))
    expect(skillStat.isSymbolicLink()).toBe(true)

    type KiroServer = {
      command?: string
      args?: string[]
      env?: Record<string, string>
      url?: string
      headers?: Record<string, string>
    }
    const written = JSON.parse(
      await fs.readFile(path.join(workDir, "settings", "mcp.json"), "utf8"),
    ) as { mcpServers: Record<string, KiroServer> }
    // Both the stdio and the remote server shapes carry all fields through.
    expect(written.mcpServers.local?.command).toBe("echo")
    expect(written.mcpServers.local?.args).toEqual(["hello"])
    expect(written.mcpServers.local?.env).toEqual({ TOKEN: "secret" })
    expect(written.mcpServers.remote?.url).toBe("https://example.com/mcp")
    expect(written.mcpServers.remote?.headers).toEqual({ Authorization: "Bearer token" })
  })
  test("merges existing settings/mcp.json", async () => {
    const workDir = await fs.mkdtemp(path.join(os.tmpdir(), "sync-kiro-merge-"))
    const settingsDir = path.join(workDir, "settings")
    await fs.mkdir(settingsDir, { recursive: true })
    // Seed a config with an unrelated key and one server that must survive.
    await fs.writeFile(
      path.join(settingsDir, "mcp.json"),
      JSON.stringify({ note: "preserve", mcpServers: { existing: { command: "node" } } }, null, 2),
    )

    await syncToKiro(
      { skills: [], mcpServers: { remote: { url: "https://example.com/mcp" } } },
      workDir,
    )

    const merged = JSON.parse(await fs.readFile(path.join(settingsDir, "mcp.json"), "utf8")) as {
      note: string
      mcpServers: Record<string, { command?: string; url?: string }>
    }
    expect(merged.note).toBe("preserve")
    expect(merged.mcpServers.existing?.command).toBe("node")
    expect(merged.mcpServers.remote?.url).toBe("https://example.com/mcp")
  })
})

View File

@@ -1,51 +0,0 @@
import { describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import os from "os"
import path from "path"
import type { ClaudeHomeConfig } from "../src/parsers/claude-home"
import { syncToOpenClaw } from "../src/sync/openclaw"
// The OpenClaw target only symlinks skills today; command and MCP translation
// are not implemented, so the sync must warn and write no openclaw.json.
describe("syncToOpenClaw", () => {
  test("symlinks skills and warns instead of writing unvalidated MCP config", async () => {
    const workDir = await fs.mkdtemp(path.join(os.tmpdir(), "sync-openclaw-"))
    const sampleSkill = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one")

    // Capture console.warn for the duration of the sync, restoring it even
    // if the sync throws.
    const captured: string[] = []
    const realWarn = console.warn
    console.warn = (message?: unknown) => {
      captured.push(String(message))
    }
    try {
      const claudeConfig: ClaudeHomeConfig = {
        skills: [
          { name: "skill-one", sourceDir: sampleSkill, skillPath: path.join(sampleSkill, "SKILL.md") },
        ],
        commands: [
          {
            name: "workflows:plan",
            description: "Planning command",
            body: "Plan the work.",
            sourcePath: "/tmp/workflows/plan.md",
          },
        ],
        mcpServers: { remote: { url: "https://example.com/mcp" } },
      }
      await syncToOpenClaw(claudeConfig, workDir)
    } finally {
      console.warn = realWarn
    }

    const skillStat = await fs.lstat(path.join(workDir, "skills", "skill-one"))
    expect(skillStat.isSymbolicLink()).toBe(true)
    // No config file may be produced for the unsupported pieces.
    const wroteConfig = await fs
      .access(path.join(workDir, "openclaw.json"))
      .then(() => true)
      .catch(() => false)
    expect(wroteConfig).toBe(false)
    // Both unsupported features must have produced an explicit warning.
    expect(captured.some((line) => line.includes("OpenClaw personal command sync is skipped"))).toBe(true)
    expect(captured.some((line) => line.includes("OpenClaw MCP sync is skipped"))).toBe(true)
  })
})

View File

@@ -1,68 +0,0 @@
import { describe, expect, test } from "bun:test"
import { promises as fs } from "fs"
import path from "path"
import os from "os"
import { syncToPi } from "../src/sync/pi"
import type { ClaudeHomeConfig } from "../src/parsers/claude-home"
// Behavioral tests for the Pi sync target: skills become symlinks and MCP
// servers are merged into compound-engineering/mcporter.json.
describe("syncToPi", () => {
  test("symlinks skills and writes MCPorter config", async () => {
    const workDir = await fs.mkdtemp(path.join(os.tmpdir(), "sync-pi-"))
    const sampleSkill = path.join(import.meta.dir, "fixtures", "sample-plugin", "skills", "skill-one")
    const claudeConfig: ClaudeHomeConfig = {
      skills: [
        { name: "skill-one", sourceDir: sampleSkill, skillPath: path.join(sampleSkill, "SKILL.md") },
      ],
      mcpServers: {
        context7: { url: "https://mcp.context7.com/mcp" },
        local: { command: "echo", args: ["hello"] },
      },
    }

    await syncToPi(claudeConfig, workDir)

    const skillStat = await fs.lstat(path.join(workDir, "skills", "skill-one"))
    expect(skillStat.isSymbolicLink()).toBe(true)

    // Remote servers map url -> baseUrl; stdio servers keep their command.
    const mcporterPath = path.join(workDir, "compound-engineering", "mcporter.json")
    const written = JSON.parse(await fs.readFile(mcporterPath, "utf8")) as {
      mcpServers: Record<string, { baseUrl?: string; command?: string }>
    }
    expect(written.mcpServers.context7?.baseUrl).toBe("https://mcp.context7.com/mcp")
    expect(written.mcpServers.local?.command).toBe("echo")
  })
  test("merges existing MCPorter config", async () => {
    const workDir = await fs.mkdtemp(path.join(os.tmpdir(), "sync-pi-merge-"))
    const mcporterPath = path.join(workDir, "compound-engineering", "mcporter.json")
    await fs.mkdir(path.dirname(mcporterPath), { recursive: true })
    // Seed one pre-existing server that the merge must leave intact.
    await fs.writeFile(
      mcporterPath,
      JSON.stringify({ mcpServers: { existing: { baseUrl: "https://example.com/mcp" } } }, null, 2),
    )

    await syncToPi(
      { skills: [], mcpServers: { context7: { url: "https://mcp.context7.com/mcp" } } },
      workDir,
    )

    const merged = JSON.parse(await fs.readFile(mcporterPath, "utf8")) as {
      mcpServers: Record<string, { baseUrl?: string }>
    }
    expect(merged.mcpServers.existing?.baseUrl).toBe("https://example.com/mcp")
    expect(merged.mcpServers.context7?.baseUrl).toBe("https://mcp.context7.com/mcp")
  })
})

Some files were not shown because too many files have changed in this diff Show More