Compare commits

...

13 Commits

Author SHA1 Message Date
Stefano Fiorini
c344e96984 Remove external web automation provenance refs 2026-04-09 11:11:40 -05:00
Stefano Fiorini
879cccf383 Refine web automation provenance exclusions 2026-04-09 10:31:41 -05:00
Stefano Fiorini
c97b7d44e5 feat(web-automation): implement milestone M2 mirror and docs 2026-04-09 10:21:21 -05:00
Stefano Fiorini
99fe6eab4e feat(web-automation): implement milestone M1 canonical codex migration 2026-04-09 10:13:25 -05:00
Stefano Fiorini
e917387d4f Force sending Telegram notification upon stopping for user input 2026-03-25 11:59:10 -05:00
Stefano Fiorini
63a048a26c Align reviewer runtime and Telegram notifications 2026-03-24 11:45:58 -05:00
Stefano Fiorini
4d37674626 fix: isolate claude reviewer templates 2026-03-08 18:04:07 -05:00
Stefano Fiorini
d44a2288b4 Update Atlassian skill prereq checks 2026-03-06 08:23:49 -06:00
Stefano Fiorini
783bcfa037 fix(atlassian): tighten health checks and review coverage 2026-03-06 07:06:38 -06:00
Stefano Fiorini
c3afee091b feat(atlassian): implement milestone M4 - packaging and doc sync 2026-03-06 00:58:23 -06:00
Stefano Fiorini
972789c240 feat(atlassian): implement milestone M3 - confluence and safety controls 2026-03-06 00:53:13 -06:00
Stefano Fiorini
73c4bff901 feat(atlassian): implement milestone M2 - jira command surface 2026-03-06 00:39:54 -06:00
Stefano Fiorini
e56f0c9941 feat(atlassian): implement milestone M1 - scaffold skill and shared runtime 2026-03-06 00:23:21 -06:00
141 changed files with 15207 additions and 2245 deletions

5
.gitignore vendored
View File

@@ -1,2 +1,7 @@
/ai_plan/ /ai_plan/
/.worktrees/ /.worktrees/
/skills/atlassian/shared/scripts/.env
/skills/atlassian/shared/scripts/node_modules/
/skills/atlassian/*/scripts/.env
/skills/atlassian/*/scripts/node_modules/
/skills/web-automation/*/scripts/node_modules/

View File

@@ -16,12 +16,19 @@ ai-coding-skills/
├── README.md ├── README.md
├── docs/ ├── docs/
│ ├── README.md │ ├── README.md
│ ├── ATLASSIAN.md
│ ├── CREATE-PLAN.md │ ├── CREATE-PLAN.md
│ ├── IMPLEMENT-PLAN.md │ ├── IMPLEMENT-PLAN.md
│ └── WEB-AUTOMATION.md │ └── WEB-AUTOMATION.md
├── skills/ ├── skills/
│ ├── _template/ │ ├── _template/
│ │ └── SKILL.md │ │ └── SKILL.md
│ ├── atlassian/
│ │ ├── codex/
│ │ ├── claude-code/
│ │ ├── cursor/
│ │ ├── opencode/
│ │ └── shared/
│ ├── create-plan/ │ ├── create-plan/
│ │ ├── codex/ │ │ ├── codex/
│ │ ├── claude-code/ │ │ ├── claude-code/
@@ -49,6 +56,10 @@ ai-coding-skills/
| Skill | Agent Variant | Purpose | Status | Docs | | Skill | Agent Variant | Purpose | Status | Docs |
|---|---|---|---|---| |---|---|---|---|---|
| atlassian | codex | Portable Jira and Confluence workflows through a shared Cloud-first CLI | Ready | [ATLASSIAN](docs/ATLASSIAN.md) |
| atlassian | claude-code | Portable Jira and Confluence workflows through a shared Cloud-first CLI | Ready | [ATLASSIAN](docs/ATLASSIAN.md) |
| atlassian | opencode | Portable Jira and Confluence workflows through a shared Cloud-first CLI | Ready | [ATLASSIAN](docs/ATLASSIAN.md) |
| atlassian | cursor | Portable Jira and Confluence workflows through a shared Cloud-first CLI | Ready | [ATLASSIAN](docs/ATLASSIAN.md) |
| create-plan | codex | Structured planning with milestones, iterative cross-model review, and runbook-first execution workflow | Ready | [CREATE-PLAN](docs/CREATE-PLAN.md) | | create-plan | codex | Structured planning with milestones, iterative cross-model review, and runbook-first execution workflow | Ready | [CREATE-PLAN](docs/CREATE-PLAN.md) |
| create-plan | claude-code | Structured planning with milestones, iterative cross-model review, and runbook-first execution workflow | Ready | [CREATE-PLAN](docs/CREATE-PLAN.md) | | create-plan | claude-code | Structured planning with milestones, iterative cross-model review, and runbook-first execution workflow | Ready | [CREATE-PLAN](docs/CREATE-PLAN.md) |
| create-plan | opencode | Structured planning with milestones, iterative cross-model review, and runbook-first execution workflow | Ready | [CREATE-PLAN](docs/CREATE-PLAN.md) | | create-plan | opencode | Structured planning with milestones, iterative cross-model review, and runbook-first execution workflow | Ready | [CREATE-PLAN](docs/CREATE-PLAN.md) |
@@ -57,11 +68,12 @@ ai-coding-skills/
| implement-plan | claude-code | Worktree-isolated plan execution with iterative cross-model milestone review | Ready | [IMPLEMENT-PLAN](docs/IMPLEMENT-PLAN.md) | | implement-plan | claude-code | Worktree-isolated plan execution with iterative cross-model milestone review | Ready | [IMPLEMENT-PLAN](docs/IMPLEMENT-PLAN.md) |
| implement-plan | opencode | Worktree-isolated plan execution with iterative cross-model milestone review | Ready | [IMPLEMENT-PLAN](docs/IMPLEMENT-PLAN.md) | | implement-plan | opencode | Worktree-isolated plan execution with iterative cross-model milestone review | Ready | [IMPLEMENT-PLAN](docs/IMPLEMENT-PLAN.md) |
| implement-plan | cursor | Worktree-isolated plan execution with iterative cross-model milestone review | Ready | [IMPLEMENT-PLAN](docs/IMPLEMENT-PLAN.md) | | implement-plan | cursor | Worktree-isolated plan execution with iterative cross-model milestone review | Ready | [IMPLEMENT-PLAN](docs/IMPLEMENT-PLAN.md) |
| web-automation | codex | Playwright + Camoufox browsing/scraping/auth automation | Ready | [WEB-AUTOMATION](docs/WEB-AUTOMATION.md) | | web-automation | codex | CloakBrowser-backed browsing, scraping, auth, flow automation, and install validation | Ready | [WEB-AUTOMATION](docs/WEB-AUTOMATION.md) |
| web-automation | claude-code | Playwright + Camoufox browsing/scraping/auth automation | Ready | [WEB-AUTOMATION](docs/WEB-AUTOMATION.md) | | web-automation | claude-code | CloakBrowser-backed browsing, scraping, auth, flow automation, and install validation | Ready | [WEB-AUTOMATION](docs/WEB-AUTOMATION.md) |
| web-automation | opencode | Playwright + Camoufox browsing/scraping/auth automation | Ready | [WEB-AUTOMATION](docs/WEB-AUTOMATION.md) | | web-automation | opencode | CloakBrowser-backed browsing, scraping, auth, flow automation, and install validation | Ready | [WEB-AUTOMATION](docs/WEB-AUTOMATION.md) |
- Docs index: `docs/README.md` - Docs index: `docs/README.md`
- Atlassian guide: `docs/ATLASSIAN.md`
- Create-plan guide: `docs/CREATE-PLAN.md` - Create-plan guide: `docs/CREATE-PLAN.md`
- Implement-plan guide: `docs/IMPLEMENT-PLAN.md` - Implement-plan guide: `docs/IMPLEMENT-PLAN.md`
- Web-automation guide: `docs/WEB-AUTOMATION.md` - Web-automation guide: `docs/WEB-AUTOMATION.md`

156
docs/ATLASSIAN.md Normal file
View File

@@ -0,0 +1,156 @@
# ATLASSIAN
## Purpose
Provide a portable Atlassian Cloud skill for Codex, Claude Code, Cursor Agent, and OpenCode using one shared CLI surface for common Jira and Confluence workflows.
## Why This Skill Exists
The repo targets multiple agent environments with uneven MCP availability. This skill packages a consistent CLI contract so the same task-oriented workflows can be used across all supported agents without depending on MCP-specific integrations.
The canonical runtime lives in `skills/atlassian/shared/scripts/`. Installable per-agent `scripts/` bundles are generated from that source with:
```bash
pnpm --dir skills/atlassian/shared/scripts sync:agents
```
## Requirements
- Node.js 20+
- `pnpm`
- Atlassian Cloud access
- `ATLASSIAN_BASE_URL`
- `ATLASSIAN_EMAIL`
- `ATLASSIAN_API_TOKEN`
The `ATLASSIAN_*` values may come from the shell environment or a `.env` file in the installed agent-specific `scripts/` folder.
Optional:
- `ATLASSIAN_JIRA_BASE_URL`
- `ATLASSIAN_CONFLUENCE_BASE_URL`
- `ATLASSIAN_DEFAULT_PROJECT`
- `ATLASSIAN_DEFAULT_SPACE`
## Supported Commands
- `health`
- `jira-search`
- `jira-get`
- `jira-create`
- `jira-update`
- `jira-comment`
- `jira-transitions`
- `jira-transition`
- `conf-search`
- `conf-get`
- `conf-create`
- `conf-update`
- `conf-comment`
- `conf-children`
- `raw`
## Command Notes
- `health` validates local configuration, probes Jira and Confluence separately, and reports one product as unavailable without masking the other.
- `jira-create` requires `--type`, `--summary`, and either `--project` or `ATLASSIAN_DEFAULT_PROJECT`.
- `jira-update` requires `--issue` and at least one of `--summary` or `--description-file`.
- `conf-create` requires `--title`, `--body-file`, and either `--space` or `ATLASSIAN_DEFAULT_SPACE`.
- `conf-update` requires `--page`, `--title`, and `--body-file`; it fetches the current page version before building the update payload.
- `raw --body-file` expects a workspace-scoped JSON file and is limited to validated Atlassian API prefixes.
## Safety Model
- Default output is JSON.
- Mutating commands support `--dry-run`.
- Jira long-text fields are converted to ADF locally.
- Confluence page writes are storage-first in v1.
- `raw` is restricted to `GET|POST|PUT`.
- `--body-file` must stay within the active workspace.
## Install
### Codex
```bash
mkdir -p ~/.codex/skills/atlassian
cp -R skills/atlassian/codex/* ~/.codex/skills/atlassian/
cd ~/.codex/skills/atlassian/scripts
pnpm install
```
### Claude Code
```bash
mkdir -p ~/.claude/skills/atlassian
cp -R skills/atlassian/claude-code/* ~/.claude/skills/atlassian/
cd ~/.claude/skills/atlassian/scripts
pnpm install
```
### OpenCode
```bash
mkdir -p ~/.config/opencode/skills/atlassian
cp -R skills/atlassian/opencode/* ~/.config/opencode/skills/atlassian/
cd ~/.config/opencode/skills/atlassian/scripts
pnpm install
```
### Cursor
Repo-local install:
```bash
mkdir -p .cursor/skills/atlassian
cp -R skills/atlassian/cursor/* .cursor/skills/atlassian/
cd .cursor/skills/atlassian/scripts
pnpm install
```
Global install:
```bash
mkdir -p ~/.cursor/skills/atlassian
cp -R skills/atlassian/cursor/* ~/.cursor/skills/atlassian/
cd ~/.cursor/skills/atlassian/scripts
pnpm install
```
## Verify Installation
Run in the installed `scripts/` folder:
```bash
node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')"
node -e 'require("dotenv").config({ path: ".env" }); const required = ["ATLASSIAN_BASE_URL", "ATLASSIAN_EMAIL", "ATLASSIAN_API_TOKEN"]; const missing = required.filter((key) => !(process.env[key] || "").trim()); if (missing.length) { console.error("Missing required Atlassian config: " + missing.join(", ")); process.exit(1); } console.log("OK: Atlassian config present")'
pnpm atlassian health
```
## Usage Examples
- Search Jira issues:
- `pnpm atlassian jira-search --jql "project = ENG ORDER BY updated DESC" --max-results 25`
- Inspect an issue:
- `pnpm atlassian jira-get --issue ENG-123`
- Dry-run a Jira comment:
- `pnpm atlassian jira-comment --issue ENG-123 --body-file comment.md --dry-run`
- Dry-run a Jira issue create with default project fallback:
- `pnpm atlassian jira-create --type Story --summary "Add Atlassian skill" --description-file story.md --dry-run`
- Search Confluence pages:
- `pnpm atlassian conf-search --query "title ~ \"Runbook\"" --max-results 10 --start-at 0`
- Inspect a Confluence page:
- `pnpm atlassian conf-get --page 12345`
- Dry-run a Confluence page update:
- `pnpm atlassian conf-update --page 12345 --title "Runbook" --body-file page.storage.html --dry-run`
- Dry-run a Confluence footer comment:
- `pnpm atlassian conf-comment --page 12345 --body-file comment.storage.html --dry-run`
- Use bounded raw mode:
- `pnpm atlassian raw --product jira --method GET --path "/rest/api/3/issue/ENG-123"`
- `pnpm atlassian raw --product confluence --method POST --path "/wiki/api/v2/pages" --body-file page.json --dry-run`
## Scope Notes
- Atlassian Cloud is first-class in v1.
- Data Center support is future work.
- Full `mcp-atlassian` parity is not the goal in v1; the initial scope is the approved core workflow set above.

View File

@@ -18,6 +18,7 @@ Create structured implementation plans with milestone and story tracking, and op
- Claude Code: `~/.claude/skills/reviewer-runtime/run-review.sh` - Claude Code: `~/.claude/skills/reviewer-runtime/run-review.sh`
- OpenCode: `~/.config/opencode/skills/reviewer-runtime/run-review.sh` - OpenCode: `~/.config/opencode/skills/reviewer-runtime/run-review.sh`
- Cursor: `.cursor/skills/reviewer-runtime/run-review.sh` or `~/.cursor/skills/reviewer-runtime/run-review.sh` - Cursor: `.cursor/skills/reviewer-runtime/run-review.sh` or `~/.cursor/skills/reviewer-runtime/run-review.sh`
- Telegram notification setup is documented in [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md)
If dependencies are missing, stop and return: If dependencies are missing, stop and return:
@@ -115,10 +116,13 @@ Verify Superpowers dependencies exist in your agent skills root:
- Creates plans under `ai_plan/YYYY-MM-DD-<short-title>/`. - Creates plans under `ai_plan/YYYY-MM-DD-<short-title>/`.
- Ensures `/ai_plan/` is in `.gitignore`. - Ensures `/ai_plan/` is in `.gitignore`.
- Commits `.gitignore` update locally when added. - Commits `.gitignore` update locally when added.
- Asks which reviewer CLI and model to use (or accepts `skip` for no review). - Asks which reviewer CLI, model, and max rounds to use (or accepts `skip` for no review).
- Iteratively reviews the plan with the chosen reviewer (max 5 rounds) before generating files. - Iteratively reviews the plan with the chosen reviewer (default max 10 rounds) before generating files.
- Runs reviewer commands through `reviewer-runtime/run-review.sh` when available, with fallback to direct synchronous execution only if the helper is missing. - Runs reviewer commands through `reviewer-runtime/run-review.sh` when available, with fallback to direct synchronous execution only if the helper is missing.
- Waits as long as the reviewer runtime keeps emitting per-minute `In progress N` heartbeats.
- Requires reviewer findings to be ordered `P0` through `P3`, with `P3` explicitly non-blocking.
- Captures reviewer stderr and helper status logs for diagnostics and retains them on failed, empty-output, or operator-decision review rounds. - Captures reviewer stderr and helper status logs for diagnostics and retains them on failed, empty-output, or operator-decision review rounds.
- Sends completion notifications through Telegram only when the shared setup in [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md) is installed and configured.
- Produces: - Produces:
- `original-plan.md` - `original-plan.md`
- `final-transcript.md` - `final-transcript.md`
@@ -130,13 +134,24 @@ Verify Superpowers dependencies exist in your agent skills root:
After the plan is created (design + milestones + stories), the skill sends it to a second model for review: After the plan is created (design + milestones + stories), the skill sends it to a second model for review:
1. **Configure** — user picks a reviewer CLI (`codex`, `claude`, `cursor`) and model, or skips 1. **Configure** — user picks a reviewer CLI (`codex`, `claude`, `cursor`), a model, and optional max rounds (default 10), or skips
2. **Prepare** — plan payload and a bash reviewer command script are written to temp files 2. **Prepare** — plan payload and a bash reviewer command script are written to temp files
3. **Run** — the command script is executed through `reviewer-runtime/run-review.sh` when installed 3. **Run** — the command script is executed through `reviewer-runtime/run-review.sh` when installed
4. **Feedback** — reviewer evaluates correctness, risks, missing steps, alternatives, security 4. **Feedback** — reviewer evaluates correctness, risks, missing steps, alternatives, security, and returns `## Summary`, `## Findings`, and `## Verdict`
5. **Revise** — the planning agent addresses each issue and re-submits 5. **Prioritize** — findings are ordered `P0`, `P1`, `P2`, `P3`
6. **Repeat**up to 5 rounds until the reviewer returns `VERDICT: APPROVED` 6. **Revise**the planning agent addresses findings in priority order and re-submits
7. **Finalize** — approved plan is used to generate the plan file package 7. **Repeat** — up to max rounds until the reviewer returns `VERDICT: APPROVED`
8. **Finalize** — approved plan is used to generate the plan file package
### Reviewer Output Contract
- `P0` = total blocker
- `P1` = major risk
- `P2` = must-fix before approval
- `P3` = cosmetic / nice to have
- Each severity section should use `- None.` when empty
- `VERDICT: APPROVED` is valid only when no `P0`, `P1`, or `P2` findings remain
- `P3` findings are non-blocking, but the caller should still try to fix them when cheap and safe
### Runtime Artifacts ### Runtime Artifacts
@@ -153,23 +168,25 @@ The review flow may create these temp artifacts:
Status log lines use this format: Status log lines use this format:
```text ```text
ts=<ISO-8601> level=<info|warn|error> state=<running-silent|running-active|stall-warning|completed|completed-empty-output|failed|needs-operator-decision> elapsed_s=<int> pid=<int> stdout_bytes=<int> stderr_bytes=<int> note="<short message>" ts=<ISO-8601> level=<info|warn|error> state=<running-silent|running-active|in-progress|stall-warning|completed|completed-empty-output|failed|needs-operator-decision> elapsed_s=<int> pid=<int> stdout_bytes=<int> stderr_bytes=<int> note="<short message>"
``` ```
`stall-warning` is a heartbeat/status-log state only. It is not a terminal review result. `in-progress` is the liveness heartbeat emitted roughly once per minute with `note="In progress N"`.
`stall-warning` is a non-terminal status-log state only. It does not mean the caller should stop waiting if `in-progress` heartbeats continue.
### Failure Handling ### Failure Handling
- `completed-empty-output` means the reviewer exited without producing review text; surface `.stderr` and `.status`, then retry only after diagnosing the cause. - `completed-empty-output` means the reviewer exited without producing review text; surface `.stderr` and `.status`, then retry only after diagnosing the cause.
- `needs-operator-decision` means the helper reached hard-timeout escalation; surface `.status` and decide whether to keep waiting, abort, or retry with different parameters. - `needs-operator-decision` means the helper reached hard-timeout escalation; surface `.status` and decide whether to extend the timeout, abort, or retry with different parameters.
- Successful rounds clean up temp artifacts. Failed, empty-output, and operator-decision rounds should retain `.stderr`, `.status`, and `.runner.out` until diagnosed. - Successful rounds clean up temp artifacts. Failed, empty-output, and operator-decision rounds should retain `.stderr`, `.status`, and `.runner.out` until diagnosed.
- As long as fresh `in-progress` heartbeats continue to arrive roughly once per minute, the caller should keep waiting.
### Supported Reviewer CLIs ### Supported Reviewer CLIs
| CLI | Command | Session Resume | Read-Only Mode | | CLI | Command | Session Resume | Read-Only Mode |
|---|---|---|---| |---|---|---|---|
| `codex` | `codex exec -m <model> -s read-only` | Yes (`codex exec resume <id>`) | `-s read-only` | | `codex` | `codex exec -m <model> -s read-only` | Yes (`codex exec resume <id>`) | `-s read-only` |
| `claude` | `claude -p --model <model> --allowedTools Read` | No (fresh call each round) | `--allowedTools Read` | | `claude` | `claude -p --model <model> --strict-mcp-config --setting-sources user` | No (fresh call each round) | `--strict-mcp-config --setting-sources user` |
| `cursor` | `cursor-agent -p --mode=ask --model <model> --trust --output-format json` | Yes (`--resume <id>`) | `--mode=ask` | | `cursor` | `cursor-agent -p --mode=ask --model <model> --trust --output-format json` | Yes (`--resume <id>`) | `--mode=ask` |
For all three CLIs, the preferred execution path is: For all three CLIs, the preferred execution path is:
@@ -178,6 +195,13 @@ For all three CLIs, the preferred execution path is:
2. run that script through `reviewer-runtime/run-review.sh` 2. run that script through `reviewer-runtime/run-review.sh`
3. fall back to direct synchronous execution only if the helper is missing or not executable 3. fall back to direct synchronous execution only if the helper is missing or not executable
## Notifications
- Telegram is the only supported notification path.
- Shared setup: [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md)
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
## Template Guardrails ## Template Guardrails
All plan templates now include guardrail sections that enforce: All plan templates now include guardrail sections that enforce:

View File

@@ -25,6 +25,7 @@ Execute an existing plan (created by `create-plan`) in an isolated git worktree,
- Claude Code: `~/.claude/skills/reviewer-runtime/run-review.sh` - Claude Code: `~/.claude/skills/reviewer-runtime/run-review.sh`
- OpenCode: `~/.config/opencode/skills/reviewer-runtime/run-review.sh` - OpenCode: `~/.config/opencode/skills/reviewer-runtime/run-review.sh`
- Cursor: `.cursor/skills/reviewer-runtime/run-review.sh` or `~/.cursor/skills/reviewer-runtime/run-review.sh` - Cursor: `.cursor/skills/reviewer-runtime/run-review.sh` or `~/.cursor/skills/reviewer-runtime/run-review.sh`
- Telegram notification setup is documented in [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md)
If dependencies are missing, stop and return: If dependencies are missing, stop and return:
@@ -133,10 +134,13 @@ Verify Superpowers execution dependencies exist in your agent skills root:
- Runs lint/typecheck/tests as a gate before each milestone review. - Runs lint/typecheck/tests as a gate before each milestone review.
- Sends each milestone to a reviewer CLI for approval (max rounds configurable, default 10). - Sends each milestone to a reviewer CLI for approval (max rounds configurable, default 10).
- Runs reviewer commands through `reviewer-runtime/run-review.sh` when available, with fallback to direct synchronous execution only if the helper is missing. - Runs reviewer commands through `reviewer-runtime/run-review.sh` when available, with fallback to direct synchronous execution only if the helper is missing.
- Waits as long as the reviewer runtime keeps emitting per-minute `In progress N` heartbeats.
- Requires reviewer findings to be ordered `P0` through `P3`, with `P3` explicitly non-blocking.
- Captures reviewer stderr and helper status logs for diagnostics and retains them on failed, empty-output, or operator-decision review rounds. - Captures reviewer stderr and helper status logs for diagnostics and retains them on failed, empty-output, or operator-decision review rounds.
- Commits each milestone locally only after reviewer approval (does not push). - Commits each milestone locally only after reviewer approval (does not push).
- After all milestones approved, merges worktree branch to parent and deletes worktree. - After all milestones approved, merges worktree branch to parent and deletes worktree.
- Supports resume: detects existing worktree and `in-dev`/`completed` stories. - Supports resume: detects existing worktree and `in-dev`/`completed` stories.
- Sends completion notifications through Telegram only when the shared setup in [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md) is installed and configured.
## Milestone Review Loop ## Milestone Review Loop
@@ -145,11 +149,22 @@ After each milestone is implemented and verified, the skill sends it to a second
1. **Configure** — user picks a reviewer CLI (`codex`, `claude`, `cursor`) and model, or skips 1. **Configure** — user picks a reviewer CLI (`codex`, `claude`, `cursor`) and model, or skips
2. **Prepare** — milestone payload and a bash reviewer command script are written to temp files 2. **Prepare** — milestone payload and a bash reviewer command script are written to temp files
3. **Run** — the command script is executed through `reviewer-runtime/run-review.sh` when installed 3. **Run** — the command script is executed through `reviewer-runtime/run-review.sh` when installed
4. **Feedback** — reviewer evaluates correctness, acceptance criteria, code quality, test coverage, security 4. **Feedback** — reviewer evaluates correctness, acceptance criteria, code quality, test coverage, security, and returns `## Summary`, `## Findings`, and `## Verdict`
5. **Revise** — the implementing agent addresses each issue, re-verifies, and re-submits 5. **Prioritize** — findings are ordered `P0`, `P1`, `P2`, `P3`
6. **Repeat**up to max rounds (default 10) until the reviewer returns `VERDICT: APPROVED` 6. **Revise**the implementing agent addresses findings in priority order, re-verifies, and re-submits
7. **Repeat** — up to max rounds (default 10) until the reviewer returns `VERDICT: APPROVED`
7. **Approve** — milestone is marked approved in `story-tracker.md` 8. **Approve** — milestone is marked approved in `story-tracker.md`
### Reviewer Output Contract
- `P0` = total blocker
- `P1` = major risk
- `P2` = must-fix before approval
- `P3` = cosmetic / nice to have
- Each severity section should use `- None.` when empty
- `VERDICT: APPROVED` is valid only when no `P0`, `P1`, or `P2` findings remain
- `P3` findings are non-blocking, but the caller should still try to fix them when cheap and safe
### Runtime Artifacts ### Runtime Artifacts
The milestone review flow may create these temp artifacts: The milestone review flow may create these temp artifacts:
@@ -165,23 +180,25 @@ The milestone review flow may create these temp artifacts:
Status log lines use this format: Status log lines use this format:
```text ```text
ts=<ISO-8601> level=<info|warn|error> state=<running-silent|running-active|stall-warning|completed|completed-empty-output|failed|needs-operator-decision> elapsed_s=<int> pid=<int> stdout_bytes=<int> stderr_bytes=<int> note="<short message>" ts=<ISO-8601> level=<info|warn|error> state=<running-silent|running-active|in-progress|stall-warning|completed|completed-empty-output|failed|needs-operator-decision> elapsed_s=<int> pid=<int> stdout_bytes=<int> stderr_bytes=<int> note="<short message>"
``` ```
`stall-warning` is a heartbeat/status-log state only. It is not a terminal review result. `in-progress` is the liveness heartbeat emitted roughly once per minute with `note="In progress N"`.
`stall-warning` is a non-terminal status-log state only. It does not mean the caller should stop waiting if `in-progress` heartbeats continue.
### Failure Handling ### Failure Handling
- `completed-empty-output` means the reviewer exited without producing review text; surface `.stderr` and `.status`, then retry only after diagnosing the cause. - `completed-empty-output` means the reviewer exited without producing review text; surface `.stderr` and `.status`, then retry only after diagnosing the cause.
- `needs-operator-decision` means the helper reached hard-timeout escalation; surface `.status` and decide whether to keep waiting, abort, or retry with different parameters. - `needs-operator-decision` means the helper reached hard-timeout escalation; surface `.status` and decide whether to extend the timeout, abort, or retry with different parameters.
- Successful rounds clean up temp artifacts. Failed, empty-output, and operator-decision rounds should retain `.stderr`, `.status`, and `.runner.out` until diagnosed. - Successful rounds clean up temp artifacts. Failed, empty-output, and operator-decision rounds should retain `.stderr`, `.status`, and `.runner.out` until diagnosed.
- As long as fresh `in-progress` heartbeats continue to arrive roughly once per minute, the caller should keep waiting.
### Supported Reviewer CLIs ### Supported Reviewer CLIs
| CLI | Command | Session Resume | Read-Only Mode | | CLI | Command | Session Resume | Read-Only Mode |
|---|---|---|---| |---|---|---|---|
| `codex` | `codex exec -m <model> -s read-only` | Yes (`codex exec resume <id>`) | `-s read-only` | | `codex` | `codex exec -m <model> -s read-only` | Yes (`codex exec resume <id>`) | `-s read-only` |
| `claude` | `claude -p --model <model> --allowedTools Read` | No (fresh call each round) | `--allowedTools Read` | | `claude` | `claude -p --model <model> --strict-mcp-config --setting-sources user` | No (fresh call each round) | `--strict-mcp-config --setting-sources user` |
| `cursor` | `cursor-agent -p --mode=ask --model <model> --trust --output-format json` | Yes (`--resume <id>`) | `--mode=ask` | | `cursor` | `cursor-agent -p --mode=ask --model <model> --trust --output-format json` | Yes (`--resume <id>`) | `--mode=ask` |
For all three CLIs, the preferred execution path is: For all three CLIs, the preferred execution path is:
@@ -190,6 +207,13 @@ For all three CLIs, the preferred execution path is:
2. run that script through `reviewer-runtime/run-review.sh` 2. run that script through `reviewer-runtime/run-review.sh`
3. fall back to direct synchronous execution only if the helper is missing or not executable 3. fall back to direct synchronous execution only if the helper is missing or not executable
## Notifications
- Telegram is the only supported notification path.
- Shared setup: [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md)
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
The helper also supports manual override flags for diagnostics: The helper also supports manual override flags for diagnostics:
```bash ```bash

View File

@@ -4,8 +4,10 @@ This directory contains user-facing docs for each skill.
## Index ## Index
- [ATLASSIAN.md](./ATLASSIAN.md) — Includes requirements, generated bundle sync, install, auth, safety rules, and usage examples for the Atlassian skill.
- [CREATE-PLAN.md](./CREATE-PLAN.md) — Includes requirements, install, verification, and execution workflow for create-plan. - [CREATE-PLAN.md](./CREATE-PLAN.md) — Includes requirements, install, verification, and execution workflow for create-plan.
- [IMPLEMENT-PLAN.md](./IMPLEMENT-PLAN.md) — Includes requirements, install, verification, and milestone review workflow for implement-plan. - [IMPLEMENT-PLAN.md](./IMPLEMENT-PLAN.md) — Includes requirements, install, verification, and milestone review workflow for implement-plan.
- [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md) — Shared Telegram notification setup used by reviewer-driven skills.
- [WEB-AUTOMATION.md](./WEB-AUTOMATION.md) — Includes requirements, install, dependency verification, and usage examples for web-automation. - [WEB-AUTOMATION.md](./WEB-AUTOMATION.md) — Includes requirements, install, dependency verification, and usage examples for web-automation.
## Repo Setup ## Repo Setup

View File

@@ -0,0 +1,97 @@
# TELEGRAM-NOTIFICATIONS
## Purpose
Shared setup for Telegram notifications used by reviewer-driven skills such as `create-plan` and `implement-plan`, both for completion and for pauses that need user attention.
## Requirements
- Telegram bot token in `TELEGRAM_BOT_TOKEN`
- Telegram chat id in `TELEGRAM_CHAT_ID`
- Notification helper installed beside the shared reviewer runtime:
- Codex: `~/.codex/skills/reviewer-runtime/notify-telegram.sh`
- Claude Code: `~/.claude/skills/reviewer-runtime/notify-telegram.sh`
- OpenCode: `~/.config/opencode/skills/reviewer-runtime/notify-telegram.sh`
- Cursor: `.cursor/skills/reviewer-runtime/notify-telegram.sh` or `~/.cursor/skills/reviewer-runtime/notify-telegram.sh`
## Install
The helper ships from `skills/reviewer-runtime/` together with `run-review.sh`.
### Codex
```bash
mkdir -p ~/.codex/skills/reviewer-runtime
cp -R skills/reviewer-runtime/* ~/.codex/skills/reviewer-runtime/
```
### Claude Code
```bash
mkdir -p ~/.claude/skills/reviewer-runtime
cp -R skills/reviewer-runtime/* ~/.claude/skills/reviewer-runtime/
```
### OpenCode
```bash
mkdir -p ~/.config/opencode/skills/reviewer-runtime
cp -R skills/reviewer-runtime/* ~/.config/opencode/skills/reviewer-runtime/
```
### Cursor
Repo-local install:
```bash
mkdir -p .cursor/skills/reviewer-runtime
cp -R skills/reviewer-runtime/* .cursor/skills/reviewer-runtime/
```
Global install:
```bash
mkdir -p ~/.cursor/skills/reviewer-runtime
cp -R skills/reviewer-runtime/* ~/.cursor/skills/reviewer-runtime/
```
## Verify Installation
```bash
test -x ~/.codex/skills/reviewer-runtime/notify-telegram.sh || true
test -x ~/.claude/skills/reviewer-runtime/notify-telegram.sh || true
test -x ~/.config/opencode/skills/reviewer-runtime/notify-telegram.sh || true
test -x .cursor/skills/reviewer-runtime/notify-telegram.sh || test -x ~/.cursor/skills/reviewer-runtime/notify-telegram.sh || true
```
## Configure Telegram
Export the required variables before running a skill that sends Telegram notifications:
```bash
export TELEGRAM_BOT_TOKEN="<bot-token>"
export TELEGRAM_CHAT_ID="<chat-id>"
```
Optional:
```bash
export TELEGRAM_API_BASE_URL="https://api.telegram.org"
```
## Test the Helper
Example:
```bash
TELEGRAM_BOT_TOKEN="<bot-token>" \
TELEGRAM_CHAT_ID="<chat-id>" \
skills/reviewer-runtime/notify-telegram.sh --message "Telegram notification test"
```
## Rules
- Telegram is the only supported notification path for these skills.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- Skills should report when Telegram is not configured instead of silently pretending a notification was sent.

View File

@@ -2,15 +2,25 @@
## Purpose ## Purpose
Automate browsing and scraping with Playwright + Camoufox. Automate rendered browsing, scraping, authentication, and multi-step browser flows with Playwright-compatible CloakBrowser.
## What Ships In Every Variant
- `browse.ts` for direct navigation and screenshots
- `auth.ts` for form and Microsoft SSO login flows
- `scrape.ts` for markdown extraction
- `flow.ts` for natural-language or JSON browser steps
- `extract.js` for one-shot rendered JSON extraction
- `check-install.js` for install and wiring validation
- `scan-local-app.ts` for configurable local-app smoke scans
## Requirements ## Requirements
- Node.js 20+ - Node.js 20+
- pnpm - pnpm
- `cloakbrowser`
- `playwright-core` - `playwright-core`
- `camoufox-js` - Network access to download the CloakBrowser binary on first use
- Network access to download Camoufox browser artifacts
## Install ## Install
@@ -21,8 +31,9 @@ mkdir -p ~/.codex/skills/web-automation
cp -R skills/web-automation/codex/* ~/.codex/skills/web-automation/ cp -R skills/web-automation/codex/* ~/.codex/skills/web-automation/
cd ~/.codex/skills/web-automation/scripts cd ~/.codex/skills/web-automation/scripts
pnpm install pnpm install
pnpm add playwright-core camoufox-js npx cloakbrowser install
npx camoufox-js fetch pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
``` ```
### Claude Code ### Claude Code
@@ -32,8 +43,9 @@ mkdir -p ~/.claude/skills/web-automation
cp -R skills/web-automation/claude-code/* ~/.claude/skills/web-automation/ cp -R skills/web-automation/claude-code/* ~/.claude/skills/web-automation/
cd ~/.claude/skills/web-automation/scripts cd ~/.claude/skills/web-automation/scripts
pnpm install pnpm install
pnpm add playwright-core camoufox-js npx cloakbrowser install
npx camoufox-js fetch pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
``` ```
### OpenCode ### OpenCode
@@ -43,25 +55,79 @@ mkdir -p ~/.config/opencode/skills/web-automation
cp -R skills/web-automation/opencode/* ~/.config/opencode/skills/web-automation/ cp -R skills/web-automation/opencode/* ~/.config/opencode/skills/web-automation/
cd ~/.config/opencode/skills/web-automation/scripts cd ~/.config/opencode/skills/web-automation/scripts
pnpm install pnpm install
pnpm add playwright-core camoufox-js npx cloakbrowser install
npx camoufox-js fetch pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
``` ```
## Verify Installation & Dependencies ## Update To The Latest CloakBrowser
Run inside the installed `scripts/` directory for the variant you are using:
```bash
pnpm up cloakbrowser playwright-core
npx cloakbrowser install
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
```
This repo intentionally treats `cloakbrowser` as a refreshable dependency: update to the latest available compatible release, then regenerate the lockfile from that resolved set.
## Verify Installation & Wiring
Run in the installed `scripts/` folder: Run in the installed `scripts/` folder:
```bash ```bash
node -e "require.resolve('playwright-core/package.json');require.resolve('camoufox-js/package.json');console.log('OK: playwright-core + camoufox-js installed')" node check-install.js
node -e "const fs=require('fs');const t=fs.readFileSync('browse.ts','utf8');if(!/camoufox-js/.test(t)){throw new Error('browse.ts is not configured for Camoufox')}console.log('OK: Camoufox integration detected in browse.ts')"
``` ```
If checks fail, stop and return: Expected checks:
"Missing dependency/config: web-automation requires `playwright-core` + `camoufox-js` and Camoufox-based scripts. Run setup in this skill, then retry." - `cloakbrowser` and `playwright-core` resolve correctly
- `browse.ts` is wired to CloakBrowser
If the check fails, stop and return:
"Missing dependency/config: web-automation requires `cloakbrowser` and `playwright-core` with CloakBrowser-based scripts. Run setup in this skill, then retry."
If runtime later fails with native-binding issues, run:
```bash
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
```
## Environment Variables
- `CLOAKBROWSER_PROFILE_PATH`
- `CLOAKBROWSER_HEADLESS`
- `CLOAKBROWSER_USERNAME`
- `CLOAKBROWSER_PASSWORD`
There are no `CAMOUFOX_*` compatibility aliases in this migration.
## Usage Examples ## Usage Examples
- Browse: `npx tsx browse.ts --url "https://example.com"` - Browse: `npx tsx browse.ts --url "https://example.com"`
- Scrape: `npx tsx scrape.ts --url "https://example.com" --mode main --output page.md` - Scrape markdown: `npx tsx scrape.ts --url "https://example.com" --mode main --output page.md`
- Auth: `npx tsx auth.ts --url "https://example.com/login"` - Authenticate: `npx tsx auth.ts --url "https://example.com/login"`
- Natural-language flow: `npx tsx flow.ts --instruction 'go to https://example.com then click on "Login"'`
- JSON extract: `node extract.js "https://example.com"`
- Local smoke scan: `SCAN_BASE_URL=http://localhost:3000 SCAN_ROUTES=/,/dashboard npx tsx scan-local-app.ts`
## Local App Smoke Scan
`scan-local-app.ts` is generic. Configure it with:
- `SCAN_BASE_URL`
- `SCAN_LOGIN_PATH`
- `SCAN_USERNAME`
- `SCAN_PASSWORD`
- `SCAN_USERNAME_SELECTOR`
- `SCAN_PASSWORD_SELECTOR`
- `SCAN_SUBMIT_SELECTOR`
- `SCAN_ROUTES`
- `SCAN_REPORT_PATH`
- `SCAN_HEADLESS`
If `SCAN_USERNAME` or `SCAN_PASSWORD` are omitted, the script falls back to `CLOAKBROWSER_USERNAME` and `CLOAKBROWSER_PASSWORD`.

View File

@@ -0,0 +1,78 @@
---
name: atlassian
description: Interact with Atlassian Cloud Jira and Confluence through a portable task-oriented CLI for search, issue/page edits, comments, transitions, and bounded raw requests.
---
# Atlassian (Claude Code)
Portable Atlassian workflows for Claude Code using a shared TypeScript CLI.
## Requirements
- Node.js 20+
- `pnpm`
- Atlassian Cloud account access
- `ATLASSIAN_BASE_URL`
- `ATLASSIAN_EMAIL`
- `ATLASSIAN_API_TOKEN`
The `ATLASSIAN_*` values may come from the shell environment or a `.env` file in `~/.claude/skills/atlassian/scripts`.
## First-Time Setup
```bash
mkdir -p ~/.claude/skills/atlassian
cp -R skills/atlassian/claude-code/* ~/.claude/skills/atlassian/
cd ~/.claude/skills/atlassian/scripts
pnpm install
```
## Prerequisite Check (MANDATORY)
```bash
cd ~/.claude/skills/atlassian/scripts
node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')"
node -e 'require("dotenv").config({ path: ".env" }); const required = ["ATLASSIAN_BASE_URL", "ATLASSIAN_EMAIL", "ATLASSIAN_API_TOKEN"]; const missing = required.filter((key) => !(process.env[key] || "").trim()); if (missing.length) { console.error("Missing required Atlassian config: " + missing.join(", ")); process.exit(1); } console.log("OK: Atlassian config present")'
pnpm atlassian health
```
If any check fails, stop and return:
`Missing dependency/config: atlassian requires installed CLI dependencies and valid Atlassian Cloud credentials. Configure ATLASSIAN_* in the shell environment or scripts/.env, then retry.`
## Supported Commands
- `pnpm atlassian health`
- `pnpm atlassian jira-search --jql "..."`
- `pnpm atlassian jira-get --issue ABC-123`
- `pnpm atlassian jira-create ... [--dry-run]`
- `pnpm atlassian jira-update ... [--dry-run]`
- `pnpm atlassian jira-comment ... [--dry-run]`
- `pnpm atlassian jira-transitions --issue ABC-123`
- `pnpm atlassian jira-transition ... [--dry-run]`
- `pnpm atlassian conf-search --query "..."`
- `pnpm atlassian conf-get --page 12345`
- `pnpm atlassian conf-create ... [--dry-run]`
- `pnpm atlassian conf-update ... [--dry-run]`
- `pnpm atlassian conf-comment ... [--dry-run]`
- `pnpm atlassian conf-children --page 12345`
- `pnpm atlassian raw --product jira|confluence --method GET|POST|PUT --path ...`
## Usage Examples
- `pnpm atlassian jira-search --jql "project = ENG ORDER BY updated DESC" --max-results 10`
- `pnpm atlassian conf-comment --page 12345 --body-file comment.storage.html --dry-run`
- `pnpm atlassian raw --product jira --method GET --path "/rest/api/3/issue/ENG-123"`
## Safety Rules
- Default output is JSON; only switch to text output when the user needs a human-readable summary.
- Use `--dry-run` before any write unless the user clearly asked for the mutation.
- Treat `raw` as an escape hatch, not the default API surface.
- `--body-file` must stay inside the current workspace.
- Confluence write bodies should be storage-format inputs in v1.
## Notes
- Atlassian Cloud is the primary supported platform in v1.
- The portable CLI exists so the same skill works consistently across multiple agent environments.

View File

@@ -0,0 +1,20 @@
{
"name": "atlassian-skill-scripts",
"version": "1.0.0",
"description": "Shared runtime for the Atlassian skill",
"type": "module",
"scripts": {
"atlassian": "tsx src/cli.ts",
"typecheck": "tsc --noEmit"
},
"dependencies": {
"commander": "^13.1.0",
"dotenv": "^16.4.7"
},
"devDependencies": {
"@types/node": "^24.3.0",
"tsx": "^4.20.5",
"typescript": "^5.9.2"
},
"packageManager": "pnpm@10.18.1+sha512.77a884a165cbba2d8d1c19e3b4880eee6d2fcabd0d879121e282196b80042351d5eb3ca0935fa599da1dc51265cc68816ad2bddd2a2de5ea9fdf92adbec7cd34"
}

View File

@@ -0,0 +1,361 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
importers:
.:
dependencies:
commander:
specifier: ^13.1.0
version: 13.1.0
dotenv:
specifier: ^16.4.7
version: 16.6.1
devDependencies:
'@types/node':
specifier: ^24.3.0
version: 24.12.0
tsx:
specifier: ^4.20.5
version: 4.21.0
typescript:
specifier: ^5.9.2
version: 5.9.3
packages:
'@esbuild/aix-ppc64@0.27.3':
resolution: {integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==}
engines: {node: '>=18'}
cpu: [ppc64]
os: [aix]
'@esbuild/android-arm64@0.27.3':
resolution: {integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [android]
'@esbuild/android-arm@0.27.3':
resolution: {integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==}
engines: {node: '>=18'}
cpu: [arm]
os: [android]
'@esbuild/android-x64@0.27.3':
resolution: {integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==}
engines: {node: '>=18'}
cpu: [x64]
os: [android]
'@esbuild/darwin-arm64@0.27.3':
resolution: {integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [darwin]
'@esbuild/darwin-x64@0.27.3':
resolution: {integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==}
engines: {node: '>=18'}
cpu: [x64]
os: [darwin]
'@esbuild/freebsd-arm64@0.27.3':
resolution: {integrity: sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==}
engines: {node: '>=18'}
cpu: [arm64]
os: [freebsd]
'@esbuild/freebsd-x64@0.27.3':
resolution: {integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==}
engines: {node: '>=18'}
cpu: [x64]
os: [freebsd]
'@esbuild/linux-arm64@0.27.3':
resolution: {integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [linux]
'@esbuild/linux-arm@0.27.3':
resolution: {integrity: sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==}
engines: {node: '>=18'}
cpu: [arm]
os: [linux]
'@esbuild/linux-ia32@0.27.3':
resolution: {integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==}
engines: {node: '>=18'}
cpu: [ia32]
os: [linux]
'@esbuild/linux-loong64@0.27.3':
resolution: {integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==}
engines: {node: '>=18'}
cpu: [loong64]
os: [linux]
'@esbuild/linux-mips64el@0.27.3':
resolution: {integrity: sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==}
engines: {node: '>=18'}
cpu: [mips64el]
os: [linux]
'@esbuild/linux-ppc64@0.27.3':
resolution: {integrity: sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==}
engines: {node: '>=18'}
cpu: [ppc64]
os: [linux]
'@esbuild/linux-riscv64@0.27.3':
resolution: {integrity: sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==}
engines: {node: '>=18'}
cpu: [riscv64]
os: [linux]
'@esbuild/linux-s390x@0.27.3':
resolution: {integrity: sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==}
engines: {node: '>=18'}
cpu: [s390x]
os: [linux]
'@esbuild/linux-x64@0.27.3':
resolution: {integrity: sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==}
engines: {node: '>=18'}
cpu: [x64]
os: [linux]
'@esbuild/netbsd-arm64@0.27.3':
resolution: {integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==}
engines: {node: '>=18'}
cpu: [arm64]
os: [netbsd]
'@esbuild/netbsd-x64@0.27.3':
resolution: {integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==}
engines: {node: '>=18'}
cpu: [x64]
os: [netbsd]
'@esbuild/openbsd-arm64@0.27.3':
resolution: {integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==}
engines: {node: '>=18'}
cpu: [arm64]
os: [openbsd]
'@esbuild/openbsd-x64@0.27.3':
resolution: {integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==}
engines: {node: '>=18'}
cpu: [x64]
os: [openbsd]
'@esbuild/openharmony-arm64@0.27.3':
resolution: {integrity: sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==}
engines: {node: '>=18'}
cpu: [arm64]
os: [openharmony]
'@esbuild/sunos-x64@0.27.3':
resolution: {integrity: sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==}
engines: {node: '>=18'}
cpu: [x64]
os: [sunos]
'@esbuild/win32-arm64@0.27.3':
resolution: {integrity: sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==}
engines: {node: '>=18'}
cpu: [arm64]
os: [win32]
'@esbuild/win32-ia32@0.27.3':
resolution: {integrity: sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==}
engines: {node: '>=18'}
cpu: [ia32]
os: [win32]
'@esbuild/win32-x64@0.27.3':
resolution: {integrity: sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==}
engines: {node: '>=18'}
cpu: [x64]
os: [win32]
'@types/node@24.12.0':
resolution: {integrity: sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==}
commander@13.1.0:
resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==}
engines: {node: '>=18'}
dotenv@16.6.1:
resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==}
engines: {node: '>=12'}
esbuild@0.27.3:
resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==}
engines: {node: '>=18'}
hasBin: true
fsevents@2.3.3:
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
os: [darwin]
get-tsconfig@4.13.6:
resolution: {integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==}
resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
tsx@4.21.0:
resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==}
engines: {node: '>=18.0.0'}
hasBin: true
typescript@5.9.3:
resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
engines: {node: '>=14.17'}
hasBin: true
undici-types@7.16.0:
resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==}
snapshots:
'@esbuild/aix-ppc64@0.27.3':
optional: true
'@esbuild/android-arm64@0.27.3':
optional: true
'@esbuild/android-arm@0.27.3':
optional: true
'@esbuild/android-x64@0.27.3':
optional: true
'@esbuild/darwin-arm64@0.27.3':
optional: true
'@esbuild/darwin-x64@0.27.3':
optional: true
'@esbuild/freebsd-arm64@0.27.3':
optional: true
'@esbuild/freebsd-x64@0.27.3':
optional: true
'@esbuild/linux-arm64@0.27.3':
optional: true
'@esbuild/linux-arm@0.27.3':
optional: true
'@esbuild/linux-ia32@0.27.3':
optional: true
'@esbuild/linux-loong64@0.27.3':
optional: true
'@esbuild/linux-mips64el@0.27.3':
optional: true
'@esbuild/linux-ppc64@0.27.3':
optional: true
'@esbuild/linux-riscv64@0.27.3':
optional: true
'@esbuild/linux-s390x@0.27.3':
optional: true
'@esbuild/linux-x64@0.27.3':
optional: true
'@esbuild/netbsd-arm64@0.27.3':
optional: true
'@esbuild/netbsd-x64@0.27.3':
optional: true
'@esbuild/openbsd-arm64@0.27.3':
optional: true
'@esbuild/openbsd-x64@0.27.3':
optional: true
'@esbuild/openharmony-arm64@0.27.3':
optional: true
'@esbuild/sunos-x64@0.27.3':
optional: true
'@esbuild/win32-arm64@0.27.3':
optional: true
'@esbuild/win32-ia32@0.27.3':
optional: true
'@esbuild/win32-x64@0.27.3':
optional: true
'@types/node@24.12.0':
dependencies:
undici-types: 7.16.0
commander@13.1.0: {}
dotenv@16.6.1: {}
esbuild@0.27.3:
optionalDependencies:
'@esbuild/aix-ppc64': 0.27.3
'@esbuild/android-arm': 0.27.3
'@esbuild/android-arm64': 0.27.3
'@esbuild/android-x64': 0.27.3
'@esbuild/darwin-arm64': 0.27.3
'@esbuild/darwin-x64': 0.27.3
'@esbuild/freebsd-arm64': 0.27.3
'@esbuild/freebsd-x64': 0.27.3
'@esbuild/linux-arm': 0.27.3
'@esbuild/linux-arm64': 0.27.3
'@esbuild/linux-ia32': 0.27.3
'@esbuild/linux-loong64': 0.27.3
'@esbuild/linux-mips64el': 0.27.3
'@esbuild/linux-ppc64': 0.27.3
'@esbuild/linux-riscv64': 0.27.3
'@esbuild/linux-s390x': 0.27.3
'@esbuild/linux-x64': 0.27.3
'@esbuild/netbsd-arm64': 0.27.3
'@esbuild/netbsd-x64': 0.27.3
'@esbuild/openbsd-arm64': 0.27.3
'@esbuild/openbsd-x64': 0.27.3
'@esbuild/openharmony-arm64': 0.27.3
'@esbuild/sunos-x64': 0.27.3
'@esbuild/win32-arm64': 0.27.3
'@esbuild/win32-ia32': 0.27.3
'@esbuild/win32-x64': 0.27.3
fsevents@2.3.3:
optional: true
get-tsconfig@4.13.6:
dependencies:
resolve-pkg-maps: 1.0.0
resolve-pkg-maps@1.0.0: {}
tsx@4.21.0:
dependencies:
esbuild: 0.27.3
get-tsconfig: 4.13.6
optionalDependencies:
fsevents: 2.3.3
typescript@5.9.3: {}
undici-types@7.16.0: {}

View File

@@ -0,0 +1,92 @@
// Node type tag for plain ADF text content.
const TEXT_NODE = "text";

/** Wrap a raw string in an ADF text node. */
function textNode(text: string) {
  return { type: TEXT_NODE, text };
}
/**
 * Build an ADF paragraph node from pre-split lines.
 *
 * Lines are joined with hardBreak nodes. An empty line contributes only its
 * break (no empty text node), and a fully empty input yields a paragraph
 * with no `content` key at all.
 */
function paragraphNode(lines: string[]) {
  const children: Array<{ type: string; text?: string }> = [];
  for (const [position, line] of lines.entries()) {
    if (position > 0) {
      children.push({ type: "hardBreak" });
    }
    if (line.length > 0) {
      children.push(textNode(line));
    }
  }
  if (children.length === 0) {
    return { type: "paragraph" };
  }
  return { type: "paragraph", content: children };
}
/**
 * Convert a small subset of Markdown into an Atlassian Document Format doc.
 *
 * Supported constructs:
 * - ATX headings (`#` through `######`)
 * - flat bullet lists (`-` or `*` markers)
 * - paragraphs; consecutive plain lines become one paragraph with hard
 *   breaks between them
 *
 * Blank lines separate blocks; anything unrecognized is treated as text.
 *
 * @param input Markdown source; CRLF line endings are normalized to LF.
 * @returns An ADF document node (`{ type: "doc", version: 1, content }`).
 */
export function markdownToAdf(input: string) {
  const HEADING_RE = /^(#{1,6})\s+(.*)$/;
  const BULLET_RE = /^[-*]\s+/;
  const lines = input.replace(/\r\n/g, "\n").split("\n");
  const content: Array<Record<string, unknown>> = [];
  let index = 0;
  while (index < lines.length) {
    const current = lines[index]?.trimEnd() ?? "";
    if (current.trim().length === 0) {
      // Blank line: block separator, nothing to emit.
      index += 1;
      continue;
    }
    const heading = current.match(HEADING_RE);
    if (heading) {
      content.push({
        type: "heading",
        attrs: { level: heading[1].length },
        content: [textNode(heading[2])],
      });
      index += 1;
      continue;
    }
    if (BULLET_RE.test(current)) {
      // Consume the whole run of bullet lines into one flat bulletList.
      const items: Array<Record<string, unknown>> = [];
      while (index < lines.length && BULLET_RE.test(lines[index] ?? "")) {
        items.push({
          type: "listItem",
          content: [
            {
              type: "paragraph",
              content: [textNode((lines[index] ?? "").replace(BULLET_RE, ""))],
            },
          ],
        });
        index += 1;
      }
      content.push({
        type: "bulletList",
        content: items,
      });
      continue;
    }
    // Paragraph: accumulate until a blank line OR a line that starts a new
    // block. Previously a heading or bullet line that directly followed a
    // paragraph (no blank line between) was swallowed into the paragraph;
    // per CommonMark, ATX headings and list markers interrupt paragraphs.
    const paragraphLines: string[] = [];
    while (index < lines.length) {
      const raw = lines[index] ?? "";
      if (raw.trim().length === 0) {
        break;
      }
      const trimmed = raw.trimEnd();
      if (HEADING_RE.test(trimmed) || BULLET_RE.test(trimmed)) {
        break;
      }
      paragraphLines.push(raw);
      index += 1;
    }
    content.push(paragraphNode(paragraphLines));
  }
  return {
    type: "doc",
    version: 1,
    content,
  };
}

View File

@@ -0,0 +1,332 @@
import process from "node:process";
import { pathToFileURL } from "node:url";
import { Command } from "commander";
import { createConfluenceClient } from "./confluence.js";
import { loadConfig } from "./config.js";
import { readWorkspaceFile } from "./files.js";
import { runHealthCheck } from "./health.js";
import { createJiraClient } from "./jira.js";
import { writeOutput } from "./output.js";
import { runRawCommand } from "./raw.js";
import type { FetchLike, OutputFormat, Writer } from "./types.js";
// Injection points for tests and embedders; createRuntime() falls back to
// the real process environment for every field left unset.
type CliContext = {
  // Working directory used to resolve the .env file and --body-file paths.
  cwd?: string;
  // Environment variable source (defaults to process.env).
  env?: NodeJS.ProcessEnv;
  // HTTP client override, passed through to the Jira/Confluence clients.
  fetchImpl?: FetchLike;
  // Command output sink (defaults to process.stdout).
  stdout?: Writer;
  // Diagnostics sink (defaults to process.stderr).
  stderr?: Writer;
};
/** Normalize a --format flag value: anything other than "text" means JSON. */
function resolveFormat(format: string | undefined): OutputFormat {
  if (format === "text") {
    return "text";
  }
  return "json";
}
/**
 * Resolve the effective runtime wiring (cwd, env, output streams, lazily
 * built config and API clients) for one CLI invocation. Config and clients
 * are created on first use and memoized for the runtime's lifetime.
 */
function createRuntime(context: CliContext) {
  const cwd = context.cwd ?? process.cwd();
  const env = context.env ?? process.env;
  const stdout = context.stdout ?? process.stdout;
  const stderr = context.stderr ?? process.stderr;

  let cachedConfig: ReturnType<typeof loadConfig> | undefined;
  let cachedJira: ReturnType<typeof createJiraClient> | undefined;
  let cachedConfluence: ReturnType<typeof createConfluenceClient> | undefined;

  function getConfig() {
    if (!cachedConfig) {
      cachedConfig = loadConfig(env, { cwd });
    }
    return cachedConfig;
  }

  function getJiraClient() {
    if (!cachedJira) {
      cachedJira = createJiraClient({
        config: getConfig(),
        fetchImpl: context.fetchImpl,
      });
    }
    return cachedJira;
  }

  function getConfluenceClient() {
    if (!cachedConfluence) {
      cachedConfluence = createConfluenceClient({
        config: getConfig(),
        fetchImpl: context.fetchImpl,
      });
    }
    return cachedConfluence;
  }

  // Read a --body-file / --description-file argument, confined to the workspace.
  async function readBodyFile(filePath: string | undefined) {
    return filePath ? readWorkspaceFile(filePath, cwd) : undefined;
  }

  return {
    cwd,
    stdout,
    stderr,
    readBodyFile,
    getConfig,
    getJiraClient,
    getConfluenceClient,
    fetchImpl: context.fetchImpl,
  };
}
/**
 * Build the commander program exposing the Atlassian CLI surface.
 *
 * Every command is a thin wrapper: parse flags, call the Jira/Confluence
 * client (or the raw/health helpers), and print the payload. All commands
 * accept `--format` (json|text, default json); mutating commands accept
 * `--dry-run` so the request can be previewed without being sent.
 *
 * @param context Optional overrides for cwd/env/streams/fetch (tests).
 * @returns The configured commander `Command` (not yet parsed).
 */
export function buildProgram(context: CliContext = {}) {
  const runtime = createRuntime(context);

  // Shared option decorators — keep flag names, help text, and defaults
  // identical across all commands instead of repeating the declarations.
  const withFormat = (command: Command) =>
    command.option("--format <format>", "Output format", "json");
  const withDryRun = (command: Command) =>
    command.option("--dry-run", "Print the request without sending it");
  const withPaging = (command: Command, startAtHelp = "Result offset") =>
    command
      .option("--max-results <number>", "Maximum results to return", "50")
      .option("--start-at <number>", startAtHelp, "0");

  // Print a payload honoring the per-command --format flag.
  const emit = (payload: unknown, format: string | undefined) => {
    writeOutput(runtime.stdout, payload, resolveFormat(format));
  };

  const program = new Command()
    .name("atlassian")
    .description("Portable Atlassian CLI for multi-agent skills")
    .version("0.1.0");

  withFormat(
    program
      .command("health")
      .description("Validate configuration and Atlassian connectivity"),
  ).action(async (options) => {
    emit(await runHealthCheck(runtime.getConfig(), runtime.fetchImpl), options.format);
  });

  withFormat(
    withPaging(
      program.command("conf-search").requiredOption("--query <query>", "CQL search query"),
    ),
  ).action(async (options) => {
    emit(
      await runtime.getConfluenceClient().searchPages({
        query: options.query,
        maxResults: Number(options.maxResults),
        startAt: Number(options.startAt),
      }),
      options.format,
    );
  });

  withFormat(
    program.command("conf-get").requiredOption("--page <page>", "Confluence page ID"),
  ).action(async (options) => {
    emit(await runtime.getConfluenceClient().getPage(options.page), options.format);
  });

  withFormat(
    withDryRun(
      program
        .command("conf-create")
        .requiredOption("--title <title>", "Confluence page title")
        .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
        .option("--space <space>", "Confluence space ID"),
    ),
  ).action(async (options) => {
    emit(
      await runtime.getConfluenceClient().createPage({
        space: options.space,
        title: options.title,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      }),
      options.format,
    );
  });

  withFormat(
    withDryRun(
      program
        .command("conf-update")
        .requiredOption("--page <page>", "Confluence page ID")
        .requiredOption("--title <title>", "Confluence page title")
        .requiredOption("--body-file <path>", "Workspace-relative storage-format body file"),
    ),
  ).action(async (options) => {
    emit(
      await runtime.getConfluenceClient().updatePage({
        pageId: options.page,
        title: options.title,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      }),
      options.format,
    );
  });

  withFormat(
    withDryRun(
      program
        .command("conf-comment")
        .requiredOption("--page <page>", "Confluence page ID")
        .requiredOption("--body-file <path>", "Workspace-relative storage-format body file"),
    ),
  ).action(async (options) => {
    emit(
      await runtime.getConfluenceClient().commentPage({
        pageId: options.page,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      }),
      options.format,
    );
  });

  withFormat(
    withPaging(
      program.command("conf-children").requiredOption("--page <page>", "Confluence page ID"),
      "Cursor/start token",
    ),
  ).action(async (options) => {
    emit(
      await runtime.getConfluenceClient().listChildren(
        options.page,
        Number(options.maxResults),
        Number(options.startAt),
      ),
      options.format,
    );
  });

  withFormat(
    withDryRun(
      program
        .command("raw")
        .requiredOption("--product <product>", "jira or confluence")
        .requiredOption("--method <method>", "GET, POST, or PUT")
        .requiredOption("--path <path>", "Validated API path")
        .option("--body-file <path>", "Workspace-relative JSON file"),
    ),
  ).action(async (options) => {
    emit(
      await runRawCommand(runtime.getConfig(), runtime.fetchImpl, {
        product: options.product,
        method: String(options.method).toUpperCase(),
        path: options.path,
        bodyFile: options.bodyFile,
        cwd: runtime.cwd,
        dryRun: Boolean(options.dryRun),
      }),
      options.format,
    );
  });

  withFormat(
    withPaging(
      program.command("jira-search").requiredOption("--jql <jql>", "JQL expression to execute"),
    ),
  ).action(async (options) => {
    emit(
      await runtime.getJiraClient().searchIssues({
        jql: options.jql,
        maxResults: Number(options.maxResults),
        startAt: Number(options.startAt),
      }),
      options.format,
    );
  });

  withFormat(
    program.command("jira-get").requiredOption("--issue <issue>", "Issue key"),
  ).action(async (options) => {
    emit(await runtime.getJiraClient().getIssue(options.issue), options.format);
  });

  withFormat(
    withDryRun(
      program
        .command("jira-create")
        .requiredOption("--type <type>", "Issue type name")
        .requiredOption("--summary <summary>", "Issue summary")
        .option("--project <project>", "Project key")
        .option("--description-file <path>", "Workspace-relative markdown/text file"),
    ),
  ).action(async (options) => {
    emit(
      await runtime.getJiraClient().createIssue({
        project: options.project,
        type: options.type,
        summary: options.summary,
        description: await runtime.readBodyFile(options.descriptionFile),
        dryRun: Boolean(options.dryRun),
      }),
      options.format,
    );
  });

  withFormat(
    withDryRun(
      program
        .command("jira-update")
        .requiredOption("--issue <issue>", "Issue key")
        .option("--summary <summary>", "Updated summary")
        .option("--description-file <path>", "Workspace-relative markdown/text file"),
    ),
  ).action(async (options) => {
    emit(
      await runtime.getJiraClient().updateIssue({
        issue: options.issue,
        summary: options.summary,
        description: await runtime.readBodyFile(options.descriptionFile),
        dryRun: Boolean(options.dryRun),
      }),
      options.format,
    );
  });

  withFormat(
    withDryRun(
      program
        .command("jira-comment")
        .requiredOption("--issue <issue>", "Issue key")
        .requiredOption("--body-file <path>", "Workspace-relative markdown/text file"),
    ),
  ).action(async (options) => {
    emit(
      await runtime.getJiraClient().commentIssue({
        issue: options.issue,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      }),
      options.format,
    );
  });

  withFormat(
    program.command("jira-transitions").requiredOption("--issue <issue>", "Issue key"),
  ).action(async (options) => {
    emit(await runtime.getJiraClient().getTransitions(options.issue), options.format);
  });

  withFormat(
    withDryRun(
      program
        .command("jira-transition")
        .requiredOption("--issue <issue>", "Issue key")
        .requiredOption("--transition <transition>", "Transition ID"),
    ),
  ).action(async (options) => {
    emit(
      await runtime.getJiraClient().transitionIssue({
        issue: options.issue,
        transition: options.transition,
        dryRun: Boolean(options.dryRun),
      }),
      options.format,
    );
  });

  return program;
}
/**
 * Parse argv and execute the matching command.
 *
 * @param argv Full argv vector (defaults to process.argv).
 * @param context Optional overrides for cwd/env/streams/fetch.
 */
export async function runCli(argv = process.argv, context: CliContext = {}) {
  await buildProgram(context).parseAsync(argv);
}
// Run the CLI only when this module is the entry script, not when imported.
const entryScript = process.argv[1];
if (entryScript && pathToFileURL(entryScript).href === import.meta.url) {
  runCli().catch((error: unknown) => {
    const message = error instanceof Error ? error.message : String(error);
    process.stderr.write(`${message}\n`);
    process.exitCode = 1;
  });
}

View File

@@ -0,0 +1,52 @@
import path from "node:path";
import { config as loadDotEnv } from "dotenv";
import type { AtlassianConfig } from "./types.js";
/** Strip trailing slashes so API paths can be appended predictably. */
function normalizeBaseUrl(value: string) {
  let result = value;
  while (result.endsWith("/")) {
    result = result.slice(0, -1);
  }
  return result;
}
/**
 * Read a mandatory environment variable, trimmed.
 *
 * @throws Error when the variable is unset, empty, or whitespace-only.
 */
function readRequired(env: NodeJS.ProcessEnv, key: string) {
  const raw = env[key];
  const trimmed = raw === undefined ? "" : raw.trim();
  if (trimmed === "") {
    throw new Error(`Missing required environment variable: ${key}`);
  }
  return trimmed;
}
// Build the resolved Atlassian configuration from the environment.
// A <cwd>/.env file (if present) only fills in keys the caller has not set
// (override: false). Product base URLs fall back to the site base URL;
// blank values are treated the same as unset.
export function loadConfig(
  env: NodeJS.ProcessEnv = process.env,
  options?: {
    cwd?: string;
  },
): AtlassianConfig {
  const envFile = path.resolve(options?.cwd ?? process.cwd(), ".env");
  loadDotEnv({
    path: envFile,
    processEnv: env as Record<string, string>,
    override: false,
  });
  // Treat empty/whitespace-only optional values as absent.
  const optional = (key: string) => env[key]?.trim() || undefined;
  const baseUrl = normalizeBaseUrl(readRequired(env, "ATLASSIAN_BASE_URL"));
  return {
    baseUrl,
    jiraBaseUrl: normalizeBaseUrl(optional("ATLASSIAN_JIRA_BASE_URL") ?? baseUrl),
    confluenceBaseUrl: normalizeBaseUrl(optional("ATLASSIAN_CONFLUENCE_BASE_URL") ?? baseUrl),
    email: readRequired(env, "ATLASSIAN_EMAIL"),
    apiToken: readRequired(env, "ATLASSIAN_API_TOKEN"),
    defaultProject: optional("ATLASSIAN_DEFAULT_PROJECT"),
    defaultSpace: optional("ATLASSIAN_DEFAULT_SPACE"),
  };
}
// Build an RFC 7617 Basic Authorization header value: base64("email:apiToken").
// Accepts any object carrying email/apiToken so a full AtlassianConfig works too.
export function createBasicAuthHeader(config: {
  email: string;
  apiToken: string;
  [key: string]: unknown;
}) {
  const credentials = `${config.email}:${config.apiToken}`;
  const encoded = Buffer.from(credentials, "utf8").toString("base64");
  return `Basic ${encoded}`;
}

View File

@@ -0,0 +1,292 @@
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
type ConfluenceClientOptions = {
config: AtlassianConfig;
fetchImpl?: FetchLike;
};
type SearchInput = {
query: string;
maxResults: number;
startAt: number;
};
type CreateInput = {
space?: string;
title: string;
body: string;
dryRun?: boolean;
};
type UpdateInput = {
pageId: string;
title: string;
body: string;
dryRun?: boolean;
};
type CommentInput = {
pageId: string;
body: string;
dryRun?: boolean;
};
type PageSummary = {
id: string;
title: string;
type: string;
status?: string;
spaceId?: string;
url?: string;
};
// Resolve an absolute API path against the Confluence base URL; the appended
// trailing slash makes relative resolution behave predictably.
function buildUrl(baseUrl: string, path: string) {
  const root = new URL(`${baseUrl}/`);
  return new URL(path, root).toString();
}
// Flatten a Confluence page payload into a compact summary.
// Optional fields (status, spaceId, excerpt, url) are included only when
// present so JSON output stays sparse; the webui link is made absolute
// against the site base URL.
function normalizePage(baseUrl: string, page: Record<string, unknown>, excerpt?: string) {
  const links = (page._links ?? {}) as Record<string, unknown>;
  const optional: Record<string, string> = {};
  if (page.status) {
    optional.status = String(page.status);
  }
  if (page.spaceId) {
    optional.spaceId = String(page.spaceId);
  }
  if (excerpt) {
    optional.excerpt = excerpt;
  }
  if (links.webui) {
    optional.url = `${baseUrl}${String(links.webui)}`;
  }
  return {
    id: String(page.id ?? ""),
    title: String(page.title ?? ""),
    type: String(page.type ?? "page"),
    ...optional,
  };
}
// Factory for a Confluence Cloud client bound to a single AtlassianConfig.
// Every method resolves to a CommandOutput envelope; mutating methods honor
// dryRun by returning the prepared request instead of sending it.
export function createConfluenceClient(options: ConfluenceClientOptions) {
  const config = options.config;
  // Fetch the page with its storage-format body so updatePage can read the
  // current version number and space before writing.
  async function getPageForUpdate(pageId: string) {
    return (await sendJsonRequest({
      config,
      fetchImpl: options.fetchImpl,
      url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${pageId}?body-format=storage`),
      method: "GET",
      errorPrefix: "Confluence request failed",
    })) as Record<string, unknown>;
  }
  return {
    // CQL search through the v1 REST endpoint; results are normalized from the
    // nested `content` object, with the search excerpt attached when present.
    async searchPages(input: SearchInput): Promise<CommandOutput<unknown>> {
      const url = new URL("/wiki/rest/api/search", `${config.confluenceBaseUrl}/`);
      url.searchParams.set("cql", input.query);
      url.searchParams.set("limit", String(input.maxResults));
      url.searchParams.set("start", String(input.startAt));
      const raw = (await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: url.toString(),
        method: "GET",
        errorPrefix: "Confluence request failed",
      })) as Record<string, unknown>;
      const results = Array.isArray(raw.results) ? raw.results : [];
      return {
        ok: true,
        data: {
          pages: results.map((entry) => {
            const result = entry as Record<string, unknown>;
            return normalizePage(
              config.baseUrl,
              (result.content ?? {}) as Record<string, unknown>,
              result.excerpt ? String(result.excerpt) : undefined,
            );
          }),
          startAt: Number(raw.start ?? input.startAt),
          maxResults: Number(raw.limit ?? input.maxResults),
          // v1 search reports totalSize; fall back to size, then the page count.
          total: Number(raw.totalSize ?? raw.size ?? results.length),
        },
      };
    },
    // Fetch one page (v2 API) including its storage-format body and version.
    async getPage(pageId: string): Promise<CommandOutput<unknown>> {
      const raw = (await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${pageId}?body-format=storage`),
        method: "GET",
        errorPrefix: "Confluence request failed",
      })) as Record<string, unknown>;
      const body = ((raw.body ?? {}) as Record<string, unknown>).storage as Record<string, unknown> | undefined;
      return {
        ok: true,
        data: {
          page: {
            ...normalizePage(config.baseUrl, raw),
            version: Number((((raw.version ?? {}) as Record<string, unknown>).number ?? 0)),
            body: body?.value ? String(body.value) : "",
          },
        },
        raw,
      };
    },
    // List a page's direct children (v2 API).
    // NOTE(review): v2 pagination cursors are opaque strings returned in
    // _links.next, but this passes the numeric startAt as `cursor` — likely
    // only correct for the first page. Confirm against the API.
    async listChildren(pageId: string, maxResults: number, startAt: number): Promise<CommandOutput<unknown>> {
      const url = new URL(`/wiki/api/v2/pages/${pageId}/direct-children`, `${config.confluenceBaseUrl}/`);
      url.searchParams.set("limit", String(maxResults));
      url.searchParams.set("cursor", String(startAt));
      const raw = (await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: url.toString(),
        method: "GET",
        errorPrefix: "Confluence request failed",
      })) as Record<string, unknown>;
      const results = Array.isArray(raw.results) ? raw.results : [];
      const links = (raw._links ?? {}) as Record<string, unknown>;
      return {
        ok: true,
        data: {
          pages: results.map((page) => normalizePage(config.baseUrl, page as Record<string, unknown>)),
          nextCursor: links.next ? String(links.next) : null,
        },
      };
    },
    // Create a page in storage format; space falls back to the configured default.
    async createPage(input: CreateInput): Promise<CommandOutput<unknown>> {
      const spaceId = input.space || config.defaultSpace;
      if (!spaceId) {
        throw new Error("conf-create requires --space or ATLASSIAN_DEFAULT_SPACE");
      }
      const request = {
        method: "POST" as const,
        url: buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/pages"),
        body: {
          spaceId,
          title: input.title,
          status: "current",
          body: {
            representation: "storage",
            value: input.body,
          },
        },
      };
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: request.url,
        method: request.method,
        body: request.body,
        errorPrefix: "Confluence request failed",
      });
      return {
        ok: true,
        data: raw,
      };
    },
    // Replace a page's title/body: reads the current version, then writes
    // version+1. A 409 from the API is mapped to a concurrent-edit error.
    async updatePage(input: UpdateInput): Promise<CommandOutput<unknown>> {
      const currentPage = await getPageForUpdate(input.pageId);
      const version = (((currentPage.version ?? {}) as Record<string, unknown>).number ?? 0) as number;
      const spaceId = String(currentPage.spaceId ?? "");
      const request = {
        method: "PUT" as const,
        url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${input.pageId}`),
        body: {
          id: input.pageId,
          status: String(currentPage.status ?? "current"),
          title: input.title,
          spaceId,
          version: {
            number: Number(version) + 1,
          },
          body: {
            representation: "storage",
            value: input.body,
          },
        },
      };
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: request.url,
        method: request.method,
        body: request.body,
        errorPrefix: "Confluence request failed",
        handleResponseError(response) {
          if (response.status === 409) {
            return new Error(`Confluence update conflict: page ${input.pageId} was updated by someone else`);
          }
          return undefined;
        },
      });
      return {
        ok: true,
        data: raw,
      };
    },
    // Add a footer comment (storage format) to a page.
    async commentPage(input: CommentInput): Promise<CommandOutput<unknown>> {
      const request = {
        method: "POST" as const,
        url: buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/footer-comments"),
        body: {
          pageId: input.pageId,
          body: {
            representation: "storage",
            value: input.body,
          },
        },
      };
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: request.url,
        method: request.method,
        body: request.body,
        errorPrefix: "Confluence request failed",
      });
      return {
        ok: true,
        data: raw,
      };
    },
  };
}

View File

@@ -0,0 +1,13 @@
import { readFile } from "node:fs/promises";
import path from "node:path";
// Read a UTF-8 text file, refusing any path that resolves outside the
// workspace root (rejects "../" escapes and absolute paths on other drives).
// NOTE(review): symlinks are not resolved before the containment check —
// confirm that is acceptable for this skill's threat model.
export async function readWorkspaceFile(filePath: string, cwd: string) {
  const resolved = path.resolve(cwd, filePath);
  const relativePath = path.relative(cwd, resolved);
  const escapesWorkspace = relativePath.startsWith("..") || path.isAbsolute(relativePath);
  if (escapesWorkspace) {
    throw new Error(`--body-file must stay within the active workspace: ${filePath}`);
  }
  return readFile(resolved, "utf8");
}

View File

@@ -0,0 +1,69 @@
import { createJsonHeaders, createStatusError } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
type ProductHealth = {
ok: boolean;
status?: number;
message?: string;
};
// Join an API path onto a product base URL; the trailing slash keeps URL
// resolution from dropping the last base segment.
function buildUrl(baseUrl: string, path: string) {
  return String(new URL(path, `${baseUrl}/`));
}
// Probe Jira and Confluence with cheap authenticated GETs and report a
// combined health summary. Probe failures are captured in the result rather
// than thrown, so a partial outage still produces structured output.
export async function runHealthCheck(
  config: AtlassianConfig,
  fetchImpl: FetchLike | undefined,
): Promise<CommandOutput<unknown>> {
  const client = fetchImpl ?? globalThis.fetch;
  if (!client) {
    throw new Error("Fetch API is not available in this runtime");
  }
  // One authenticated GET per product; network errors and non-2xx statuses
  // both collapse into { ok: false, message }.
  async function probe(product: "Jira" | "Confluence", url: string): Promise<ProductHealth> {
    try {
      const response = await client(url, {
        method: "GET",
        headers: createJsonHeaders(config, false),
      });
      if (!response.ok) {
        const error = createStatusError(`${product} health check failed`, response);
        return {
          ok: false,
          status: response.status,
          message: error.message,
        };
      }
      return {
        ok: true,
        status: response.status,
      };
    } catch (error: unknown) {
      return {
        ok: false,
        message: error instanceof Error ? error.message : String(error),
      };
    }
  }
  // /myself verifies Jira credentials; listing one space verifies Confluence.
  const jira = await probe("Jira", buildUrl(config.jiraBaseUrl, "/rest/api/3/myself"));
  const confluence = await probe("Confluence", buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/spaces?limit=1"));
  return {
    // Overall health requires both products to pass.
    ok: jira.ok && confluence.ok,
    data: {
      baseUrl: config.baseUrl,
      jiraBaseUrl: config.jiraBaseUrl,
      confluenceBaseUrl: config.confluenceBaseUrl,
      defaultProject: config.defaultProject,
      defaultSpace: config.defaultSpace,
      products: {
        jira,
        confluence,
      },
    },
  };
}

View File

@@ -0,0 +1,86 @@
import { createBasicAuthHeader } from "./config.js";
import type { AtlassianConfig, FetchLike } from "./types.js";
export type HttpMethod = "GET" | "POST" | "PUT";
// Build the standard request headers: JSON Accept plus Basic auth, with a
// JSON Content-Type appended only when a request body will be sent.
export function createJsonHeaders(config: AtlassianConfig, includeJsonBody: boolean) {
  const base: Array<[string, string]> = [
    ["Accept", "application/json"],
    ["Authorization", createBasicAuthHeader(config)],
  ];
  if (!includeJsonBody) {
    return base;
  }
  return [...base, ["Content-Type", "application/json"] as [string, string]];
}
// Decode an HTTP response body: null for 204 No Content, parsed JSON when the
// content type says JSON (raising on malformed bodies), raw text otherwise.
export async function parseResponse(response: Response) {
  if (response.status === 204) {
    return null;
  }
  const contentType = response.headers.get("content-type") ?? "";
  if (!contentType.includes("application/json")) {
    return response.text();
  }
  try {
    return await response.json();
  } catch {
    throw new Error("Malformed JSON response from Atlassian API");
  }
}
// Map an HTTP error response to an Error with an actionable hint for the
// common failure statuses; other statuses get the bare "prefix: status" form.
export function createStatusError(errorPrefix: string, response: Response) {
  const hints: Record<number, string> = {
    401: "check ATLASSIAN_EMAIL and ATLASSIAN_API_TOKEN",
    403: "verify product permissions for this account",
    404: "verify the resource identifier or API path",
    429: "retry later or reduce request rate",
  };
  const base = `${errorPrefix}: ${response.status} ${response.statusText}`;
  const hint = hints[response.status];
  return new Error(hint ? `${base} - ${hint}` : base);
}
// Perform one authenticated JSON request and decode the response.
// On non-2xx statuses, handleResponseError (if given) may supply a custom
// Error for specific statuses; otherwise the generic status mapping applies.
export async function sendJsonRequest(options: {
  config: AtlassianConfig;
  fetchImpl?: FetchLike;
  url: string;
  method: HttpMethod;
  body?: unknown;
  errorPrefix: string;
  handleResponseError?: (response: Response) => Error | undefined;
}) {
  const client = options.fetchImpl ?? globalThis.fetch;
  if (!client) {
    throw new Error("Fetch API is not available in this runtime");
  }
  const hasBody = options.body !== undefined;
  const init: RequestInit = {
    method: options.method,
    headers: createJsonHeaders(options.config, hasBody),
  };
  if (hasBody) {
    init.body = JSON.stringify(options.body);
  }
  const response = await client(options.url, init);
  if (response.ok) {
    return parseResponse(response);
  }
  throw options.handleResponseError?.(response) ?? createStatusError(options.errorPrefix, response);
}

View File

@@ -0,0 +1,264 @@
import { markdownToAdf } from "./adf.js";
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike, JiraIssueSummary } from "./types.js";
const ISSUE_FIELDS = ["summary", "issuetype", "status", "assignee", "created", "updated"] as const;
type JiraClientOptions = {
config: AtlassianConfig;
fetchImpl?: FetchLike;
};
type SearchInput = {
jql: string;
maxResults: number;
startAt: number;
};
type CreateInput = {
project?: string;
type: string;
summary: string;
description?: string;
dryRun?: boolean;
};
type UpdateInput = {
issue: string;
summary?: string;
description?: string;
dryRun?: boolean;
};
type CommentInput = {
issue: string;
body: string;
dryRun?: boolean;
};
type TransitionInput = {
issue: string;
transition: string;
dryRun?: boolean;
};
// Flatten a raw Jira issue payload into the summary shape used by CLI output.
// Missing fields degrade to empty strings; assignee stays undefined when the
// issue is unassigned. The browse URL is built from the site base URL.
function normalizeIssue(config: AtlassianConfig, issue: Record<string, unknown>): JiraIssueSummary {
  const fields = (issue.fields ?? {}) as Record<string, unknown>;
  const nameOf = (value: unknown) =>
    String(((value ?? {}) as Record<string, unknown>).name ?? "");
  const assignee = (fields.assignee ?? {}) as Record<string, unknown>;
  return {
    key: String(issue.key ?? ""),
    summary: String(fields.summary ?? ""),
    issueType: nameOf(fields.issuetype),
    status: nameOf(fields.status),
    assignee: assignee.displayName ? String(assignee.displayName) : undefined,
    created: String(fields.created ?? ""),
    updated: String(fields.updated ?? ""),
    url: `${config.baseUrl}/browse/${issue.key ?? ""}`,
  };
}
// Describe a Jira API request as {method, url[, body]}; the body key is
// attached only when a body is provided so dry-run previews stay minimal.
function createRequest(config: AtlassianConfig, method: "GET" | "POST" | "PUT", path: string, body?: unknown) {
  const target = new URL(path, `${config.jiraBaseUrl}/`);
  const request: { method: typeof method; url: string; body?: unknown } = {
    method,
    url: target.toString(),
  };
  if (body !== undefined) {
    request.body = body;
  }
  return request;
}
// Factory for a Jira Cloud client bound to a single AtlassianConfig.
// Every method resolves to a CommandOutput envelope; mutating methods honor
// dryRun by returning the prepared request instead of sending it. Long-text
// inputs (description, comment bodies) are converted to ADF locally.
export function createJiraClient(options: JiraClientOptions) {
  const fetchImpl = options.fetchImpl ?? globalThis.fetch;
  if (!fetchImpl) {
    throw new Error("Fetch API is not available in this runtime");
  }
  // Shared transport: build the URL from the Jira base and send JSON.
  async function send(method: "GET" | "POST" | "PUT", path: string, body?: unknown) {
    const request = createRequest(options.config, method, path, body);
    return sendJsonRequest({
      config: options.config,
      fetchImpl,
      url: request.url,
      method,
      body,
      errorPrefix: "Jira request failed",
    });
  }
  return {
    // JQL search, restricted to the summary fields the CLI renders.
    // NOTE(review): Atlassian has deprecated POST /rest/api/3/search in favor
    // of /rest/api/3/search/jql — confirm before the removal date.
    async searchIssues(input: SearchInput): Promise<CommandOutput<unknown>> {
      const raw = (await send("POST", "/rest/api/3/search", {
        jql: input.jql,
        maxResults: input.maxResults,
        startAt: input.startAt,
        fields: [...ISSUE_FIELDS],
      })) as Record<string, unknown>;
      const issues = Array.isArray(raw.issues) ? raw.issues : [];
      return {
        ok: true,
        data: {
          issues: issues.map((issue) => normalizeIssue(options.config, issue as Record<string, unknown>)),
          startAt: Number(raw.startAt ?? input.startAt),
          maxResults: Number(raw.maxResults ?? input.maxResults),
          total: Number(raw.total ?? issues.length),
        },
      };
    },
    // Fetch a single issue; the unnormalized API payload is attached as `raw`.
    async getIssue(issue: string): Promise<CommandOutput<unknown>> {
      // URL object used here only to encode the fields query string.
      const url = new URL(`/rest/api/3/issue/${issue}`, `${options.config.jiraBaseUrl}/`);
      url.searchParams.set("fields", ISSUE_FIELDS.join(","));
      const raw = (await send("GET", `${url.pathname}${url.search}`)) as Record<string, unknown>;
      return {
        ok: true,
        data: {
          issue: normalizeIssue(options.config, raw),
        },
        raw,
      };
    },
    // List the workflow transitions currently available on an issue.
    async getTransitions(issue: string): Promise<CommandOutput<unknown>> {
      const raw = (await send(
        "GET",
        `/rest/api/3/issue/${issue}/transitions`,
      )) as { transitions?: Array<Record<string, unknown>> };
      return {
        ok: true,
        data: {
          transitions: (raw.transitions ?? []).map((transition) => ({
            id: String(transition.id ?? ""),
            name: String(transition.name ?? ""),
            toStatus: String(((transition.to ?? {}) as Record<string, unknown>).name ?? ""),
            hasScreen: Boolean(transition.hasScreen),
          })),
        },
      };
    },
    // Create an issue; project falls back to the configured default, and the
    // optional description is converted to ADF.
    async createIssue(input: CreateInput): Promise<CommandOutput<unknown>> {
      const project = input.project || options.config.defaultProject;
      if (!project) {
        throw new Error("jira-create requires --project or ATLASSIAN_DEFAULT_PROJECT");
      }
      const request = createRequest(options.config, "POST", "/rest/api/3/issue", {
        fields: {
          project: { key: project },
          issuetype: { name: input.type },
          summary: input.summary,
          ...(input.description ? { description: markdownToAdf(input.description) } : {}),
        },
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await send("POST", "/rest/api/3/issue", request.body);
      return { ok: true, data: raw };
    },
    // Update summary and/or description; rejects a no-op call with no fields.
    async updateIssue(input: UpdateInput): Promise<CommandOutput<unknown>> {
      const fields: Record<string, unknown> = {};
      if (input.summary) {
        fields.summary = input.summary;
      }
      if (input.description) {
        fields.description = markdownToAdf(input.description);
      }
      if (Object.keys(fields).length === 0) {
        throw new Error("jira-update requires --summary and/or --description-file");
      }
      const request = createRequest(options.config, "PUT", `/rest/api/3/issue/${input.issue}`, {
        fields,
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      // Jira returns 204 on success, so report a synthetic confirmation.
      await send("PUT", `/rest/api/3/issue/${input.issue}`, request.body);
      return {
        ok: true,
        data: {
          issue: input.issue,
          updated: true,
        },
      };
    },
    // Post a comment (markdown converted to ADF).
    async commentIssue(input: CommentInput): Promise<CommandOutput<unknown>> {
      const request = createRequest(options.config, "POST", `/rest/api/3/issue/${input.issue}/comment`, {
        body: markdownToAdf(input.body),
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await send("POST", `/rest/api/3/issue/${input.issue}/comment`, request.body);
      return {
        ok: true,
        data: raw,
      };
    },
    // Apply a workflow transition by transition ID (see getTransitions).
    async transitionIssue(input: TransitionInput): Promise<CommandOutput<unknown>> {
      const request = createRequest(
        options.config,
        "POST",
        `/rest/api/3/issue/${input.issue}/transitions`,
        {
          transition: {
            id: input.transition,
          },
        },
      );
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      // Transition endpoint returns 204; report a synthetic confirmation.
      await send("POST", `/rest/api/3/issue/${input.issue}/transitions`, request.body);
      return {
        ok: true,
        data: {
          issue: input.issue,
          transitioned: true,
          transition: input.transition,
        },
      };
    },
  };
}

View File

@@ -0,0 +1,44 @@
import type { CommandOutput, OutputFormat, Writer } from "./types.js";
// Render a terse human-readable view for the payload shapes the CLI knows
// about (issue lists, a single issue, transition lists); any other payload
// falls back to pretty-printed JSON of the whole envelope.
function renderText(payload: CommandOutput<unknown>) {
  const data = payload.data as Record<string, unknown>;
  if (Array.isArray(data?.issues)) {
    const rows = data.issues.map((entry) => {
      const issue = entry as Record<string, string>;
      return `${issue.key} [${issue.status}] ${issue.issueType} - ${issue.summary}`;
    });
    return rows.join("\n");
  }
  if (data?.issue && typeof data.issue === "object") {
    const detail = data.issue as Record<string, string>;
    const lines = [
      detail.key,
      `${detail.issueType} | ${detail.status}`,
      detail.summary,
      detail.url,
    ];
    return lines.join("\n");
  }
  if (Array.isArray(data?.transitions)) {
    const rows = data.transitions.map((entry) => {
      const transition = entry as Record<string, string>;
      return `${transition.id} ${transition.name} -> ${transition.toStatus}`;
    });
    return rows.join("\n");
  }
  return JSON.stringify(payload, null, 2);
}
// Serialize the payload ("text" summary, or pretty JSON by default) and write
// it to the sink followed by a trailing newline.
export function writeOutput(
  writer: Writer,
  payload: CommandOutput<unknown>,
  format: OutputFormat = "json",
) {
  const rendered = format === "text" ? renderText(payload) : JSON.stringify(payload, null, 2);
  writer.write(`${rendered}\n`);
}

View File

@@ -0,0 +1,85 @@
import { readWorkspaceFile } from "./files.js";
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
const JIRA_ALLOWED_PREFIXES = ["/rest/api/3/"] as const;
const CONFLUENCE_ALLOWED_PREFIXES = ["/wiki/api/v2/", "/wiki/rest/api/"] as const;
type RawInput = {
product: "jira" | "confluence";
method: string;
path: string;
bodyFile?: string;
cwd: string;
dryRun?: boolean;
};
// Select the path allow-list for the requested product.
function getAllowedPrefixes(product: RawInput["product"]) {
  if (product === "jira") {
    return JIRA_ALLOWED_PREFIXES;
  }
  return CONFLUENCE_ALLOWED_PREFIXES;
}
// Resolve a product-relative API path against that product's base URL.
function buildUrl(config: AtlassianConfig, product: RawInput["product"], path: string) {
  const root = product === "jira" ? config.jiraBaseUrl : config.confluenceBaseUrl;
  return String(new URL(path, `${root}/`));
}
// Narrow an arbitrary method string to the safe subset; DELETE is deliberately
// excluded from the raw escape hatch.
function validateMethod(method: string): asserts method is "GET" | "POST" | "PUT" {
  const allowed = method === "GET" || method === "POST" || method === "PUT";
  if (!allowed) {
    throw new Error("raw only allows GET, POST, and PUT");
  }
}
function validatePath(product: RawInput["product"], path: string) {
const allowedPrefixes = getAllowedPrefixes(product);
if (!allowedPrefixes.some((prefix) => path.startsWith(prefix))) {
throw new Error(`raw path is not allowed for ${product}: ${path}`);
}
}
// Load and JSON-parse the optional request body from a workspace-scoped file;
// resolves to undefined when no body file was supplied.
async function readRawBody(bodyFile: string | undefined, cwd: string) {
  if (!bodyFile) {
    return undefined;
  }
  const text = await readWorkspaceFile(bodyFile, cwd);
  return JSON.parse(text) as unknown;
}
// Bounded escape hatch: send a hand-specified request to a product API after
// validating the method (GET/POST/PUT only) and the path (allow-listed
// prefixes per product). dryRun returns the prepared request unsent.
export async function runRawCommand(
  config: AtlassianConfig,
  fetchImpl: FetchLike | undefined,
  input: RawInput,
): Promise<CommandOutput<unknown>> {
  validateMethod(input.method);
  validatePath(input.product, input.path);
  // Optional JSON body is read from a workspace-scoped file.
  const body = await readRawBody(input.bodyFile, input.cwd);
  const request = {
    method: input.method,
    url: buildUrl(config, input.product, input.path),
    ...(body === undefined ? {} : { body }),
  };
  if (input.dryRun) {
    return {
      ok: true,
      dryRun: true,
      data: request,
    };
  }
  const data = await sendJsonRequest({
    config,
    fetchImpl,
    url: request.url,
    method: input.method,
    body,
    errorPrefix: "Raw request failed",
  });
  return {
    ok: true,
    data,
  };
}

View File

@@ -0,0 +1,35 @@
// Resolved Atlassian Cloud connection settings (produced by loadConfig).
export type AtlassianConfig = {
  // Site root, e.g. https://example.atlassian.net (trailing slashes stripped).
  baseUrl: string;
  // Product roots; default to baseUrl unless overridden via environment.
  jiraBaseUrl: string;
  confluenceBaseUrl: string;
  // Basic-auth credentials: account email plus API token.
  email: string;
  apiToken: string;
  // Optional fallbacks used by jira-create / conf-create.
  defaultProject?: string;
  defaultSpace?: string;
};
// Uniform envelope every CLI command resolves to and prints.
export type CommandOutput<T> = {
  ok: boolean;
  data: T;
  // true when the command only previewed the request (--dry-run).
  dryRun?: boolean;
  // Unmodified API response, attached by some read commands.
  raw?: unknown;
};
// Flattened Jira issue shape used by jira-search / jira-get output.
export type JiraIssueSummary = {
  key: string;
  summary: string;
  issueType: string;
  status: string;
  // Display name; undefined when the issue is unassigned.
  assignee?: string;
  created: string;
  updated: string;
  // Browse URL built from the site base URL.
  url: string;
};
// Minimal write sink (satisfied by process.stdout), injectable for tests.
export type Writer = {
  write(chunk: string | Uint8Array): unknown;
};
// Injectable fetch implementation for tests.
export type FetchLike = typeof fetch;
export type OutputFormat = "json" | "text";

View File

@@ -0,0 +1,15 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": true,
"skipLibCheck": true,
"resolveJsonModule": true,
"types": ["node"],
"outDir": "dist"
},
"include": ["src/**/*.ts", "scripts/**/*.ts", "tests/**/*.ts"]
}

View File

@@ -0,0 +1,81 @@
---
name: atlassian
description: Interact with Atlassian Cloud Jira and Confluence through a portable task-oriented CLI for search, issue/page edits, comments, transitions, and bounded raw requests.
---
# Atlassian (Codex)
Portable Atlassian workflows for Codex using a shared TypeScript CLI.
## Requirements
- Node.js 20+
- `pnpm`
- Atlassian Cloud account access
- `ATLASSIAN_BASE_URL`
- `ATLASSIAN_EMAIL`
- `ATLASSIAN_API_TOKEN`
The `ATLASSIAN_*` values can be supplied either through the shell environment or via a `.env` file in `~/.codex/skills/atlassian/scripts`.
## First-Time Setup
```bash
mkdir -p ~/.codex/skills/atlassian
cp -R skills/atlassian/codex/* ~/.codex/skills/atlassian/
cd ~/.codex/skills/atlassian/scripts
pnpm install
```
## Prerequisite Check (MANDATORY)
Run before using the skill:
```bash
cd ~/.codex/skills/atlassian/scripts
node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')"
node -e 'require("dotenv").config({ path: ".env" }); const required = ["ATLASSIAN_BASE_URL", "ATLASSIAN_EMAIL", "ATLASSIAN_API_TOKEN"]; const missing = required.filter((key) => !(process.env[key] || "").trim()); if (missing.length) { console.error("Missing required Atlassian config: " + missing.join(", ")); process.exit(1); } console.log("OK: Atlassian config present")'
pnpm atlassian health
```
If any check fails, stop and return:
`Missing dependency/config: atlassian requires installed CLI dependencies and valid Atlassian Cloud credentials. Configure ATLASSIAN_* in the shell environment or scripts/.env, then retry.`
## Supported Commands
- `pnpm atlassian health`
- `pnpm atlassian jira-search --jql "..."`
- `pnpm atlassian jira-get --issue ABC-123`
- `pnpm atlassian jira-create ... [--dry-run]`
- `pnpm atlassian jira-update ... [--dry-run]`
- `pnpm atlassian jira-comment ... [--dry-run]`
- `pnpm atlassian jira-transitions --issue ABC-123`
- `pnpm atlassian jira-transition ... [--dry-run]`
- `pnpm atlassian conf-search --query "..."`
- `pnpm atlassian conf-get --page 12345`
- `pnpm atlassian conf-create ... [--dry-run]`
- `pnpm atlassian conf-update ... [--dry-run]`
- `pnpm atlassian conf-comment ... [--dry-run]`
- `pnpm atlassian conf-children --page 12345`
- `pnpm atlassian raw --product jira|confluence --method GET|POST|PUT --path ...`
## Usage Examples
- `pnpm atlassian jira-search --jql "project = ENG ORDER BY updated DESC" --max-results 10`
- `pnpm atlassian conf-update --page 12345 --title "Runbook" --body-file page.storage.html --dry-run`
- `pnpm atlassian raw --product confluence --method POST --path "/wiki/api/v2/pages" --body-file page.json --dry-run`
## Safety Rules
- Default output is JSON; prefer that for agent workflows.
- Use `--dry-run` before any mutating command unless the user clearly wants the write to happen immediately.
- Jira long-text fields are converted to ADF locally.
- Confluence page bodies are storage-first in v1.
- `--body-file` must point to workspace-scoped files only; do not use arbitrary system paths.
- `raw` is for explicit edge cases only and does not allow `DELETE`.
## Notes
- Atlassian Cloud is the only first-class target in v1.
- This skill exists so Codex, Claude Code, Cursor Agent, and OpenCode can share the same command surface even when MCP access differs.

View File

@@ -0,0 +1,20 @@
{
"name": "atlassian-skill-scripts",
"version": "1.0.0",
"description": "Shared runtime for the Atlassian skill",
"type": "module",
"scripts": {
"atlassian": "tsx src/cli.ts",
"typecheck": "tsc --noEmit"
},
"dependencies": {
"commander": "^13.1.0",
"dotenv": "^16.4.7"
},
"devDependencies": {
"@types/node": "^24.3.0",
"tsx": "^4.20.5",
"typescript": "^5.9.2"
},
"packageManager": "pnpm@10.18.1+sha512.77a884a165cbba2d8d1c19e3b4880eee6d2fcabd0d879121e282196b80042351d5eb3ca0935fa599da1dc51265cc68816ad2bddd2a2de5ea9fdf92adbec7cd34"
}

View File

@@ -0,0 +1,361 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
importers:
.:
dependencies:
commander:
specifier: ^13.1.0
version: 13.1.0
dotenv:
specifier: ^16.4.7
version: 16.6.1
devDependencies:
'@types/node':
specifier: ^24.3.0
version: 24.12.0
tsx:
specifier: ^4.20.5
version: 4.21.0
typescript:
specifier: ^5.9.2
version: 5.9.3
packages:
'@esbuild/aix-ppc64@0.27.3':
resolution: {integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==}
engines: {node: '>=18'}
cpu: [ppc64]
os: [aix]
'@esbuild/android-arm64@0.27.3':
resolution: {integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [android]
'@esbuild/android-arm@0.27.3':
resolution: {integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==}
engines: {node: '>=18'}
cpu: [arm]
os: [android]
'@esbuild/android-x64@0.27.3':
resolution: {integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==}
engines: {node: '>=18'}
cpu: [x64]
os: [android]
'@esbuild/darwin-arm64@0.27.3':
resolution: {integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [darwin]
'@esbuild/darwin-x64@0.27.3':
resolution: {integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==}
engines: {node: '>=18'}
cpu: [x64]
os: [darwin]
'@esbuild/freebsd-arm64@0.27.3':
resolution: {integrity: sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==}
engines: {node: '>=18'}
cpu: [arm64]
os: [freebsd]
'@esbuild/freebsd-x64@0.27.3':
resolution: {integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==}
engines: {node: '>=18'}
cpu: [x64]
os: [freebsd]
'@esbuild/linux-arm64@0.27.3':
resolution: {integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [linux]
'@esbuild/linux-arm@0.27.3':
resolution: {integrity: sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==}
engines: {node: '>=18'}
cpu: [arm]
os: [linux]
'@esbuild/linux-ia32@0.27.3':
resolution: {integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==}
engines: {node: '>=18'}
cpu: [ia32]
os: [linux]
'@esbuild/linux-loong64@0.27.3':
resolution: {integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==}
engines: {node: '>=18'}
cpu: [loong64]
os: [linux]
'@esbuild/linux-mips64el@0.27.3':
resolution: {integrity: sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==}
engines: {node: '>=18'}
cpu: [mips64el]
os: [linux]
'@esbuild/linux-ppc64@0.27.3':
resolution: {integrity: sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==}
engines: {node: '>=18'}
cpu: [ppc64]
os: [linux]
'@esbuild/linux-riscv64@0.27.3':
resolution: {integrity: sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==}
engines: {node: '>=18'}
cpu: [riscv64]
os: [linux]
'@esbuild/linux-s390x@0.27.3':
resolution: {integrity: sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==}
engines: {node: '>=18'}
cpu: [s390x]
os: [linux]
'@esbuild/linux-x64@0.27.3':
resolution: {integrity: sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==}
engines: {node: '>=18'}
cpu: [x64]
os: [linux]
'@esbuild/netbsd-arm64@0.27.3':
resolution: {integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==}
engines: {node: '>=18'}
cpu: [arm64]
os: [netbsd]
'@esbuild/netbsd-x64@0.27.3':
resolution: {integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==}
engines: {node: '>=18'}
cpu: [x64]
os: [netbsd]
'@esbuild/openbsd-arm64@0.27.3':
resolution: {integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==}
engines: {node: '>=18'}
cpu: [arm64]
os: [openbsd]
'@esbuild/openbsd-x64@0.27.3':
resolution: {integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==}
engines: {node: '>=18'}
cpu: [x64]
os: [openbsd]
'@esbuild/openharmony-arm64@0.27.3':
resolution: {integrity: sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==}
engines: {node: '>=18'}
cpu: [arm64]
os: [openharmony]
'@esbuild/sunos-x64@0.27.3':
resolution: {integrity: sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==}
engines: {node: '>=18'}
cpu: [x64]
os: [sunos]
'@esbuild/win32-arm64@0.27.3':
resolution: {integrity: sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==}
engines: {node: '>=18'}
cpu: [arm64]
os: [win32]
'@esbuild/win32-ia32@0.27.3':
resolution: {integrity: sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==}
engines: {node: '>=18'}
cpu: [ia32]
os: [win32]
'@esbuild/win32-x64@0.27.3':
resolution: {integrity: sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==}
engines: {node: '>=18'}
cpu: [x64]
os: [win32]
'@types/node@24.12.0':
resolution: {integrity: sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==}
commander@13.1.0:
resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==}
engines: {node: '>=18'}
dotenv@16.6.1:
resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==}
engines: {node: '>=12'}
esbuild@0.27.3:
resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==}
engines: {node: '>=18'}
hasBin: true
fsevents@2.3.3:
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
os: [darwin]
get-tsconfig@4.13.6:
resolution: {integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==}
resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
tsx@4.21.0:
resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==}
engines: {node: '>=18.0.0'}
hasBin: true
typescript@5.9.3:
resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
engines: {node: '>=14.17'}
hasBin: true
undici-types@7.16.0:
resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==}
snapshots:
'@esbuild/aix-ppc64@0.27.3':
optional: true
'@esbuild/android-arm64@0.27.3':
optional: true
'@esbuild/android-arm@0.27.3':
optional: true
'@esbuild/android-x64@0.27.3':
optional: true
'@esbuild/darwin-arm64@0.27.3':
optional: true
'@esbuild/darwin-x64@0.27.3':
optional: true
'@esbuild/freebsd-arm64@0.27.3':
optional: true
'@esbuild/freebsd-x64@0.27.3':
optional: true
'@esbuild/linux-arm64@0.27.3':
optional: true
'@esbuild/linux-arm@0.27.3':
optional: true
'@esbuild/linux-ia32@0.27.3':
optional: true
'@esbuild/linux-loong64@0.27.3':
optional: true
'@esbuild/linux-mips64el@0.27.3':
optional: true
'@esbuild/linux-ppc64@0.27.3':
optional: true
'@esbuild/linux-riscv64@0.27.3':
optional: true
'@esbuild/linux-s390x@0.27.3':
optional: true
'@esbuild/linux-x64@0.27.3':
optional: true
'@esbuild/netbsd-arm64@0.27.3':
optional: true
'@esbuild/netbsd-x64@0.27.3':
optional: true
'@esbuild/openbsd-arm64@0.27.3':
optional: true
'@esbuild/openbsd-x64@0.27.3':
optional: true
'@esbuild/openharmony-arm64@0.27.3':
optional: true
'@esbuild/sunos-x64@0.27.3':
optional: true
'@esbuild/win32-arm64@0.27.3':
optional: true
'@esbuild/win32-ia32@0.27.3':
optional: true
'@esbuild/win32-x64@0.27.3':
optional: true
'@types/node@24.12.0':
dependencies:
undici-types: 7.16.0
commander@13.1.0: {}
dotenv@16.6.1: {}
esbuild@0.27.3:
optionalDependencies:
'@esbuild/aix-ppc64': 0.27.3
'@esbuild/android-arm': 0.27.3
'@esbuild/android-arm64': 0.27.3
'@esbuild/android-x64': 0.27.3
'@esbuild/darwin-arm64': 0.27.3
'@esbuild/darwin-x64': 0.27.3
'@esbuild/freebsd-arm64': 0.27.3
'@esbuild/freebsd-x64': 0.27.3
'@esbuild/linux-arm': 0.27.3
'@esbuild/linux-arm64': 0.27.3
'@esbuild/linux-ia32': 0.27.3
'@esbuild/linux-loong64': 0.27.3
'@esbuild/linux-mips64el': 0.27.3
'@esbuild/linux-ppc64': 0.27.3
'@esbuild/linux-riscv64': 0.27.3
'@esbuild/linux-s390x': 0.27.3
'@esbuild/linux-x64': 0.27.3
'@esbuild/netbsd-arm64': 0.27.3
'@esbuild/netbsd-x64': 0.27.3
'@esbuild/openbsd-arm64': 0.27.3
'@esbuild/openbsd-x64': 0.27.3
'@esbuild/openharmony-arm64': 0.27.3
'@esbuild/sunos-x64': 0.27.3
'@esbuild/win32-arm64': 0.27.3
'@esbuild/win32-ia32': 0.27.3
'@esbuild/win32-x64': 0.27.3
fsevents@2.3.3:
optional: true
get-tsconfig@4.13.6:
dependencies:
resolve-pkg-maps: 1.0.0
resolve-pkg-maps@1.0.0: {}
tsx@4.21.0:
dependencies:
esbuild: 0.27.3
get-tsconfig: 4.13.6
optionalDependencies:
fsevents: 2.3.3
typescript@5.9.3: {}
undici-types@7.16.0: {}

View File

@@ -0,0 +1,92 @@
// ADF node-type identifier for plain-text leaf nodes.
const TEXT_NODE = "text";

/** Wrap a plain string in an ADF text node. */
function textNode(text: string) {
  return { type: TEXT_NODE, text };
}
/**
 * Build an ADF paragraph node from a list of source lines.
 * Consecutive lines are separated by hardBreak nodes; empty lines
 * contribute only the break. When no content accumulates, the
 * `content` key is omitted entirely.
 */
function paragraphNode(lines: string[]) {
  const children: Array<{ type: string; text?: string }> = [];
  for (const [position, line] of lines.entries()) {
    if (position > 0) {
      children.push({ type: "hardBreak" });
    }
    if (line.length > 0) {
      children.push(textNode(line));
    }
  }
  if (children.length === 0) {
    return { type: "paragraph" };
  }
  return { type: "paragraph", content: children };
}
export function markdownToAdf(input: string) {
const lines = input.replace(/\r\n/g, "\n").split("\n");
const content: Array<Record<string, unknown>> = [];
let index = 0;
while (index < lines.length) {
const current = lines[index]?.trimEnd() ?? "";
if (current.trim().length === 0) {
index += 1;
continue;
}
const heading = current.match(/^(#{1,6})\s+(.*)$/);
if (heading) {
content.push({
type: "heading",
attrs: { level: heading[1].length },
content: [textNode(heading[2])],
});
index += 1;
continue;
}
if (/^[-*]\s+/.test(current)) {
const items: Array<Record<string, unknown>> = [];
while (index < lines.length && /^[-*]\s+/.test(lines[index] ?? "")) {
items.push({
type: "listItem",
content: [
{
type: "paragraph",
content: [textNode((lines[index] ?? "").replace(/^[-*]\s+/, ""))],
},
],
});
index += 1;
}
content.push({
type: "bulletList",
content: items,
});
continue;
}
const paragraphLines: string[] = [];
while (index < lines.length && (lines[index]?.trim().length ?? 0) > 0) {
paragraphLines.push(lines[index] ?? "");
index += 1;
}
content.push(paragraphNode(paragraphLines));
}
return {
type: "doc",
version: 1,
content,
};
}

View File

@@ -0,0 +1,332 @@
import process from "node:process";
import { pathToFileURL } from "node:url";
import { Command } from "commander";
import { createConfluenceClient } from "./confluence.js";
import { loadConfig } from "./config.js";
import { readWorkspaceFile } from "./files.js";
import { runHealthCheck } from "./health.js";
import { createJiraClient } from "./jira.js";
import { writeOutput } from "./output.js";
import { runRawCommand } from "./raw.js";
import type { FetchLike, OutputFormat, Writer } from "./types.js";
// Injection points for tests and embedding hosts; each field falls back
// to the corresponding process-global in createRuntime().
type CliContext = {
  cwd?: string;
  env?: NodeJS.ProcessEnv;
  fetchImpl?: FetchLike;
  stdout?: Writer;
  stderr?: Writer;
};
/** Normalize a --format flag: "text" selects text output; anything else (including undefined) is JSON. */
function resolveFormat(format: string | undefined): OutputFormat {
  if (format === "text") {
    return "text";
  }
  return "json";
}
/**
 * Materialize a CliContext into a runtime: resolves cwd/env/streams from
 * the current process when not injected, and lazily builds (then caches)
 * the config plus the Jira/Confluence clients so commands that never
 * touch a product do not require its configuration.
 */
function createRuntime(context: CliContext) {
  const cwd = context.cwd ?? process.cwd();
  const env = context.env ?? process.env;
  const stdout = context.stdout ?? process.stdout;
  const stderr = context.stderr ?? process.stderr;
  let config: ReturnType<typeof loadConfig> | undefined;
  let jira: ReturnType<typeof createJiraClient> | undefined;
  let confluence: ReturnType<typeof createConfluenceClient> | undefined;

  const getConfig = () => {
    if (!config) {
      config = loadConfig(env, { cwd });
    }
    return config;
  };
  const getJiraClient = () => {
    if (!jira) {
      jira = createJiraClient({ config: getConfig(), fetchImpl: context.fetchImpl });
    }
    return jira;
  };
  const getConfluenceClient = () => {
    if (!confluence) {
      confluence = createConfluenceClient({ config: getConfig(), fetchImpl: context.fetchImpl });
    }
    return confluence;
  };
  // Reads an optional workspace-relative file; a missing path yields undefined.
  const readBodyFile = async (filePath: string | undefined) =>
    filePath ? readWorkspaceFile(filePath, cwd) : undefined;

  return {
    cwd,
    stdout,
    stderr,
    readBodyFile,
    getConfig,
    getJiraClient,
    getConfluenceClient,
    fetchImpl: context.fetchImpl,
  };
}
/**
 * Register every CLI command on a fresh commander program.
 * Commands share one lazily-initialized runtime (config + clients) from
 * `context`; each action resolves its own --format flag and writes the
 * resulting CommandOutput payload to the runtime's stdout.
 */
export function buildProgram(context: CliContext = {}) {
  const runtime = createRuntime(context);
  const program = new Command()
    .name("atlassian")
    .description("Portable Atlassian CLI for multi-agent skills")
    .version("0.1.0");
  // Connectivity / configuration diagnostics.
  program
    .command("health")
    .description("Validate configuration and Atlassian connectivity")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runHealthCheck(runtime.getConfig(), runtime.fetchImpl);
      writeOutput(
        runtime.stdout,
        payload,
        resolveFormat(options.format),
      );
    });
  // Confluence command surface (search, read, create, update, comment, children).
  program
    .command("conf-search")
    .requiredOption("--query <query>", "CQL search query")
    .option("--max-results <number>", "Maximum results to return", "50")
    .option("--start-at <number>", "Result offset", "0")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().searchPages({
        query: options.query,
        maxResults: Number(options.maxResults),
        startAt: Number(options.startAt),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-get")
    .requiredOption("--page <page>", "Confluence page ID")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().getPage(options.page);
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-create")
    .requiredOption("--title <title>", "Confluence page title")
    .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
    .option("--space <space>", "Confluence space ID")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().createPage({
        space: options.space,
        title: options.title,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-update")
    .requiredOption("--page <page>", "Confluence page ID")
    .requiredOption("--title <title>", "Confluence page title")
    .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().updatePage({
        pageId: options.page,
        title: options.title,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-comment")
    .requiredOption("--page <page>", "Confluence page ID")
    .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().commentPage({
        pageId: options.page,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-children")
    .requiredOption("--page <page>", "Confluence page ID")
    .option("--max-results <number>", "Maximum results to return", "50")
    .option("--start-at <number>", "Cursor/start token", "0")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().listChildren(
        options.page,
        Number(options.maxResults),
        Number(options.startAt),
      );
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  // Escape hatch: validated raw REST calls (allow-listed paths/verbs only).
  program
    .command("raw")
    .requiredOption("--product <product>", "jira or confluence")
    .requiredOption("--method <method>", "GET, POST, or PUT")
    .requiredOption("--path <path>", "Validated API path")
    .option("--body-file <path>", "Workspace-relative JSON file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runRawCommand(runtime.getConfig(), runtime.fetchImpl, {
        product: options.product,
        method: String(options.method).toUpperCase(),
        path: options.path,
        bodyFile: options.bodyFile,
        cwd: runtime.cwd,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  // Jira command surface (search, read, create, update, comment, transitions).
  program
    .command("jira-search")
    .requiredOption("--jql <jql>", "JQL expression to execute")
    .option("--max-results <number>", "Maximum results to return", "50")
    .option("--start-at <number>", "Result offset", "0")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().searchIssues({
        jql: options.jql,
        maxResults: Number(options.maxResults),
        startAt: Number(options.startAt),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-get")
    .requiredOption("--issue <issue>", "Issue key")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().getIssue(options.issue);
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-create")
    .requiredOption("--type <type>", "Issue type name")
    .requiredOption("--summary <summary>", "Issue summary")
    .option("--project <project>", "Project key")
    .option("--description-file <path>", "Workspace-relative markdown/text file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().createIssue({
        project: options.project,
        type: options.type,
        summary: options.summary,
        description: await runtime.readBodyFile(options.descriptionFile),
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-update")
    .requiredOption("--issue <issue>", "Issue key")
    .option("--summary <summary>", "Updated summary")
    .option("--description-file <path>", "Workspace-relative markdown/text file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().updateIssue({
        issue: options.issue,
        summary: options.summary,
        description: await runtime.readBodyFile(options.descriptionFile),
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-comment")
    .requiredOption("--issue <issue>", "Issue key")
    .requiredOption("--body-file <path>", "Workspace-relative markdown/text file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().commentIssue({
        issue: options.issue,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-transitions")
    .requiredOption("--issue <issue>", "Issue key")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().getTransitions(options.issue);
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-transition")
    .requiredOption("--issue <issue>", "Issue key")
    .requiredOption("--transition <transition>", "Transition ID")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().transitionIssue({
        issue: options.issue,
        transition: options.transition,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  return program;
}
/** Build the CLI program from `context` and execute it against `argv`. */
export async function runCli(argv = process.argv, context: CliContext = {}) {
  const program = buildProgram(context);
  return program.parseAsync(argv).then(() => undefined);
}
// True only when this module is the entry script (node path/to/cli.js),
// not when it is imported — the ESM equivalent of the CommonJS
// `require.main === module` check.
const isDirectExecution =
  Boolean(process.argv[1]) && import.meta.url === pathToFileURL(process.argv[1]).href;
if (isDirectExecution) {
  // Report failures on stderr and set a non-zero exit code instead of
  // letting an unhandled rejection crash the process.
  runCli().catch((error: unknown) => {
    const message = error instanceof Error ? error.message : String(error);
    process.stderr.write(`${message}\n`);
    process.exitCode = 1;
  });
}

View File

@@ -0,0 +1,52 @@
import path from "node:path";
import { config as loadDotEnv } from "dotenv";
import type { AtlassianConfig } from "./types.js";
/** Strip all trailing slashes so later URL joins do not double up. */
function normalizeBaseUrl(value: string) {
  let end = value.length;
  while (end > 0 && value[end - 1] === "/") {
    end -= 1;
  }
  return value.slice(0, end);
}
/** Read a mandatory env var, trimming whitespace; throws when absent or blank. */
function readRequired(env: NodeJS.ProcessEnv, key: string) {
  const trimmed = env[key]?.trim();
  if (trimmed === undefined || trimmed === "") {
    throw new Error(`Missing required environment variable: ${key}`);
  }
  return trimmed;
}
/**
 * Load Atlassian connection settings from the environment, first merging
 * `.env` from the workspace root (existing env values win: override=false).
 * Product-specific base URLs fall back to ATLASSIAN_BASE_URL; optional
 * defaults are normalized to undefined when blank.
 */
export function loadConfig(
  env: NodeJS.ProcessEnv = process.env,
  options?: {
    cwd?: string;
  },
): AtlassianConfig {
  const workspace = options?.cwd ?? process.cwd();
  loadDotEnv({
    path: path.resolve(workspace, ".env"),
    processEnv: env as Record<string, string>,
    override: false,
  });
  const baseUrl = normalizeBaseUrl(readRequired(env, "ATLASSIAN_BASE_URL"));
  const productUrl = (value: string | undefined) =>
    normalizeBaseUrl(value?.trim() || baseUrl);
  return {
    baseUrl,
    jiraBaseUrl: productUrl(env.ATLASSIAN_JIRA_BASE_URL),
    confluenceBaseUrl: productUrl(env.ATLASSIAN_CONFLUENCE_BASE_URL),
    email: readRequired(env, "ATLASSIAN_EMAIL"),
    apiToken: readRequired(env, "ATLASSIAN_API_TOKEN"),
    defaultProject: env.ATLASSIAN_DEFAULT_PROJECT?.trim() || undefined,
    defaultSpace: env.ATLASSIAN_DEFAULT_SPACE?.trim() || undefined,
  };
}
/**
 * Build the HTTP Basic Authorization header value from the configured
 * email + API token pair (Atlassian Cloud token authentication).
 */
export function createBasicAuthHeader(config: {
  email: string;
  apiToken: string;
  [key: string]: unknown;
}) {
  const credentials = `${config.email}:${config.apiToken}`;
  const encoded = Buffer.from(credentials).toString("base64");
  return `Basic ${encoded}`;
}

View File

@@ -0,0 +1,292 @@
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
// Construction options: config is required; fetchImpl is injectable for tests.
type ConfluenceClientOptions = {
  config: AtlassianConfig;
  fetchImpl?: FetchLike;
};
// CQL search parameters (v1 search endpoint pagination).
type SearchInput = {
  query: string;
  maxResults: number;
  startAt: number;
};
// Page creation; space falls back to ATLASSIAN_DEFAULT_SPACE when omitted.
type CreateInput = {
  space?: string;
  title: string;
  body: string;
  dryRun?: boolean;
};
// Full-body page update (version is fetched and incremented internally).
type UpdateInput = {
  pageId: string;
  title: string;
  body: string;
  dryRun?: boolean;
};
// Footer-comment creation on an existing page.
type CommentInput = {
  pageId: string;
  body: string;
  dryRun?: boolean;
};
// Normalized page shape produced by normalizePage (optional keys omitted when absent).
type PageSummary = {
  id: string;
  title: string;
  type: string;
  status?: string;
  spaceId?: string;
  url?: string;
};
/** Join an API path onto a base URL; the appended "/" keeps any base path intact. */
function buildUrl(baseUrl: string, path: string) {
  const resolved = new URL(path, `${baseUrl}/`);
  return resolved.toString();
}
/**
 * Project a raw Confluence page payload onto this CLI's summary shape.
 * Optional fields (status, spaceId, excerpt, url) are included only when
 * present so JSON output stays compact; the url is built from the webui
 * link relative to the site base URL.
 */
function normalizePage(baseUrl: string, page: Record<string, unknown>, excerpt?: string) {
  const links = (page._links ?? {}) as Record<string, unknown>;
  const summary: Record<string, unknown> = {
    id: String(page.id ?? ""),
    title: String(page.title ?? ""),
    type: String(page.type ?? "page"),
  };
  if (page.status) {
    summary.status = String(page.status);
  }
  if (page.spaceId) {
    summary.spaceId = String(page.spaceId);
  }
  if (excerpt) {
    summary.excerpt = excerpt;
  }
  if (links.webui) {
    summary.url = `${baseUrl}${String(links.webui)}`;
  }
  return summary;
}
/**
 * Create a Confluence client bound to `options.config`.
 * Reads and writes go through the v2 pages API (storage representation);
 * search uses the v1 CQL endpoint. Mutating methods accept `dryRun` to
 * return the would-be request instead of sending it. All methods return
 * CommandOutput payloads.
 */
export function createConfluenceClient(options: ConfluenceClientOptions) {
  const config = options.config;
  // Fetch the current page (storage body) so updates can read its
  // version number, status, and spaceId.
  async function getPageForUpdate(pageId: string) {
    return (await sendJsonRequest({
      config,
      fetchImpl: options.fetchImpl,
      url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${pageId}?body-format=storage`),
      method: "GET",
      errorPrefix: "Confluence request failed",
    })) as Record<string, unknown>;
  }
  return {
    // CQL search via the v1 endpoint; results are normalized page summaries.
    async searchPages(input: SearchInput): Promise<CommandOutput<unknown>> {
      const url = new URL("/wiki/rest/api/search", `${config.confluenceBaseUrl}/`);
      url.searchParams.set("cql", input.query);
      url.searchParams.set("limit", String(input.maxResults));
      url.searchParams.set("start", String(input.startAt));
      const raw = (await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: url.toString(),
        method: "GET",
        errorPrefix: "Confluence request failed",
      })) as Record<string, unknown>;
      const results = Array.isArray(raw.results) ? raw.results : [];
      return {
        ok: true,
        data: {
          pages: results.map((entry) => {
            const result = entry as Record<string, unknown>;
            // v1 search nests the page under `content`; excerpt rides alongside.
            return normalizePage(
              config.baseUrl,
              (result.content ?? {}) as Record<string, unknown>,
              result.excerpt ? String(result.excerpt) : undefined,
            );
          }),
          startAt: Number(raw.start ?? input.startAt),
          maxResults: Number(raw.limit ?? input.maxResults),
          total: Number(raw.totalSize ?? raw.size ?? results.length),
        },
      };
    },
    // Fetch one page with its storage-format body and version number.
    async getPage(pageId: string): Promise<CommandOutput<unknown>> {
      const raw = (await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${pageId}?body-format=storage`),
        method: "GET",
        errorPrefix: "Confluence request failed",
      })) as Record<string, unknown>;
      const body = ((raw.body ?? {}) as Record<string, unknown>).storage as Record<string, unknown> | undefined;
      return {
        ok: true,
        data: {
          page: {
            ...normalizePage(config.baseUrl, raw),
            version: Number((((raw.version ?? {}) as Record<string, unknown>).number ?? 0)),
            body: body?.value ? String(body.value) : "",
          },
        },
        raw,
      };
    },
    // List direct children; v2 API paginates with a cursor token.
    // NOTE(review): startAt is passed as the `cursor` parameter — the v2 API
    // expects an opaque cursor string, not a numeric offset; confirm callers
    // pass the `nextCursor` value returned here rather than a number.
    async listChildren(pageId: string, maxResults: number, startAt: number): Promise<CommandOutput<unknown>> {
      const url = new URL(`/wiki/api/v2/pages/${pageId}/direct-children`, `${config.confluenceBaseUrl}/`);
      url.searchParams.set("limit", String(maxResults));
      url.searchParams.set("cursor", String(startAt));
      const raw = (await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: url.toString(),
        method: "GET",
        errorPrefix: "Confluence request failed",
      })) as Record<string, unknown>;
      const results = Array.isArray(raw.results) ? raw.results : [];
      const links = (raw._links ?? {}) as Record<string, unknown>;
      return {
        ok: true,
        data: {
          pages: results.map((page) => normalizePage(config.baseUrl, page as Record<string, unknown>)),
          nextCursor: links.next ? String(links.next) : null,
        },
      };
    },
    // Create a page in the given (or default) space; storage representation.
    async createPage(input: CreateInput): Promise<CommandOutput<unknown>> {
      const spaceId = input.space || config.defaultSpace;
      if (!spaceId) {
        throw new Error("conf-create requires --space or ATLASSIAN_DEFAULT_SPACE");
      }
      const request = {
        method: "POST" as const,
        url: buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/pages"),
        body: {
          spaceId,
          title: input.title,
          status: "current",
          body: {
            representation: "storage",
            value: input.body,
          },
        },
      };
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: request.url,
        method: request.method,
        body: request.body,
        errorPrefix: "Confluence request failed",
      });
      return {
        ok: true,
        data: raw,
      };
    },
    // Replace a page's title/body; reads the current version and sends
    // version+1 so concurrent edits surface as a 409 conflict.
    async updatePage(input: UpdateInput): Promise<CommandOutput<unknown>> {
      const currentPage = await getPageForUpdate(input.pageId);
      const version = (((currentPage.version ?? {}) as Record<string, unknown>).number ?? 0) as number;
      const spaceId = String(currentPage.spaceId ?? "");
      const request = {
        method: "PUT" as const,
        url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${input.pageId}`),
        body: {
          id: input.pageId,
          status: String(currentPage.status ?? "current"),
          title: input.title,
          spaceId,
          version: {
            number: Number(version) + 1,
          },
          body: {
            representation: "storage",
            value: input.body,
          },
        },
      };
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: request.url,
        method: request.method,
        body: request.body,
        errorPrefix: "Confluence request failed",
        // Translate a version conflict into an actionable message.
        handleResponseError(response) {
          if (response.status === 409) {
            return new Error(`Confluence update conflict: page ${input.pageId} was updated by someone else`);
          }
          return undefined;
        },
      });
      return {
        ok: true,
        data: raw,
      };
    },
    // Add a footer comment (storage representation) to a page.
    async commentPage(input: CommentInput): Promise<CommandOutput<unknown>> {
      const request = {
        method: "POST" as const,
        url: buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/footer-comments"),
        body: {
          pageId: input.pageId,
          body: {
            representation: "storage",
            value: input.body,
          },
        },
      };
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: request.url,
        method: request.method,
        body: request.body,
        errorPrefix: "Confluence request failed",
      });
      return {
        ok: true,
        data: raw,
      };
    },
  };
}

View File

@@ -0,0 +1,13 @@
import { readFile } from "node:fs/promises";
import path from "node:path";
/**
 * Read a UTF-8 text file, refusing any path that resolves outside `cwd`
 * (the active workspace). Guards against "../" traversal and, on
 * Windows, cross-drive absolute results from path.relative.
 *
 * @param filePath workspace-relative (or absolute-but-inside) file path
 * @param cwd      workspace root used as the containment boundary
 * @throws Error when the resolved path escapes the workspace
 */
export async function readWorkspaceFile(filePath: string, cwd: string) {
  const resolved = path.resolve(cwd, filePath);
  const relative = path.relative(cwd, resolved);
  // Escape detection: an absolute relative result (different drive on
  // Windows) or a leading ".." path *segment*. Checking for the separator
  // avoids wrongly rejecting legitimate names that merely start with
  // ".." (e.g. "..notes.txt"), which a bare startsWith("..") would block.
  const escapesWorkspace =
    path.isAbsolute(relative) ||
    relative === ".." ||
    relative.startsWith(`..${path.sep}`);
  if (escapesWorkspace) {
    throw new Error(`--body-file must stay within the active workspace: ${filePath}`);
  }
  return readFile(resolved, "utf8");
}

View File

@@ -0,0 +1,69 @@
import { createJsonHeaders, createStatusError } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
// Outcome of one product probe: `status` when an HTTP response arrived,
// `message` when the probe failed (HTTP error text or network exception).
type ProductHealth = {
  ok: boolean;
  status?: number;
  message?: string;
};
/** Resolve an API path against a base URL; the trailing slash preserves any base path. */
function buildUrl(baseUrl: string, path: string) {
  return String(new URL(path, baseUrl + "/"));
}
/**
 * Probe Jira and Confluence with authenticated GET requests and report
 * per-product reachability. HTTP failures and network exceptions are
 * captured in the result rather than thrown; only a missing Fetch
 * implementation throws. Overall `ok` is the AND of both probes.
 */
export async function runHealthCheck(
  config: AtlassianConfig,
  fetchImpl: FetchLike | undefined,
): Promise<CommandOutput<unknown>> {
  const client = fetchImpl ?? globalThis.fetch;
  if (!client) {
    throw new Error("Fetch API is not available in this runtime");
  }
  const probe = async (product: "Jira" | "Confluence", url: string): Promise<ProductHealth> => {
    try {
      const response = await client(url, {
        method: "GET",
        headers: createJsonHeaders(config, false),
      });
      if (response.ok) {
        return { ok: true, status: response.status };
      }
      const failure = createStatusError(`${product} health check failed`, response);
      return { ok: false, status: response.status, message: failure.message };
    } catch (error: unknown) {
      const message = error instanceof Error ? error.message : String(error);
      return { ok: false, message };
    }
  };
  const jira = await probe("Jira", buildUrl(config.jiraBaseUrl, "/rest/api/3/myself"));
  const confluence = await probe("Confluence", buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/spaces?limit=1"));
  return {
    ok: jira.ok && confluence.ok,
    data: {
      baseUrl: config.baseUrl,
      jiraBaseUrl: config.jiraBaseUrl,
      confluenceBaseUrl: config.confluenceBaseUrl,
      defaultProject: config.defaultProject,
      defaultSpace: config.defaultSpace,
      products: { jira, confluence },
    },
  };
}

View File

@@ -0,0 +1,86 @@
import { createBasicAuthHeader } from "./config.js";
import type { AtlassianConfig, FetchLike } from "./types.js";
export type HttpMethod = "GET" | "POST" | "PUT";
/**
 * Standard header set for Atlassian REST calls: JSON Accept plus Basic
 * auth, with Content-Type added only when a JSON body will be sent.
 */
export function createJsonHeaders(config: AtlassianConfig, includeJsonBody: boolean) {
  const base: Array<[string, string]> = [
    ["Accept", "application/json"],
    ["Authorization", createBasicAuthHeader(config)],
  ];
  if (!includeJsonBody) {
    return base;
  }
  return [...base, ["Content-Type", "application/json"] as [string, string]];
}
/**
 * Decode an HTTP response body: null for 204 No Content, parsed JSON for
 * JSON content types (with a clearer error on malformed payloads), and
 * raw text for everything else.
 */
export async function parseResponse(response: Response) {
  if (response.status === 204) {
    return null;
  }
  const contentType = response.headers.get("content-type") ?? "";
  if (!contentType.includes("application/json")) {
    return response.text();
  }
  try {
    return await response.json();
  } catch {
    throw new Error("Malformed JSON response from Atlassian API");
  }
}
/**
 * Build an actionable Error for a failed HTTP response, appending a
 * remediation hint for the common Atlassian status codes (401/403/404/429).
 */
export function createStatusError(errorPrefix: string, response: Response) {
  const hints: Record<number, string> = {
    401: "check ATLASSIAN_EMAIL and ATLASSIAN_API_TOKEN",
    403: "verify product permissions for this account",
    404: "verify the resource identifier or API path",
    429: "retry later or reduce request rate",
  };
  const base = `${errorPrefix}: ${response.status} ${response.statusText}`;
  const hint = hints[response.status];
  return new Error(hint ? `${base} - ${hint}` : base);
}
/**
 * Perform one JSON round-trip against the Atlassian API.
 * Serializes `body` when present (adding Content-Type accordingly),
 * throws a status-specific error on non-2xx responses — a custom
 * handler may substitute its own error first — and decodes the reply
 * via parseResponse.
 */
export async function sendJsonRequest(options: {
  config: AtlassianConfig;
  fetchImpl?: FetchLike;
  url: string;
  method: HttpMethod;
  body?: unknown;
  errorPrefix: string;
  handleResponseError?: (response: Response) => Error | undefined;
}) {
  const client = options.fetchImpl ?? globalThis.fetch;
  if (!client) {
    throw new Error("Fetch API is not available in this runtime");
  }
  const hasBody = options.body !== undefined;
  const init: RequestInit = {
    method: options.method,
    headers: createJsonHeaders(options.config, hasBody),
  };
  if (hasBody) {
    init.body = JSON.stringify(options.body);
  }
  const response = await client(options.url, init);
  if (response.ok) {
    return parseResponse(response);
  }
  throw options.handleResponseError?.(response) ?? createStatusError(options.errorPrefix, response);
}

View File

@@ -0,0 +1,264 @@
import { markdownToAdf } from "./adf.js";
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike, JiraIssueSummary } from "./types.js";
// Jira fields requested by search/get so normalizeIssue has everything it reads.
const ISSUE_FIELDS = ["summary", "issuetype", "status", "assignee", "created", "updated"] as const;
// Construction options: config is required; fetchImpl is injectable for tests.
type JiraClientOptions = {
  config: AtlassianConfig;
  fetchImpl?: FetchLike;
};
// JQL search parameters with offset pagination.
type SearchInput = {
  jql: string;
  maxResults: number;
  startAt: number;
};
// Issue creation; project falls back to ATLASSIAN_DEFAULT_PROJECT when omitted.
type CreateInput = {
  project?: string;
  type: string;
  summary: string;
  description?: string;
  dryRun?: boolean;
};
// Partial issue update; at least one of summary/description must be set.
type UpdateInput = {
  issue: string;
  summary?: string;
  description?: string;
  dryRun?: boolean;
};
// Markdown comment body appended to an issue.
type CommentInput = {
  issue: string;
  body: string;
  dryRun?: boolean;
};
// Workflow transition by transition ID (see getTransitions for IDs).
type TransitionInput = {
  issue: string;
  transition: string;
  dryRun?: boolean;
};
/**
 * Flatten a raw Jira issue payload into the CLI's summary shape.
 * Missing fields degrade to empty strings; assignee stays undefined when
 * unassigned. `url` is the human-facing browse link on the site base URL.
 */
function normalizeIssue(config: AtlassianConfig, issue: Record<string, unknown>): JiraIssueSummary {
  const fields = (issue.fields ?? {}) as Record<string, unknown>;
  // Extract `.name` from a nested object field, tolerating absence.
  const nameOf = (value: unknown) =>
    String(((value ?? {}) as Record<string, unknown>).name ?? "");
  const assignee = (fields.assignee ?? {}) as Record<string, unknown>;
  return {
    key: String(issue.key ?? ""),
    summary: String(fields.summary ?? ""),
    issueType: nameOf(fields.issuetype),
    status: nameOf(fields.status),
    assignee: assignee.displayName ? String(assignee.displayName) : undefined,
    created: String(fields.created ?? ""),
    updated: String(fields.updated ?? ""),
    url: `${config.baseUrl}/browse/${issue.key ?? ""}`,
  };
}
/** Build a {method, url, body?} descriptor against the Jira base URL; body omitted when undefined. */
function createRequest(config: AtlassianConfig, method: "GET" | "POST" | "PUT", path: string, body?: unknown) {
  const target = new URL(path, `${config.jiraBaseUrl}/`);
  const request: { method: typeof method; url: string; body?: unknown } = {
    method,
    url: target.toString(),
  };
  if (body !== undefined) {
    request.body = body;
  }
  return request;
}
/**
 * Create a Jira client bound to `options.config`.
 * Mutating methods accept `dryRun` to return the would-be request
 * instead of sending it; markdown descriptions/comments are converted
 * to ADF before submission. All methods return CommandOutput payloads.
 */
export function createJiraClient(options: JiraClientOptions) {
  const fetchImpl = options.fetchImpl ?? globalThis.fetch;
  if (!fetchImpl) {
    throw new Error("Fetch API is not available in this runtime");
  }
  // Shared JSON round-trip against the Jira base URL.
  async function send(method: "GET" | "POST" | "PUT", path: string, body?: unknown) {
    const request = createRequest(options.config, method, path, body);
    return sendJsonRequest({
      config: options.config,
      fetchImpl,
      url: request.url,
      method,
      body,
      errorPrefix: "Jira request failed",
    });
  }
  return {
    // JQL search with offset pagination, requesting only ISSUE_FIELDS.
    // NOTE(review): POST /rest/api/3/search has been deprecated by Atlassian
    // in favor of /rest/api/3/search/jql (token-based pagination) — confirm
    // against the target deployment before relying on startAt/total here.
    async searchIssues(input: SearchInput): Promise<CommandOutput<unknown>> {
      const raw = (await send("POST", "/rest/api/3/search", {
        jql: input.jql,
        maxResults: input.maxResults,
        startAt: input.startAt,
        fields: [...ISSUE_FIELDS],
      })) as Record<string, unknown>;
      const issues = Array.isArray(raw.issues) ? raw.issues : [];
      return {
        ok: true,
        data: {
          issues: issues.map((issue) => normalizeIssue(options.config, issue as Record<string, unknown>)),
          startAt: Number(raw.startAt ?? input.startAt),
          maxResults: Number(raw.maxResults ?? input.maxResults),
          total: Number(raw.total ?? issues.length),
        },
      };
    },
    // Fetch one issue (summary fields only) plus the raw payload.
    async getIssue(issue: string): Promise<CommandOutput<unknown>> {
      const url = new URL(`/rest/api/3/issue/${issue}`, `${options.config.jiraBaseUrl}/`);
      url.searchParams.set("fields", ISSUE_FIELDS.join(","));
      const raw = (await send("GET", `${url.pathname}${url.search}`)) as Record<string, unknown>;
      return {
        ok: true,
        data: {
          issue: normalizeIssue(options.config, raw),
        },
        raw,
      };
    },
    // List the workflow transitions currently available for an issue.
    async getTransitions(issue: string): Promise<CommandOutput<unknown>> {
      const raw = (await send(
        "GET",
        `/rest/api/3/issue/${issue}/transitions`,
      )) as { transitions?: Array<Record<string, unknown>> };
      return {
        ok: true,
        data: {
          transitions: (raw.transitions ?? []).map((transition) => ({
            id: String(transition.id ?? ""),
            name: String(transition.name ?? ""),
            toStatus: String(((transition.to ?? {}) as Record<string, unknown>).name ?? ""),
            hasScreen: Boolean(transition.hasScreen),
          })),
        },
      };
    },
    // Create an issue in the given (or default) project; markdown
    // description is converted to ADF.
    async createIssue(input: CreateInput): Promise<CommandOutput<unknown>> {
      const project = input.project || options.config.defaultProject;
      if (!project) {
        throw new Error("jira-create requires --project or ATLASSIAN_DEFAULT_PROJECT");
      }
      const request = createRequest(options.config, "POST", "/rest/api/3/issue", {
        fields: {
          project: { key: project },
          issuetype: { name: input.type },
          summary: input.summary,
          ...(input.description ? { description: markdownToAdf(input.description) } : {}),
        },
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await send("POST", "/rest/api/3/issue", request.body);
      return { ok: true, data: raw };
    },
    // Update summary and/or description; rejects an empty update up front.
    async updateIssue(input: UpdateInput): Promise<CommandOutput<unknown>> {
      const fields: Record<string, unknown> = {};
      if (input.summary) {
        fields.summary = input.summary;
      }
      if (input.description) {
        fields.description = markdownToAdf(input.description);
      }
      if (Object.keys(fields).length === 0) {
        throw new Error("jira-update requires --summary and/or --description-file");
      }
      const request = createRequest(options.config, "PUT", `/rest/api/3/issue/${input.issue}`, {
        fields,
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      await send("PUT", `/rest/api/3/issue/${input.issue}`, request.body);
      return {
        ok: true,
        data: {
          issue: input.issue,
          updated: true,
        },
      };
    },
    // Append a markdown comment (converted to ADF) to an issue.
    async commentIssue(input: CommentInput): Promise<CommandOutput<unknown>> {
      const request = createRequest(options.config, "POST", `/rest/api/3/issue/${input.issue}/comment`, {
        body: markdownToAdf(input.body),
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await send("POST", `/rest/api/3/issue/${input.issue}/comment`, request.body);
      return {
        ok: true,
        data: raw,
      };
    },
    // Execute a workflow transition by its ID.
    async transitionIssue(input: TransitionInput): Promise<CommandOutput<unknown>> {
      const request = createRequest(
        options.config,
        "POST",
        `/rest/api/3/issue/${input.issue}/transitions`,
        {
          transition: {
            id: input.transition,
          },
        },
      );
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      await send("POST", `/rest/api/3/issue/${input.issue}/transitions`, request.body);
      return {
        ok: true,
        data: {
          issue: input.issue,
          transitioned: true,
          transition: input.transition,
        },
      };
    },
  };
}

View File

@@ -0,0 +1,44 @@
import type { CommandOutput, OutputFormat, Writer } from "./types.js";
/**
 * Human-readable rendering for the common payload shapes: issue lists,
 * a single issue, and transition lists. Any other shape falls back to
 * pretty-printed JSON of the whole payload.
 */
function renderText(payload: CommandOutput<unknown>) {
  const data = payload.data as Record<string, unknown>;
  const { issues, issue, transitions } = (data ?? {}) as {
    issues?: unknown;
    issue?: unknown;
    transitions?: unknown;
  };
  if (Array.isArray(issues)) {
    const rows = issues.map((entry) => {
      const row = entry as Record<string, string>;
      return `${row.key} [${row.status}] ${row.issueType} - ${row.summary}`;
    });
    return rows.join("\n");
  }
  if (issue && typeof issue === "object") {
    const row = issue as Record<string, string>;
    return [row.key, `${row.issueType} | ${row.status}`, row.summary, row.url].join("\n");
  }
  if (Array.isArray(transitions)) {
    const rows = transitions.map((entry) => {
      const row = entry as Record<string, string>;
      return `${row.id} ${row.name} -> ${row.toStatus}`;
    });
    return rows.join("\n");
  }
  return JSON.stringify(payload, null, 2);
}
export function writeOutput(
writer: Writer,
payload: CommandOutput<unknown>,
format: OutputFormat = "json",
) {
const body = format === "text" ? renderText(payload) : JSON.stringify(payload, null, 2);
writer.write(`${body}\n`);
}

View File

@@ -0,0 +1,85 @@
import { readWorkspaceFile } from "./files.js";
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
// Allow-listed API path prefixes for raw requests, per product.
const JIRA_ALLOWED_PREFIXES = ["/rest/api/3/"] as const;
const CONFLUENCE_ALLOWED_PREFIXES = ["/wiki/api/v2/", "/wiki/rest/api/"] as const;
// Parameters for one raw call; `cwd` bounds the optional JSON body file.
type RawInput = {
  product: "jira" | "confluence";
  method: string;
  path: string;
  bodyFile?: string;
  cwd: string;
  dryRun?: boolean;
};
/** Allow-listed API path prefixes for the given product. */
function getAllowedPrefixes(product: RawInput["product"]) {
  if (product === "jira") {
    return JIRA_ALLOWED_PREFIXES;
  }
  return CONFLUENCE_ALLOWED_PREFIXES;
}
/** Resolve a validated path against the product-specific base URL. */
function buildUrl(config: AtlassianConfig, product: RawInput["product"], path: string) {
  const base = product === "jira" ? config.jiraBaseUrl : config.confluenceBaseUrl;
  const resolved = new URL(path, `${base}/`);
  return resolved.toString();
}
function validateMethod(method: string): asserts method is "GET" | "POST" | "PUT" {
if (!["GET", "POST", "PUT"].includes(method)) {
throw new Error("raw only allows GET, POST, and PUT");
}
}
function validatePath(product: RawInput["product"], path: string) {
const allowedPrefixes = getAllowedPrefixes(product);
if (!allowedPrefixes.some((prefix) => path.startsWith(prefix))) {
throw new Error(`raw path is not allowed for ${product}: ${path}`);
}
}
// Loads and JSON-parses an optional request body from a workspace-relative
// file; returns undefined when no body file was supplied.
async function readRawBody(bodyFile: string | undefined, cwd: string) {
  return bodyFile ? (JSON.parse(await readWorkspaceFile(bodyFile, cwd)) as unknown) : undefined;
}
// Executes (or, with dryRun, previews) a bounded raw Atlassian API request.
// The method and path are validated before any file I/O or network traffic;
// dry runs return the would-be request instead of sending it.
export async function runRawCommand(
  config: AtlassianConfig,
  fetchImpl: FetchLike | undefined,
  input: RawInput,
): Promise<CommandOutput<unknown>> {
  validateMethod(input.method);
  validatePath(input.product, input.path);
  const body = await readRawBody(input.bodyFile, input.cwd);
  const request: { method: string; url: string; body?: unknown } = {
    method: input.method,
    url: buildUrl(config, input.product, input.path),
  };
  if (body !== undefined) {
    request.body = body;
  }
  if (input.dryRun) {
    return { ok: true, dryRun: true, data: request };
  }
  const responseData = await sendJsonRequest({
    config,
    fetchImpl,
    url: request.url,
    method: input.method,
    body,
    errorPrefix: "Raw request failed",
  });
  return { ok: true, data: responseData };
}

View File

@@ -0,0 +1,35 @@
// Resolved Atlassian connection settings (produced by loadConfig).
export type AtlassianConfig = {
  baseUrl: string;
  // Jira and Confluence may be addressed separately; both fall back to baseUrl.
  jiraBaseUrl: string;
  confluenceBaseUrl: string;
  email: string;
  apiToken: string;
  // Optional defaults used when a command omits --project / --space.
  defaultProject?: string;
  defaultSpace?: string;
};

// Envelope returned by every CLI command.
export type CommandOutput<T> = {
  ok: boolean;
  data: T;
  // Set when the command previewed a request instead of sending it.
  dryRun?: boolean;
  // Original upstream payload, attached by some read commands.
  raw?: unknown;
};

// Normalized Jira issue shape used in command output.
export type JiraIssueSummary = {
  key: string;
  summary: string;
  issueType: string;
  status: string;
  assignee?: string;
  created: string;
  updated: string;
  url: string;
};

// Minimal output sink; process.stdout satisfies this shape.
export type Writer = {
  write(chunk: string | Uint8Array): unknown;
};

// Injectable fetch implementation (for tests); defaults to global fetch.
export type FetchLike = typeof fetch;

export type OutputFormat = "json" | "text";

View File

@@ -0,0 +1,15 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": true,
"skipLibCheck": true,
"resolveJsonModule": true,
"types": ["node"],
"outDir": "dist"
},
"include": ["src/**/*.ts", "scripts/**/*.ts", "tests/**/*.ts"]
}

View File

@@ -0,0 +1,93 @@
---
name: atlassian
description: Interact with Atlassian Cloud Jira and Confluence through a portable task-oriented CLI for search, issue/page edits, comments, transitions, and bounded raw requests.
---
# Atlassian (Cursor Agent CLI)
Portable Atlassian workflows for Cursor Agent CLI using a shared TypeScript CLI.
## Requirements
- Cursor Agent CLI skill discovery via `.cursor/skills/` or `~/.cursor/skills/`
- Node.js 20+
- `pnpm`
- Atlassian Cloud account access
- `ATLASSIAN_BASE_URL`
- `ATLASSIAN_EMAIL`
- `ATLASSIAN_API_TOKEN`
The `ATLASSIAN_*` values may come from the shell environment or a `.env` file in the installed `scripts/` folder.
## First-Time Setup
Repo-local install:
```bash
mkdir -p .cursor/skills/atlassian
cp -R skills/atlassian/cursor/* .cursor/skills/atlassian/
cd .cursor/skills/atlassian/scripts
pnpm install
```
Global install:
```bash
mkdir -p ~/.cursor/skills/atlassian
cp -R skills/atlassian/cursor/* ~/.cursor/skills/atlassian/
cd ~/.cursor/skills/atlassian/scripts
pnpm install
```
## Prerequisite Check (MANDATORY)
Repo-local form:
```bash
cursor-agent --version
cd .cursor/skills/atlassian/scripts
node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')"
node -e 'require("dotenv").config({ path: ".env" }); const required = ["ATLASSIAN_BASE_URL", "ATLASSIAN_EMAIL", "ATLASSIAN_API_TOKEN"]; const missing = required.filter((key) => !(process.env[key] || "").trim()); if (missing.length) { console.error("Missing required Atlassian config: " + missing.join(", ")); process.exit(1); } console.log("OK: Atlassian config present")'
pnpm atlassian health
```
If any check fails, stop and return:
`Missing dependency/config: atlassian requires installed CLI dependencies and valid Atlassian Cloud credentials. Configure ATLASSIAN_* in the shell environment or scripts/.env, then retry.`
## Supported Commands
- `pnpm atlassian health`
- `pnpm atlassian jira-search --jql "..."`
- `pnpm atlassian jira-get --issue ABC-123`
- `pnpm atlassian jira-create ... [--dry-run]`
- `pnpm atlassian jira-update ... [--dry-run]`
- `pnpm atlassian jira-comment ... [--dry-run]`
- `pnpm atlassian jira-transitions --issue ABC-123`
- `pnpm atlassian jira-transition ... [--dry-run]`
- `pnpm atlassian conf-search --query "..."`
- `pnpm atlassian conf-get --page 12345`
- `pnpm atlassian conf-create ... [--dry-run]`
- `pnpm atlassian conf-update ... [--dry-run]`
- `pnpm atlassian conf-comment ... [--dry-run]`
- `pnpm atlassian conf-children --page 12345`
- `pnpm atlassian raw --product jira|confluence --method GET|POST|PUT --path ...`
## Usage Examples
- `pnpm atlassian jira-get --issue ENG-123`
- `pnpm atlassian conf-search --query "title ~ \"Runbook\"" --max-results 10 --start-at 0`
- `pnpm atlassian raw --product confluence --method POST --path "/wiki/api/v2/pages" --body-file page.json --dry-run`
## Safety Rules
- Prefer JSON output for agent use.
- Use `--dry-run` before writes unless the user explicitly wants the change applied.
- Keep `--body-file` inputs within the current workspace.
- Use `raw` only for user-requested unsupported endpoints.
- `raw` does not allow `DELETE`.
## Notes
- Cursor discovers this skill from `.cursor/skills/` or `~/.cursor/skills/`.
- Atlassian Cloud is the supported platform in v1.

View File

@@ -0,0 +1,20 @@
{
"name": "atlassian-skill-scripts",
"version": "1.0.0",
"description": "Shared runtime for the Atlassian skill",
"type": "module",
"scripts": {
"atlassian": "tsx src/cli.ts",
"typecheck": "tsc --noEmit"
},
"dependencies": {
"commander": "^13.1.0",
"dotenv": "^16.4.7"
},
"devDependencies": {
"@types/node": "^24.3.0",
"tsx": "^4.20.5",
"typescript": "^5.9.2"
},
"packageManager": "pnpm@10.18.1+sha512.77a884a165cbba2d8d1c19e3b4880eee6d2fcabd0d879121e282196b80042351d5eb3ca0935fa599da1dc51265cc68816ad2bddd2a2de5ea9fdf92adbec7cd34"
}

View File

@@ -0,0 +1,361 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
importers:
.:
dependencies:
commander:
specifier: ^13.1.0
version: 13.1.0
dotenv:
specifier: ^16.4.7
version: 16.6.1
devDependencies:
'@types/node':
specifier: ^24.3.0
version: 24.12.0
tsx:
specifier: ^4.20.5
version: 4.21.0
typescript:
specifier: ^5.9.2
version: 5.9.3
packages:
'@esbuild/aix-ppc64@0.27.3':
resolution: {integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==}
engines: {node: '>=18'}
cpu: [ppc64]
os: [aix]
'@esbuild/android-arm64@0.27.3':
resolution: {integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [android]
'@esbuild/android-arm@0.27.3':
resolution: {integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==}
engines: {node: '>=18'}
cpu: [arm]
os: [android]
'@esbuild/android-x64@0.27.3':
resolution: {integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==}
engines: {node: '>=18'}
cpu: [x64]
os: [android]
'@esbuild/darwin-arm64@0.27.3':
resolution: {integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [darwin]
'@esbuild/darwin-x64@0.27.3':
resolution: {integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==}
engines: {node: '>=18'}
cpu: [x64]
os: [darwin]
'@esbuild/freebsd-arm64@0.27.3':
resolution: {integrity: sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==}
engines: {node: '>=18'}
cpu: [arm64]
os: [freebsd]
'@esbuild/freebsd-x64@0.27.3':
resolution: {integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==}
engines: {node: '>=18'}
cpu: [x64]
os: [freebsd]
'@esbuild/linux-arm64@0.27.3':
resolution: {integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [linux]
'@esbuild/linux-arm@0.27.3':
resolution: {integrity: sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==}
engines: {node: '>=18'}
cpu: [arm]
os: [linux]
'@esbuild/linux-ia32@0.27.3':
resolution: {integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==}
engines: {node: '>=18'}
cpu: [ia32]
os: [linux]
'@esbuild/linux-loong64@0.27.3':
resolution: {integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==}
engines: {node: '>=18'}
cpu: [loong64]
os: [linux]
'@esbuild/linux-mips64el@0.27.3':
resolution: {integrity: sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==}
engines: {node: '>=18'}
cpu: [mips64el]
os: [linux]
'@esbuild/linux-ppc64@0.27.3':
resolution: {integrity: sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==}
engines: {node: '>=18'}
cpu: [ppc64]
os: [linux]
'@esbuild/linux-riscv64@0.27.3':
resolution: {integrity: sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==}
engines: {node: '>=18'}
cpu: [riscv64]
os: [linux]
'@esbuild/linux-s390x@0.27.3':
resolution: {integrity: sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==}
engines: {node: '>=18'}
cpu: [s390x]
os: [linux]
'@esbuild/linux-x64@0.27.3':
resolution: {integrity: sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==}
engines: {node: '>=18'}
cpu: [x64]
os: [linux]
'@esbuild/netbsd-arm64@0.27.3':
resolution: {integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==}
engines: {node: '>=18'}
cpu: [arm64]
os: [netbsd]
'@esbuild/netbsd-x64@0.27.3':
resolution: {integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==}
engines: {node: '>=18'}
cpu: [x64]
os: [netbsd]
'@esbuild/openbsd-arm64@0.27.3':
resolution: {integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==}
engines: {node: '>=18'}
cpu: [arm64]
os: [openbsd]
'@esbuild/openbsd-x64@0.27.3':
resolution: {integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==}
engines: {node: '>=18'}
cpu: [x64]
os: [openbsd]
'@esbuild/openharmony-arm64@0.27.3':
resolution: {integrity: sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==}
engines: {node: '>=18'}
cpu: [arm64]
os: [openharmony]
'@esbuild/sunos-x64@0.27.3':
resolution: {integrity: sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==}
engines: {node: '>=18'}
cpu: [x64]
os: [sunos]
'@esbuild/win32-arm64@0.27.3':
resolution: {integrity: sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==}
engines: {node: '>=18'}
cpu: [arm64]
os: [win32]
'@esbuild/win32-ia32@0.27.3':
resolution: {integrity: sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==}
engines: {node: '>=18'}
cpu: [ia32]
os: [win32]
'@esbuild/win32-x64@0.27.3':
resolution: {integrity: sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==}
engines: {node: '>=18'}
cpu: [x64]
os: [win32]
'@types/node@24.12.0':
resolution: {integrity: sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==}
commander@13.1.0:
resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==}
engines: {node: '>=18'}
dotenv@16.6.1:
resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==}
engines: {node: '>=12'}
esbuild@0.27.3:
resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==}
engines: {node: '>=18'}
hasBin: true
fsevents@2.3.3:
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
os: [darwin]
get-tsconfig@4.13.6:
resolution: {integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==}
resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
tsx@4.21.0:
resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==}
engines: {node: '>=18.0.0'}
hasBin: true
typescript@5.9.3:
resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
engines: {node: '>=14.17'}
hasBin: true
undici-types@7.16.0:
resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==}
snapshots:
'@esbuild/aix-ppc64@0.27.3':
optional: true
'@esbuild/android-arm64@0.27.3':
optional: true
'@esbuild/android-arm@0.27.3':
optional: true
'@esbuild/android-x64@0.27.3':
optional: true
'@esbuild/darwin-arm64@0.27.3':
optional: true
'@esbuild/darwin-x64@0.27.3':
optional: true
'@esbuild/freebsd-arm64@0.27.3':
optional: true
'@esbuild/freebsd-x64@0.27.3':
optional: true
'@esbuild/linux-arm64@0.27.3':
optional: true
'@esbuild/linux-arm@0.27.3':
optional: true
'@esbuild/linux-ia32@0.27.3':
optional: true
'@esbuild/linux-loong64@0.27.3':
optional: true
'@esbuild/linux-mips64el@0.27.3':
optional: true
'@esbuild/linux-ppc64@0.27.3':
optional: true
'@esbuild/linux-riscv64@0.27.3':
optional: true
'@esbuild/linux-s390x@0.27.3':
optional: true
'@esbuild/linux-x64@0.27.3':
optional: true
'@esbuild/netbsd-arm64@0.27.3':
optional: true
'@esbuild/netbsd-x64@0.27.3':
optional: true
'@esbuild/openbsd-arm64@0.27.3':
optional: true
'@esbuild/openbsd-x64@0.27.3':
optional: true
'@esbuild/openharmony-arm64@0.27.3':
optional: true
'@esbuild/sunos-x64@0.27.3':
optional: true
'@esbuild/win32-arm64@0.27.3':
optional: true
'@esbuild/win32-ia32@0.27.3':
optional: true
'@esbuild/win32-x64@0.27.3':
optional: true
'@types/node@24.12.0':
dependencies:
undici-types: 7.16.0
commander@13.1.0: {}
dotenv@16.6.1: {}
esbuild@0.27.3:
optionalDependencies:
'@esbuild/aix-ppc64': 0.27.3
'@esbuild/android-arm': 0.27.3
'@esbuild/android-arm64': 0.27.3
'@esbuild/android-x64': 0.27.3
'@esbuild/darwin-arm64': 0.27.3
'@esbuild/darwin-x64': 0.27.3
'@esbuild/freebsd-arm64': 0.27.3
'@esbuild/freebsd-x64': 0.27.3
'@esbuild/linux-arm': 0.27.3
'@esbuild/linux-arm64': 0.27.3
'@esbuild/linux-ia32': 0.27.3
'@esbuild/linux-loong64': 0.27.3
'@esbuild/linux-mips64el': 0.27.3
'@esbuild/linux-ppc64': 0.27.3
'@esbuild/linux-riscv64': 0.27.3
'@esbuild/linux-s390x': 0.27.3
'@esbuild/linux-x64': 0.27.3
'@esbuild/netbsd-arm64': 0.27.3
'@esbuild/netbsd-x64': 0.27.3
'@esbuild/openbsd-arm64': 0.27.3
'@esbuild/openbsd-x64': 0.27.3
'@esbuild/openharmony-arm64': 0.27.3
'@esbuild/sunos-x64': 0.27.3
'@esbuild/win32-arm64': 0.27.3
'@esbuild/win32-ia32': 0.27.3
'@esbuild/win32-x64': 0.27.3
fsevents@2.3.3:
optional: true
get-tsconfig@4.13.6:
dependencies:
resolve-pkg-maps: 1.0.0
resolve-pkg-maps@1.0.0: {}
tsx@4.21.0:
dependencies:
esbuild: 0.27.3
get-tsconfig: 4.13.6
optionalDependencies:
fsevents: 2.3.3
typescript@5.9.3: {}
undici-types@7.16.0: {}

View File

@@ -0,0 +1,92 @@
const TEXT_NODE = "text";

// Wraps plain text in an ADF text node.
function textNode(text: string) {
  return {
    type: TEXT_NODE,
    text,
  };
}

// Builds an ADF paragraph; consecutive lines are joined with hardBreak
// nodes, blank lines contribute only a break, and a paragraph with no
// children omits the `content` key entirely.
function paragraphNode(lines: string[]) {
  const children: Array<{ type: string; text?: string }> = [];
  for (const [position, line] of lines.entries()) {
    if (position > 0) {
      children.push({ type: "hardBreak" });
    }
    if (line.length > 0) {
      children.push(textNode(line));
    }
  }
  return {
    type: "paragraph",
    ...(children.length > 0 ? { content: children } : {}),
  };
}

// Converts a small markdown subset — ATX headings, `-`/`*` bullet lists,
// and paragraphs with hard line breaks — into an Atlassian Document
// Format (ADF) document (version 1).
export function markdownToAdf(input: string) {
  const lines = input.replace(/\r\n/g, "\n").split("\n");
  const blocks: Array<Record<string, unknown>> = [];
  let cursor = 0;
  while (cursor < lines.length) {
    const trimmed = lines[cursor]?.trimEnd() ?? "";
    // Blank lines separate blocks and produce no output of their own.
    if (trimmed.trim().length === 0) {
      cursor += 1;
      continue;
    }
    const headingMatch = trimmed.match(/^(#{1,6})\s+(.*)$/);
    if (headingMatch) {
      blocks.push({
        type: "heading",
        attrs: { level: headingMatch[1].length },
        content: [textNode(headingMatch[2])],
      });
      cursor += 1;
      continue;
    }
    if (/^[-*]\s+/.test(trimmed)) {
      // Consume the whole run of bullet lines into a single bulletList.
      const listItems: Array<Record<string, unknown>> = [];
      while (cursor < lines.length && /^[-*]\s+/.test(lines[cursor] ?? "")) {
        const itemText = (lines[cursor] ?? "").replace(/^[-*]\s+/, "");
        listItems.push({
          type: "listItem",
          content: [
            {
              type: "paragraph",
              content: [textNode(itemText)],
            },
          ],
        });
        cursor += 1;
      }
      blocks.push({
        type: "bulletList",
        content: listItems,
      });
      continue;
    }
    // Anything else: gather contiguous non-blank lines into one paragraph.
    const paragraphLines: string[] = [];
    while (cursor < lines.length && (lines[cursor]?.trim().length ?? 0) > 0) {
      paragraphLines.push(lines[cursor] ?? "");
      cursor += 1;
    }
    blocks.push(paragraphNode(paragraphLines));
  }
  return {
    type: "doc",
    version: 1,
    content: blocks,
  };
}

View File

@@ -0,0 +1,332 @@
import process from "node:process";
import { pathToFileURL } from "node:url";
import { Command } from "commander";
import { createConfluenceClient } from "./confluence.js";
import { loadConfig } from "./config.js";
import { readWorkspaceFile } from "./files.js";
import { runHealthCheck } from "./health.js";
import { createJiraClient } from "./jira.js";
import { writeOutput } from "./output.js";
import { runRawCommand } from "./raw.js";
import type { FetchLike, OutputFormat, Writer } from "./types.js";
// Dependency-injection surface for the CLI; every field falls back to the
// real process/global equivalent when omitted (see createRuntime).
type CliContext = {
  cwd?: string;
  env?: NodeJS.ProcessEnv;
  fetchImpl?: FetchLike;
  stdout?: Writer;
  stderr?: Writer;
};
// Narrows a user-supplied format flag to a supported OutputFormat;
// anything other than exactly "text" (including undefined) means "json".
function resolveFormat(format: string | undefined): OutputFormat {
  if (format === "text") {
    return "text";
  }
  return "json";
}
// Resolves the CLI's runtime dependencies, substituting real process values
// for anything the caller did not inject, and memoizing configuration and
// client construction so each is created at most once per program run.
function createRuntime(context: CliContext) {
  const cwd = context.cwd ?? process.cwd();
  const env = context.env ?? process.env;
  const stdout = context.stdout ?? process.stdout;
  const stderr = context.stderr ?? process.stderr;
  let cachedConfig: ReturnType<typeof loadConfig> | undefined;
  let cachedJira: ReturnType<typeof createJiraClient> | undefined;
  let cachedConfluence: ReturnType<typeof createConfluenceClient> | undefined;
  // Parse configuration lazily, on first use.
  function getConfig() {
    if (cachedConfig === undefined) {
      cachedConfig = loadConfig(env, { cwd });
    }
    return cachedConfig;
  }
  function getJiraClient() {
    if (cachedJira === undefined) {
      cachedJira = createJiraClient({
        config: getConfig(),
        fetchImpl: context.fetchImpl,
      });
    }
    return cachedJira;
  }
  function getConfluenceClient() {
    if (cachedConfluence === undefined) {
      cachedConfluence = createConfluenceClient({
        config: getConfig(),
        fetchImpl: context.fetchImpl,
      });
    }
    return cachedConfluence;
  }
  // Reads an optional workspace-relative file; undefined path -> undefined.
  async function readBodyFile(filePath: string | undefined) {
    return filePath ? readWorkspaceFile(filePath, cwd) : undefined;
  }
  return {
    cwd,
    stdout,
    stderr,
    readBodyFile,
    getConfig,
    getJiraClient,
    getConfluenceClient,
    fetchImpl: context.fetchImpl,
  };
}
// Assembles the commander program for the Atlassian CLI: a health check,
// Confluence page commands, a bounded `raw` escape hatch, and Jira issue
// commands. Every action resolves its client lazily through the runtime
// and prints a CommandOutput envelope via writeOutput; mutating commands
// accept --dry-run to preview the request instead of sending it.
export function buildProgram(context: CliContext = {}) {
  const runtime = createRuntime(context);
  const program = new Command()
    .name("atlassian")
    .description("Portable Atlassian CLI for multi-agent skills")
    .version("0.1.0");
  // Connectivity/configuration check.
  program
    .command("health")
    .description("Validate configuration and Atlassian connectivity")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runHealthCheck(runtime.getConfig(), runtime.fetchImpl);
      writeOutput(
        runtime.stdout,
        payload,
        resolveFormat(options.format),
      );
    });
  // --- Confluence commands ---
  program
    .command("conf-search")
    .requiredOption("--query <query>", "CQL search query")
    .option("--max-results <number>", "Maximum results to return", "50")
    .option("--start-at <number>", "Result offset", "0")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().searchPages({
        query: options.query,
        maxResults: Number(options.maxResults),
        startAt: Number(options.startAt),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-get")
    .requiredOption("--page <page>", "Confluence page ID")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().getPage(options.page);
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-create")
    .requiredOption("--title <title>", "Confluence page title")
    .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
    .option("--space <space>", "Confluence space ID")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().createPage({
        space: options.space,
        title: options.title,
        // --body-file is required, so the read result is always a string.
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-update")
    .requiredOption("--page <page>", "Confluence page ID")
    .requiredOption("--title <title>", "Confluence page title")
    .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().updatePage({
        pageId: options.page,
        title: options.title,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-comment")
    .requiredOption("--page <page>", "Confluence page ID")
    .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().commentPage({
        pageId: options.page,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-children")
    .requiredOption("--page <page>", "Confluence page ID")
    .option("--max-results <number>", "Maximum results to return", "50")
    .option("--start-at <number>", "Cursor/start token", "0")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().listChildren(
        options.page,
        Number(options.maxResults),
        Number(options.startAt),
      );
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  // Bounded escape hatch for endpoints the task commands do not cover.
  program
    .command("raw")
    .requiredOption("--product <product>", "jira or confluence")
    .requiredOption("--method <method>", "GET, POST, or PUT")
    .requiredOption("--path <path>", "Validated API path")
    .option("--body-file <path>", "Workspace-relative JSON file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runRawCommand(runtime.getConfig(), runtime.fetchImpl, {
        product: options.product,
        // Normalize so lowercase method flags pass validation.
        method: String(options.method).toUpperCase(),
        path: options.path,
        bodyFile: options.bodyFile,
        cwd: runtime.cwd,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  // --- Jira commands ---
  program
    .command("jira-search")
    .requiredOption("--jql <jql>", "JQL expression to execute")
    .option("--max-results <number>", "Maximum results to return", "50")
    .option("--start-at <number>", "Result offset", "0")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().searchIssues({
        jql: options.jql,
        maxResults: Number(options.maxResults),
        startAt: Number(options.startAt),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-get")
    .requiredOption("--issue <issue>", "Issue key")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().getIssue(options.issue);
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-create")
    .requiredOption("--type <type>", "Issue type name")
    .requiredOption("--summary <summary>", "Issue summary")
    .option("--project <project>", "Project key")
    .option("--description-file <path>", "Workspace-relative markdown/text file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().createIssue({
        project: options.project,
        type: options.type,
        summary: options.summary,
        // Optional: undefined when --description-file is omitted.
        description: await runtime.readBodyFile(options.descriptionFile),
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-update")
    .requiredOption("--issue <issue>", "Issue key")
    .option("--summary <summary>", "Updated summary")
    .option("--description-file <path>", "Workspace-relative markdown/text file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().updateIssue({
        issue: options.issue,
        summary: options.summary,
        description: await runtime.readBodyFile(options.descriptionFile),
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-comment")
    .requiredOption("--issue <issue>", "Issue key")
    .requiredOption("--body-file <path>", "Workspace-relative markdown/text file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().commentIssue({
        issue: options.issue,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-transitions")
    .requiredOption("--issue <issue>", "Issue key")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().getTransitions(options.issue);
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-transition")
    .requiredOption("--issue <issue>", "Issue key")
    .requiredOption("--transition <transition>", "Transition ID")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().transitionIssue({
        issue: options.issue,
        transition: options.transition,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  return program;
}
// Builds the CLI program and executes it against the given argv.
export async function runCli(argv = process.argv, context: CliContext = {}) {
  await buildProgram(context).parseAsync(argv);
}
// Run the CLI only when this module is the entry script (not when imported,
// e.g. by tests): compare this module's URL against the executed script path.
const isDirectExecution =
  Boolean(process.argv[1]) && import.meta.url === pathToFileURL(process.argv[1]).href;
if (isDirectExecution) {
  runCli().catch((error: unknown) => {
    const message = error instanceof Error ? error.message : String(error);
    process.stderr.write(`${message}\n`);
    // Signal failure via exitCode instead of forcing an immediate exit.
    process.exitCode = 1;
  });
}

View File

@@ -0,0 +1,52 @@
import path from "node:path";
import { config as loadDotEnv } from "dotenv";
import type { AtlassianConfig } from "./types.js";
// Strips all trailing slashes so paths can be appended predictably.
function normalizeBaseUrl(value: string) {
  let end = value.length;
  while (end > 0 && value[end - 1] === "/") {
    end -= 1;
  }
  return value.slice(0, end);
}
// Reads a required environment variable, trimming surrounding whitespace;
// throws when the variable is absent or blank.
function readRequired(env: NodeJS.ProcessEnv, key: string) {
  const raw = env[key];
  const trimmed = raw === undefined ? undefined : raw.trim();
  if (!trimmed) {
    throw new Error(`Missing required environment variable: ${key}`);
  }
  return trimmed;
}
// Loads Atlassian configuration from the environment, first merging values
// from a `.env` file in `options.cwd` (override: false, so variables already
// present in `env` win over the file). Throws if a required variable is
// missing or blank.
export function loadConfig(
  env: NodeJS.ProcessEnv = process.env,
  options?: {
    cwd?: string;
  },
): AtlassianConfig {
  loadDotEnv({
    path: path.resolve(options?.cwd ?? process.cwd(), ".env"),
    // Write .env values into the caller-provided env object so subsequent
    // reads below observe them.
    processEnv: env as Record<string, string>,
    override: false,
  });
  const baseUrl = normalizeBaseUrl(readRequired(env, "ATLASSIAN_BASE_URL"));
  return {
    baseUrl,
    // Product-specific hosts are optional and fall back to the shared base URL.
    jiraBaseUrl: normalizeBaseUrl(env.ATLASSIAN_JIRA_BASE_URL?.trim() || baseUrl),
    confluenceBaseUrl: normalizeBaseUrl(env.ATLASSIAN_CONFLUENCE_BASE_URL?.trim() || baseUrl),
    email: readRequired(env, "ATLASSIAN_EMAIL"),
    apiToken: readRequired(env, "ATLASSIAN_API_TOKEN"),
    // Blank strings are treated as unset.
    defaultProject: env.ATLASSIAN_DEFAULT_PROJECT?.trim() || undefined,
    defaultSpace: env.ATLASSIAN_DEFAULT_SPACE?.trim() || undefined,
  };
}
// Builds the HTTP Basic Authorization header value from the Atlassian
// account email and API token (base64 of "email:token").
export function createBasicAuthHeader(config: {
  email: string;
  apiToken: string;
  [key: string]: unknown;
}) {
  const credentials = `${config.email}:${config.apiToken}`;
  const encoded = Buffer.from(credentials).toString("base64");
  return `Basic ${encoded}`;
}

View File

@@ -0,0 +1,292 @@
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
// Dependencies for createConfluenceClient; fetchImpl is injectable for tests.
type ConfluenceClientOptions = {
  config: AtlassianConfig;
  fetchImpl?: FetchLike;
};

// CQL search parameters with offset-based paging.
type SearchInput = {
  query: string;
  maxResults: number;
  startAt: number;
};

// Page creation input; `space` falls back to the configured default space.
type CreateInput = {
  space?: string;
  title: string;
  // Confluence storage-format body.
  body: string;
  dryRun?: boolean;
};

// Full-page update input (title and body are replaced together).
type UpdateInput = {
  pageId: string;
  title: string;
  body: string;
  dryRun?: boolean;
};

// Footer-comment input for an existing page.
type CommentInput = {
  pageId: string;
  body: string;
  dryRun?: boolean;
};

// Normalized page shape emitted by this client (see normalizePage).
type PageSummary = {
  id: string;
  title: string;
  type: string;
  status?: string;
  spaceId?: string;
  url?: string;
};
// Joins an absolute API path onto the Confluence base URL.
function buildUrl(baseUrl: string, path: string) {
  const base = `${baseUrl}/`;
  return new URL(path, base).toString();
}
// Projects a raw Confluence page payload into the summary shape used in
// command output. Optional keys (status, spaceId, excerpt, url) are emitted
// only when present on the source payload; the url is built from the base
// URL plus the page's relative `_links.webui` path.
function normalizePage(baseUrl: string, page: Record<string, unknown>, excerpt?: string) {
  const links = (page._links ?? {}) as Record<string, unknown>;
  const summary: Record<string, unknown> = {
    id: String(page.id ?? ""),
    title: String(page.title ?? ""),
    type: String(page.type ?? "page"),
  };
  if (page.status) {
    summary.status = String(page.status);
  }
  if (page.spaceId) {
    summary.spaceId = String(page.spaceId);
  }
  if (excerpt) {
    summary.excerpt = excerpt;
  }
  if (links.webui) {
    summary.url = `${baseUrl}${String(links.webui)}`;
  }
  return summary;
}
/**
 * Creates the Confluence command client used by the CLI.
 * Every call funnels through sendJsonRequest with the shared Basic-auth
 * config; write commands honor `dryRun` by returning the would-be request
 * payload instead of performing the remote mutation.
 */
export function createConfluenceClient(options: ConfluenceClientOptions) {
  const config = options.config;
  // Loads a page with its storage-format body so updatePage can read the
  // current version number, status, and space before issuing its PUT.
  async function getPageForUpdate(pageId: string) {
    return (await sendJsonRequest({
      config,
      fetchImpl: options.fetchImpl,
      url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${pageId}?body-format=storage`),
      method: "GET",
      errorPrefix: "Confluence request failed",
    })) as Record<string, unknown>;
  }
  return {
    // CQL search via the v1 REST search endpoint; hits are normalized summaries.
    async searchPages(input: SearchInput): Promise<CommandOutput<unknown>> {
      const url = new URL("/wiki/rest/api/search", `${config.confluenceBaseUrl}/`);
      url.searchParams.set("cql", input.query);
      url.searchParams.set("limit", String(input.maxResults));
      url.searchParams.set("start", String(input.startAt));
      const raw = (await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: url.toString(),
        method: "GET",
        errorPrefix: "Confluence request failed",
      })) as Record<string, unknown>;
      const results = Array.isArray(raw.results) ? raw.results : [];
      return {
        ok: true,
        data: {
          // Search hits wrap the page under `content`; the excerpt lives on
          // the hit itself.
          pages: results.map((entry) => {
            const result = entry as Record<string, unknown>;
            return normalizePage(
              config.baseUrl,
              (result.content ?? {}) as Record<string, unknown>,
              result.excerpt ? String(result.excerpt) : undefined,
            );
          }),
          startAt: Number(raw.start ?? input.startAt),
          maxResults: Number(raw.limit ?? input.maxResults),
          total: Number(raw.totalSize ?? raw.size ?? results.length),
        },
      };
    },
    // Fetches a single page plus its storage body and version number.
    async getPage(pageId: string): Promise<CommandOutput<unknown>> {
      const raw = (await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${pageId}?body-format=storage`),
        method: "GET",
        errorPrefix: "Confluence request failed",
      })) as Record<string, unknown>;
      const body = ((raw.body ?? {}) as Record<string, unknown>).storage as Record<string, unknown> | undefined;
      return {
        ok: true,
        data: {
          page: {
            ...normalizePage(config.baseUrl, raw),
            version: Number((((raw.version ?? {}) as Record<string, unknown>).number ?? 0)),
            body: body?.value ? String(body.value) : "",
          },
        },
        raw,
      };
    },
    // Lists a page's direct children.
    // NOTE(review): `startAt` is sent as the v2 `cursor` query parameter, but
    // the cursor returned by this API (see `nextCursor` below) is an opaque
    // token rather than a numeric offset — confirm what callers pass here.
    async listChildren(pageId: string, maxResults: number, startAt: number): Promise<CommandOutput<unknown>> {
      const url = new URL(`/wiki/api/v2/pages/${pageId}/direct-children`, `${config.confluenceBaseUrl}/`);
      url.searchParams.set("limit", String(maxResults));
      url.searchParams.set("cursor", String(startAt));
      const raw = (await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: url.toString(),
        method: "GET",
        errorPrefix: "Confluence request failed",
      })) as Record<string, unknown>;
      const results = Array.isArray(raw.results) ? raw.results : [];
      const links = (raw._links ?? {}) as Record<string, unknown>;
      return {
        ok: true,
        data: {
          pages: results.map((page) => normalizePage(config.baseUrl, page as Record<string, unknown>)),
          nextCursor: links.next ? String(links.next) : null,
        },
      };
    },
    // Creates a page in `input.space`, falling back to the configured default.
    async createPage(input: CreateInput): Promise<CommandOutput<unknown>> {
      const spaceId = input.space || config.defaultSpace;
      if (!spaceId) {
        throw new Error("conf-create requires --space or ATLASSIAN_DEFAULT_SPACE");
      }
      const request = {
        method: "POST" as const,
        url: buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/pages"),
        body: {
          spaceId,
          title: input.title,
          status: "current",
          body: {
            representation: "storage",
            value: input.body,
          },
        },
      };
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: request.url,
        method: request.method,
        body: request.body,
        errorPrefix: "Confluence request failed",
      });
      return {
        ok: true,
        data: raw,
      };
    },
    // Replaces a page's title/body, bumping the version read from the server.
    async updatePage(input: UpdateInput): Promise<CommandOutput<unknown>> {
      const currentPage = await getPageForUpdate(input.pageId);
      const version = (((currentPage.version ?? {}) as Record<string, unknown>).number ?? 0) as number;
      const spaceId = String(currentPage.spaceId ?? "");
      const request = {
        method: "PUT" as const,
        url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${input.pageId}`),
        body: {
          id: input.pageId,
          status: String(currentPage.status ?? "current"),
          title: input.title,
          spaceId,
          version: {
            number: Number(version) + 1,
          },
          body: {
            representation: "storage",
            value: input.body,
          },
        },
      };
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: request.url,
        method: request.method,
        body: request.body,
        errorPrefix: "Confluence request failed",
        // 409 means the page changed between our read and this write.
        handleResponseError(response) {
          if (response.status === 409) {
            return new Error(`Confluence update conflict: page ${input.pageId} was updated by someone else`);
          }
          return undefined;
        },
      });
      return {
        ok: true,
        data: raw,
      };
    },
    // Adds a footer comment (storage format) to a page.
    async commentPage(input: CommentInput): Promise<CommandOutput<unknown>> {
      const request = {
        method: "POST" as const,
        url: buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/footer-comments"),
        body: {
          pageId: input.pageId,
          body: {
            representation: "storage",
            value: input.body,
          },
        },
      };
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: request.url,
        method: request.method,
        body: request.body,
        errorPrefix: "Confluence request failed",
      });
      return {
        ok: true,
        data: raw,
      };
    },
  };
}

View File

@@ -0,0 +1,13 @@
import { readFile } from "node:fs/promises";
import path from "node:path";
/**
 * Reads a UTF-8 file, rejecting any path that resolves outside `cwd`.
 *
 * The escape check uses path.relative: a result of "..", a result beginning
 * with "../" (platform separator), or an absolute result (different root or
 * drive) means the target is outside the workspace. Note the previous
 * `relative.startsWith("..")` check also rejected legitimate files whose
 * names merely begin with ".." (e.g. "..note.txt"); the separator-aware
 * check fixes that false positive.
 *
 * @param filePath - path supplied by the user (relative or absolute)
 * @param cwd - workspace root the file must stay within
 * @returns the file contents as a string
 * @throws Error when the resolved path escapes the workspace
 */
export async function readWorkspaceFile(filePath: string, cwd: string) {
  const resolved = path.resolve(cwd, filePath);
  const relative = path.relative(cwd, resolved);
  const escapesWorkspace =
    relative === ".." ||
    relative.startsWith(`..${path.sep}`) ||
    path.isAbsolute(relative);
  if (escapesWorkspace) {
    throw new Error(`--body-file must stay within the active workspace: ${filePath}`);
  }
  return readFile(resolved, "utf8");
}

View File

@@ -0,0 +1,69 @@
import { createJsonHeaders, createStatusError } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
// Outcome of a single product probe: `status` is set when an HTTP response
// was received; `message` carries failure detail when `ok` is false.
type ProductHealth = {
  ok: boolean;
  status?: number;
  message?: string;
};
/** Resolves a health-probe path (with optional query string) against a product base URL. */
function buildUrl(baseUrl: string, path: string) {
  const target = new URL(path, baseUrl + "/");
  return target.toString();
}
/**
 * Probes Jira and Confluence with cheap authenticated GET requests and
 * reports per-product health alongside the effective configuration.
 * Overall `ok` is true only when both products respond successfully.
 */
export async function runHealthCheck(
  config: AtlassianConfig,
  fetchImpl: FetchLike | undefined,
): Promise<CommandOutput<unknown>> {
  const client = fetchImpl ?? globalThis.fetch;
  if (!client) {
    throw new Error("Fetch API is not available in this runtime");
  }
  // Issues one GET and folds the outcome into a ProductHealth record;
  // network failures are reported rather than thrown.
  const probe = async (product: "Jira" | "Confluence", url: string): Promise<ProductHealth> => {
    let response: Response;
    try {
      response = await client(url, {
        method: "GET",
        headers: createJsonHeaders(config, false),
      });
    } catch (error: unknown) {
      const message = error instanceof Error ? error.message : String(error);
      return { ok: false, message };
    }
    if (response.ok) {
      return { ok: true, status: response.status };
    }
    const failure = createStatusError(`${product} health check failed`, response);
    return { ok: false, status: response.status, message: failure.message };
  };
  const jira = await probe("Jira", buildUrl(config.jiraBaseUrl, "/rest/api/3/myself"));
  const confluence = await probe("Confluence", buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/spaces?limit=1"));
  return {
    ok: jira.ok && confluence.ok,
    data: {
      baseUrl: config.baseUrl,
      jiraBaseUrl: config.jiraBaseUrl,
      confluenceBaseUrl: config.confluenceBaseUrl,
      defaultProject: config.defaultProject,
      defaultSpace: config.defaultSpace,
      products: {
        jira,
        confluence,
      },
    },
  };
}

View File

@@ -0,0 +1,86 @@
import { createBasicAuthHeader } from "./config.js";
import type { AtlassianConfig, FetchLike } from "./types.js";
export type HttpMethod = "GET" | "POST" | "PUT";
/**
 * Builds the default header tuples (Accept + Basic Authorization),
 * adding a JSON Content-Type only when a request body will be sent.
 */
export function createJsonHeaders(config: AtlassianConfig, includeJsonBody: boolean) {
  const baseHeaders: Array<[string, string]> = [
    ["Accept", "application/json"],
    ["Authorization", createBasicAuthHeader(config)],
  ];
  if (!includeJsonBody) {
    return baseHeaders;
  }
  return [...baseHeaders, ["Content-Type", "application/json"] as [string, string]];
}
/**
 * Decodes an Atlassian API response: null for 204 No Content, parsed JSON
 * when the Content-Type says so, raw text otherwise. Malformed JSON is
 * surfaced as a descriptive Error.
 */
export async function parseResponse(response: Response) {
  if (response.status === 204) {
    return null;
  }
  const contentType = response.headers.get("content-type") ?? "";
  if (!contentType.includes("application/json")) {
    return response.text();
  }
  try {
    return await response.json();
  } catch {
    throw new Error("Malformed JSON response from Atlassian API");
  }
}
/**
 * Maps a failed HTTP response onto an Error whose message carries
 * actionable guidance for the common Atlassian status codes.
 */
export function createStatusError(errorPrefix: string, response: Response) {
  const base = `${errorPrefix}: ${response.status} ${response.statusText}`;
  const hints: Record<number, string> = {
    401: "check ATLASSIAN_EMAIL and ATLASSIAN_API_TOKEN",
    403: "verify product permissions for this account",
    404: "verify the resource identifier or API path",
    429: "retry later or reduce request rate",
  };
  const hint = hints[response.status];
  return new Error(hint === undefined ? base : `${base} - ${hint}`);
}
/**
 * Sends a JSON request with the configured Basic-auth headers and returns
 * the decoded body. Non-OK responses become Errors: `handleResponseError`
 * may supply a custom one first, otherwise createStatusError is used.
 */
export async function sendJsonRequest(options: {
  config: AtlassianConfig;
  fetchImpl?: FetchLike;
  url: string;
  method: HttpMethod;
  body?: unknown;
  errorPrefix: string;
  handleResponseError?: (response: Response) => Error | undefined;
}) {
  const transport = options.fetchImpl ?? globalThis.fetch;
  if (!transport) {
    throw new Error("Fetch API is not available in this runtime");
  }
  const hasBody = options.body !== undefined;
  const init: { method: HttpMethod; headers: Array<[string, string]>; body?: string } = {
    method: options.method,
    headers: createJsonHeaders(options.config, hasBody),
  };
  if (hasBody) {
    init.body = JSON.stringify(options.body);
  }
  const response = await transport(options.url, init);
  if (response.ok) {
    return parseResponse(response);
  }
  throw options.handleResponseError?.(response) ?? createStatusError(options.errorPrefix, response);
}

View File

@@ -0,0 +1,264 @@
import { markdownToAdf } from "./adf.js";
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike, JiraIssueSummary } from "./types.js";
// Field set requested from Jira so responses stay small and map directly
// onto JiraIssueSummary.
const ISSUE_FIELDS = ["summary", "issuetype", "status", "assignee", "created", "updated"] as const;
type JiraClientOptions = {
  config: AtlassianConfig;
  fetchImpl?: FetchLike; // optional fetch override
};
// Inputs for jira-search.
type SearchInput = {
  jql: string;
  maxResults: number;
  startAt: number;
};
// Inputs for jira-create; `project` falls back to the configured default.
type CreateInput = {
  project?: string;
  type: string;
  summary: string;
  description?: string; // Markdown, converted to ADF before sending
  dryRun?: boolean;
};
// Inputs for jira-update; at least one of summary/description is required.
type UpdateInput = {
  issue: string;
  summary?: string;
  description?: string;
  dryRun?: boolean;
};
// Inputs for jira-comment.
type CommentInput = {
  issue: string;
  body: string; // Markdown, converted to ADF before sending
  dryRun?: boolean;
};
// Inputs for jira-transition; `transition` is the transition id.
type TransitionInput = {
  issue: string;
  transition: string;
  dryRun?: boolean;
};
/**
 * Flattens a raw Jira issue payload into the JiraIssueSummary shape,
 * defaulting missing fields to empty strings and deriving a browse URL
 * from the site base URL.
 */
function normalizeIssue(config: AtlassianConfig, issue: Record<string, unknown>): JiraIssueSummary {
  const fields = (issue.fields ?? {}) as Record<string, unknown>;
  // Extracts the `.name` of a nested object field ("issuetype", "status").
  const nestedName = (key: string) =>
    String(((fields[key] ?? {}) as Record<string, unknown>).name ?? "");
  const assignee = (fields.assignee ?? {}) as Record<string, unknown>;
  const displayName = assignee.displayName;
  return {
    key: String(issue.key ?? ""),
    summary: String(fields.summary ?? ""),
    issueType: nestedName("issuetype"),
    status: nestedName("status"),
    assignee: displayName ? String(displayName) : undefined,
    created: String(fields.created ?? ""),
    updated: String(fields.updated ?? ""),
    url: `${config.baseUrl}/browse/${issue.key ?? ""}`,
  };
}
/**
 * Resolves `path` against the Jira base URL and packages a request
 * descriptor; the `body` key is omitted entirely when no body is given.
 */
function createRequest(config: AtlassianConfig, method: "GET" | "POST" | "PUT", path: string, body?: unknown) {
  const target = new URL(path, `${config.jiraBaseUrl}/`).toString();
  const request: { method: typeof method; url: string; body?: unknown } = {
    method,
    url: target,
  };
  if (body !== undefined) {
    request.body = body;
  }
  return request;
}
/**
 * Creates the Jira command client used by the CLI. All calls share the
 * configured Basic auth via sendJsonRequest; write commands support
 * `dryRun`, returning the would-be request payload instead of performing
 * the remote mutation.
 */
export function createJiraClient(options: JiraClientOptions) {
  const fetchImpl = options.fetchImpl ?? globalThis.fetch;
  if (!fetchImpl) {
    throw new Error("Fetch API is not available in this runtime");
  }
  // Resolves `path` against the Jira base URL and dispatches the request.
  async function send(method: "GET" | "POST" | "PUT", path: string, body?: unknown) {
    const request = createRequest(options.config, method, path, body);
    return sendJsonRequest({
      config: options.config,
      fetchImpl,
      url: request.url,
      method,
      body,
      errorPrefix: "Jira request failed",
    });
  }
  return {
    // JQL search returning normalized issue summaries plus paging metadata.
    async searchIssues(input: SearchInput): Promise<CommandOutput<unknown>> {
      const raw = (await send("POST", "/rest/api/3/search", {
        jql: input.jql,
        maxResults: input.maxResults,
        startAt: input.startAt,
        fields: [...ISSUE_FIELDS],
      })) as Record<string, unknown>;
      const issues = Array.isArray(raw.issues) ? raw.issues : [];
      return {
        ok: true,
        data: {
          issues: issues.map((issue) => normalizeIssue(options.config, issue as Record<string, unknown>)),
          startAt: Number(raw.startAt ?? input.startAt),
          maxResults: Number(raw.maxResults ?? input.maxResults),
          total: Number(raw.total ?? issues.length),
        },
      };
    },
    // Fetches one issue, restricted to the summary fields the CLI renders.
    async getIssue(issue: string): Promise<CommandOutput<unknown>> {
      const url = new URL(`/rest/api/3/issue/${issue}`, `${options.config.jiraBaseUrl}/`);
      url.searchParams.set("fields", ISSUE_FIELDS.join(","));
      const raw = (await send("GET", `${url.pathname}${url.search}`)) as Record<string, unknown>;
      return {
        ok: true,
        data: {
          issue: normalizeIssue(options.config, raw),
        },
        raw,
      };
    },
    // Lists the workflow transitions currently available for an issue.
    async getTransitions(issue: string): Promise<CommandOutput<unknown>> {
      const raw = (await send(
        "GET",
        `/rest/api/3/issue/${issue}/transitions`,
      )) as { transitions?: Array<Record<string, unknown>> };
      return {
        ok: true,
        data: {
          transitions: (raw.transitions ?? []).map((transition) => ({
            id: String(transition.id ?? ""),
            name: String(transition.name ?? ""),
            toStatus: String(((transition.to ?? {}) as Record<string, unknown>).name ?? ""),
            hasScreen: Boolean(transition.hasScreen),
          })),
        },
      };
    },
    // Creates an issue in `input.project` (or the configured default);
    // the Markdown description is converted to ADF.
    async createIssue(input: CreateInput): Promise<CommandOutput<unknown>> {
      const project = input.project || options.config.defaultProject;
      if (!project) {
        throw new Error("jira-create requires --project or ATLASSIAN_DEFAULT_PROJECT");
      }
      const request = createRequest(options.config, "POST", "/rest/api/3/issue", {
        fields: {
          project: { key: project },
          issuetype: { name: input.type },
          summary: input.summary,
          ...(input.description ? { description: markdownToAdf(input.description) } : {}),
        },
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await send("POST", "/rest/api/3/issue", request.body);
      return { ok: true, data: raw };
    },
    // Updates the summary and/or description of an existing issue.
    async updateIssue(input: UpdateInput): Promise<CommandOutput<unknown>> {
      const fields: Record<string, unknown> = {};
      if (input.summary) {
        fields.summary = input.summary;
      }
      if (input.description) {
        fields.description = markdownToAdf(input.description);
      }
      if (Object.keys(fields).length === 0) {
        throw new Error("jira-update requires --summary and/or --description-file");
      }
      const request = createRequest(options.config, "PUT", `/rest/api/3/issue/${input.issue}`, {
        fields,
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      await send("PUT", `/rest/api/3/issue/${input.issue}`, request.body);
      // The update response body is discarded; report a synthetic summary.
      return {
        ok: true,
        data: {
          issue: input.issue,
          updated: true,
        },
      };
    },
    // Adds a comment (Markdown converted to ADF) to an issue.
    async commentIssue(input: CommentInput): Promise<CommandOutput<unknown>> {
      const request = createRequest(options.config, "POST", `/rest/api/3/issue/${input.issue}/comment`, {
        body: markdownToAdf(input.body),
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await send("POST", `/rest/api/3/issue/${input.issue}/comment`, request.body);
      return {
        ok: true,
        data: raw,
      };
    },
    // Moves an issue through a workflow transition, identified by id.
    async transitionIssue(input: TransitionInput): Promise<CommandOutput<unknown>> {
      const request = createRequest(
        options.config,
        "POST",
        `/rest/api/3/issue/${input.issue}/transitions`,
        {
          transition: {
            id: input.transition,
          },
        },
      );
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      await send("POST", `/rest/api/3/issue/${input.issue}/transitions`, request.body);
      // The transition response body is discarded; report a synthetic summary.
      return {
        ok: true,
        data: {
          issue: input.issue,
          transitioned: true,
          transition: input.transition,
        },
      };
    },
  };
}

View File

@@ -0,0 +1,44 @@
import type { CommandOutput, OutputFormat, Writer } from "./types.js";
/**
 * Renders a command payload as human-readable text for the known shapes
 * (issue list, single issue, transition list); anything else falls back
 * to pretty-printed JSON of the whole payload.
 */
function renderText(payload: CommandOutput<unknown>) {
  const data = payload.data as Record<string, unknown>;
  const issues = data?.issues;
  if (Array.isArray(issues)) {
    const rows = issues.map((entry) => {
      const row = entry as Record<string, string>;
      return `${row.key} [${row.status}] ${row.issueType} - ${row.summary}`;
    });
    return rows.join("\n");
  }
  const single = data?.issue;
  if (single && typeof single === "object") {
    const row = single as Record<string, string>;
    const lines = [row.key, `${row.issueType} | ${row.status}`, row.summary, row.url];
    return lines.join("\n");
  }
  const transitions = data?.transitions;
  if (Array.isArray(transitions)) {
    const rows = transitions.map((entry) => {
      const row = entry as Record<string, string>;
      return `${row.id} ${row.name} -> ${row.toStatus}`;
    });
    return rows.join("\n");
  }
  return JSON.stringify(payload, null, 2);
}
export function writeOutput(
writer: Writer,
payload: CommandOutput<unknown>,
format: OutputFormat = "json",
) {
const body = format === "text" ? renderText(payload) : JSON.stringify(payload, null, 2);
writer.write(`${body}\n`);
}

View File

@@ -0,0 +1,85 @@
import { readWorkspaceFile } from "./files.js";
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
// Raw requests are confined to the documented REST surfaces of each product.
const JIRA_ALLOWED_PREFIXES = ["/rest/api/3/"] as const;
const CONFLUENCE_ALLOWED_PREFIXES = ["/wiki/api/v2/", "/wiki/rest/api/"] as const;
// Parsed inputs for the `raw` escape-hatch command.
type RawInput = {
  product: "jira" | "confluence";
  method: string;
  path: string;
  bodyFile?: string; // optional JSON body, read from within the workspace
  cwd: string;
  dryRun?: boolean; // when true, return the validated request instead of sending it
};
/** Returns the raw-path allowlist for the given product. */
function getAllowedPrefixes(product: RawInput["product"]) {
  if (product === "jira") {
    return JIRA_ALLOWED_PREFIXES;
  }
  return CONFLUENCE_ALLOWED_PREFIXES;
}
/** Resolves `path` against the base URL of the requested product. */
function buildUrl(config: AtlassianConfig, product: RawInput["product"], path: string) {
  const root = product === "jira" ? config.jiraBaseUrl : config.confluenceBaseUrl;
  const resolved = new URL(path, root + "/");
  return resolved.toString();
}
/** Narrows `method` to the permitted verbs; DELETE is deliberately excluded from raw mode. */
function validateMethod(method: string): asserts method is "GET" | "POST" | "PUT" {
  const allowed = ["GET", "POST", "PUT"];
  if (allowed.indexOf(method) === -1) {
    throw new Error("raw only allows GET, POST, and PUT");
  }
}
function validatePath(product: RawInput["product"], path: string) {
const allowedPrefixes = getAllowedPrefixes(product);
if (!allowedPrefixes.some((prefix) => path.startsWith(prefix))) {
throw new Error(`raw path is not allowed for ${product}: ${path}`);
}
}
/**
 * Reads and parses the optional JSON body file from the workspace;
 * resolves to undefined when no file was supplied.
 */
async function readRawBody(bodyFile: string | undefined, cwd: string) {
  if (!bodyFile) {
    return undefined;
  }
  const raw = await readWorkspaceFile(bodyFile, cwd);
  const parsed = JSON.parse(raw) as unknown;
  return parsed;
}
/**
 * Executes a bounded "raw" request against Jira or Confluence.
 * The method must be GET/POST/PUT, the path must match the product's
 * allowlist, and the optional body is JSON read from a workspace file.
 * With `dryRun`, the validated request is returned instead of being sent.
 */
export async function runRawCommand(
  config: AtlassianConfig,
  fetchImpl: FetchLike | undefined,
  input: RawInput,
): Promise<CommandOutput<unknown>> {
  // Validate before touching the filesystem or the network.
  validateMethod(input.method);
  validatePath(input.product, input.path);
  const body = await readRawBody(input.bodyFile, input.cwd);
  const request = {
    method: input.method,
    url: buildUrl(config, input.product, input.path),
    ...(body === undefined ? {} : { body }),
  };
  if (input.dryRun) {
    return {
      ok: true,
      dryRun: true,
      data: request,
    };
  }
  const data = await sendJsonRequest({
    config,
    fetchImpl,
    url: request.url,
    method: input.method,
    body,
    errorPrefix: "Raw request failed",
  });
  return {
    ok: true,
    data,
  };
}

View File

@@ -0,0 +1,35 @@
// Resolved Atlassian Cloud connection settings shared by all clients.
export type AtlassianConfig = {
  baseUrl: string;
  jiraBaseUrl: string;
  confluenceBaseUrl: string;
  email: string;
  apiToken: string;
  defaultProject?: string; // fallback project key for jira-create
  defaultSpace?: string; // fallback space for conf-create
};
// Envelope every CLI command resolves to; `dryRun` marks previewed writes
// and `raw` optionally carries the unmodified API response.
export type CommandOutput<T> = {
  ok: boolean;
  data: T;
  dryRun?: boolean;
  raw?: unknown;
};
// Normalized issue shape rendered by the jira-* commands.
export type JiraIssueSummary = {
  key: string;
  summary: string;
  issueType: string;
  status: string;
  assignee?: string;
  created: string;
  updated: string;
  url: string;
};
// Minimal write-only sink interface used for command output.
export type Writer = {
  write(chunk: string | Uint8Array): unknown;
};
// Injection point for the Fetch API so clients can receive a stub transport.
export type FetchLike = typeof fetch;
export type OutputFormat = "json" | "text";

View File

@@ -0,0 +1,15 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": true,
"skipLibCheck": true,
"resolveJsonModule": true,
"types": ["node"],
"outDir": "dist"
},
"include": ["src/**/*.ts", "scripts/**/*.ts", "tests/**/*.ts"]
}

View File

@@ -0,0 +1,78 @@
---
name: atlassian
description: Interact with Atlassian Cloud Jira and Confluence through a portable task-oriented CLI for search, issue/page edits, comments, transitions, and bounded raw requests.
---
# Atlassian (OpenCode)
Portable Atlassian workflows for OpenCode using a shared TypeScript CLI.
## Requirements
- Node.js 20+
- `pnpm`
- Atlassian Cloud account access
- `ATLASSIAN_BASE_URL`
- `ATLASSIAN_EMAIL`
- `ATLASSIAN_API_TOKEN`
The `ATLASSIAN_*` values may come from the shell environment or a `.env` file in `~/.config/opencode/skills/atlassian/scripts`.
## First-Time Setup
```bash
mkdir -p ~/.config/opencode/skills/atlassian
cp -R skills/atlassian/opencode/* ~/.config/opencode/skills/atlassian/
cd ~/.config/opencode/skills/atlassian/scripts
pnpm install
```
## Prerequisite Check (MANDATORY)
```bash
cd ~/.config/opencode/skills/atlassian/scripts
node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')"
node -e 'require("dotenv").config({ path: ".env" }); const required = ["ATLASSIAN_BASE_URL", "ATLASSIAN_EMAIL", "ATLASSIAN_API_TOKEN"]; const missing = required.filter((key) => !(process.env[key] || "").trim()); if (missing.length) { console.error("Missing required Atlassian config: " + missing.join(", ")); process.exit(1); } console.log("OK: Atlassian config present")'
pnpm atlassian health
```
If any check fails, stop and return:
`Missing dependency/config: atlassian requires installed CLI dependencies and valid Atlassian Cloud credentials. Configure ATLASSIAN_* in the shell environment or scripts/.env, then retry.`
## Supported Commands
- `pnpm atlassian health`
- `pnpm atlassian jira-search --jql "..."`
- `pnpm atlassian jira-get --issue ABC-123`
- `pnpm atlassian jira-create ... [--dry-run]`
- `pnpm atlassian jira-update ... [--dry-run]`
- `pnpm atlassian jira-comment ... [--dry-run]`
- `pnpm atlassian jira-transitions --issue ABC-123`
- `pnpm atlassian jira-transition ... [--dry-run]`
- `pnpm atlassian conf-search --query "..."`
- `pnpm atlassian conf-get --page 12345`
- `pnpm atlassian conf-create ... [--dry-run]`
- `pnpm atlassian conf-update ... [--dry-run]`
- `pnpm atlassian conf-comment ... [--dry-run]`
- `pnpm atlassian conf-children --page 12345`
- `pnpm atlassian raw --product jira|confluence --method GET|POST|PUT --path ...`
## Usage Examples
- `pnpm atlassian jira-transition --issue ENG-123 --transition 31 --dry-run`
- `pnpm atlassian conf-create --space OPS --title "Runbook" --body-file page.storage.html --dry-run`
- `pnpm atlassian raw --product jira --method GET --path "/rest/api/3/issue/ENG-123"`
## Safety Rules
- Prefer JSON output for machine consumption.
- Use `--dry-run` on writes unless the user explicitly asks to commit the remote mutation.
- Restrict `--body-file` to project files.
- Use `raw` only for unsupported edge cases.
- `DELETE` is intentionally unsupported in raw mode.
## Notes
- Atlassian Cloud is first-class in v1; Data Center support is future work.
- The CLI contract is shared across all agent variants so the same usage pattern works everywhere.

View File

@@ -0,0 +1,20 @@
{
"name": "atlassian-skill-scripts",
"version": "1.0.0",
"description": "Shared runtime for the Atlassian skill",
"type": "module",
"scripts": {
"atlassian": "tsx src/cli.ts",
"typecheck": "tsc --noEmit"
},
"dependencies": {
"commander": "^13.1.0",
"dotenv": "^16.4.7"
},
"devDependencies": {
"@types/node": "^24.3.0",
"tsx": "^4.20.5",
"typescript": "^5.9.2"
},
"packageManager": "pnpm@10.18.1+sha512.77a884a165cbba2d8d1c19e3b4880eee6d2fcabd0d879121e282196b80042351d5eb3ca0935fa599da1dc51265cc68816ad2bddd2a2de5ea9fdf92adbec7cd34"
}

View File

@@ -0,0 +1,361 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
importers:
.:
dependencies:
commander:
specifier: ^13.1.0
version: 13.1.0
dotenv:
specifier: ^16.4.7
version: 16.6.1
devDependencies:
'@types/node':
specifier: ^24.3.0
version: 24.12.0
tsx:
specifier: ^4.20.5
version: 4.21.0
typescript:
specifier: ^5.9.2
version: 5.9.3
packages:
'@esbuild/aix-ppc64@0.27.3':
resolution: {integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==}
engines: {node: '>=18'}
cpu: [ppc64]
os: [aix]
'@esbuild/android-arm64@0.27.3':
resolution: {integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [android]
'@esbuild/android-arm@0.27.3':
resolution: {integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==}
engines: {node: '>=18'}
cpu: [arm]
os: [android]
'@esbuild/android-x64@0.27.3':
resolution: {integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==}
engines: {node: '>=18'}
cpu: [x64]
os: [android]
'@esbuild/darwin-arm64@0.27.3':
resolution: {integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [darwin]
'@esbuild/darwin-x64@0.27.3':
resolution: {integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==}
engines: {node: '>=18'}
cpu: [x64]
os: [darwin]
'@esbuild/freebsd-arm64@0.27.3':
resolution: {integrity: sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==}
engines: {node: '>=18'}
cpu: [arm64]
os: [freebsd]
'@esbuild/freebsd-x64@0.27.3':
resolution: {integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==}
engines: {node: '>=18'}
cpu: [x64]
os: [freebsd]
'@esbuild/linux-arm64@0.27.3':
resolution: {integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [linux]
'@esbuild/linux-arm@0.27.3':
resolution: {integrity: sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==}
engines: {node: '>=18'}
cpu: [arm]
os: [linux]
'@esbuild/linux-ia32@0.27.3':
resolution: {integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==}
engines: {node: '>=18'}
cpu: [ia32]
os: [linux]
'@esbuild/linux-loong64@0.27.3':
resolution: {integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==}
engines: {node: '>=18'}
cpu: [loong64]
os: [linux]
'@esbuild/linux-mips64el@0.27.3':
resolution: {integrity: sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==}
engines: {node: '>=18'}
cpu: [mips64el]
os: [linux]
'@esbuild/linux-ppc64@0.27.3':
resolution: {integrity: sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==}
engines: {node: '>=18'}
cpu: [ppc64]
os: [linux]
'@esbuild/linux-riscv64@0.27.3':
resolution: {integrity: sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==}
engines: {node: '>=18'}
cpu: [riscv64]
os: [linux]
'@esbuild/linux-s390x@0.27.3':
resolution: {integrity: sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==}
engines: {node: '>=18'}
cpu: [s390x]
os: [linux]
'@esbuild/linux-x64@0.27.3':
resolution: {integrity: sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==}
engines: {node: '>=18'}
cpu: [x64]
os: [linux]
'@esbuild/netbsd-arm64@0.27.3':
resolution: {integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==}
engines: {node: '>=18'}
cpu: [arm64]
os: [netbsd]
'@esbuild/netbsd-x64@0.27.3':
resolution: {integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==}
engines: {node: '>=18'}
cpu: [x64]
os: [netbsd]
'@esbuild/openbsd-arm64@0.27.3':
resolution: {integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==}
engines: {node: '>=18'}
cpu: [arm64]
os: [openbsd]
'@esbuild/openbsd-x64@0.27.3':
resolution: {integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==}
engines: {node: '>=18'}
cpu: [x64]
os: [openbsd]
'@esbuild/openharmony-arm64@0.27.3':
resolution: {integrity: sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==}
engines: {node: '>=18'}
cpu: [arm64]
os: [openharmony]
'@esbuild/sunos-x64@0.27.3':
resolution: {integrity: sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==}
engines: {node: '>=18'}
cpu: [x64]
os: [sunos]
'@esbuild/win32-arm64@0.27.3':
resolution: {integrity: sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==}
engines: {node: '>=18'}
cpu: [arm64]
os: [win32]
'@esbuild/win32-ia32@0.27.3':
resolution: {integrity: sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==}
engines: {node: '>=18'}
cpu: [ia32]
os: [win32]
'@esbuild/win32-x64@0.27.3':
resolution: {integrity: sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==}
engines: {node: '>=18'}
cpu: [x64]
os: [win32]
'@types/node@24.12.0':
resolution: {integrity: sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==}
commander@13.1.0:
resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==}
engines: {node: '>=18'}
dotenv@16.6.1:
resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==}
engines: {node: '>=12'}
esbuild@0.27.3:
resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==}
engines: {node: '>=18'}
hasBin: true
fsevents@2.3.3:
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
os: [darwin]
get-tsconfig@4.13.6:
resolution: {integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==}
resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
tsx@4.21.0:
resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==}
engines: {node: '>=18.0.0'}
hasBin: true
typescript@5.9.3:
resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
engines: {node: '>=14.17'}
hasBin: true
undici-types@7.16.0:
resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==}
snapshots:
'@esbuild/aix-ppc64@0.27.3':
optional: true
'@esbuild/android-arm64@0.27.3':
optional: true
'@esbuild/android-arm@0.27.3':
optional: true
'@esbuild/android-x64@0.27.3':
optional: true
'@esbuild/darwin-arm64@0.27.3':
optional: true
'@esbuild/darwin-x64@0.27.3':
optional: true
'@esbuild/freebsd-arm64@0.27.3':
optional: true
'@esbuild/freebsd-x64@0.27.3':
optional: true
'@esbuild/linux-arm64@0.27.3':
optional: true
'@esbuild/linux-arm@0.27.3':
optional: true
'@esbuild/linux-ia32@0.27.3':
optional: true
'@esbuild/linux-loong64@0.27.3':
optional: true
'@esbuild/linux-mips64el@0.27.3':
optional: true
'@esbuild/linux-ppc64@0.27.3':
optional: true
'@esbuild/linux-riscv64@0.27.3':
optional: true
'@esbuild/linux-s390x@0.27.3':
optional: true
'@esbuild/linux-x64@0.27.3':
optional: true
'@esbuild/netbsd-arm64@0.27.3':
optional: true
'@esbuild/netbsd-x64@0.27.3':
optional: true
'@esbuild/openbsd-arm64@0.27.3':
optional: true
'@esbuild/openbsd-x64@0.27.3':
optional: true
'@esbuild/openharmony-arm64@0.27.3':
optional: true
'@esbuild/sunos-x64@0.27.3':
optional: true
'@esbuild/win32-arm64@0.27.3':
optional: true
'@esbuild/win32-ia32@0.27.3':
optional: true
'@esbuild/win32-x64@0.27.3':
optional: true
'@types/node@24.12.0':
dependencies:
undici-types: 7.16.0
commander@13.1.0: {}
dotenv@16.6.1: {}
esbuild@0.27.3:
optionalDependencies:
'@esbuild/aix-ppc64': 0.27.3
'@esbuild/android-arm': 0.27.3
'@esbuild/android-arm64': 0.27.3
'@esbuild/android-x64': 0.27.3
'@esbuild/darwin-arm64': 0.27.3
'@esbuild/darwin-x64': 0.27.3
'@esbuild/freebsd-arm64': 0.27.3
'@esbuild/freebsd-x64': 0.27.3
'@esbuild/linux-arm': 0.27.3
'@esbuild/linux-arm64': 0.27.3
'@esbuild/linux-ia32': 0.27.3
'@esbuild/linux-loong64': 0.27.3
'@esbuild/linux-mips64el': 0.27.3
'@esbuild/linux-ppc64': 0.27.3
'@esbuild/linux-riscv64': 0.27.3
'@esbuild/linux-s390x': 0.27.3
'@esbuild/linux-x64': 0.27.3
'@esbuild/netbsd-arm64': 0.27.3
'@esbuild/netbsd-x64': 0.27.3
'@esbuild/openbsd-arm64': 0.27.3
'@esbuild/openbsd-x64': 0.27.3
'@esbuild/openharmony-arm64': 0.27.3
'@esbuild/sunos-x64': 0.27.3
'@esbuild/win32-arm64': 0.27.3
'@esbuild/win32-ia32': 0.27.3
'@esbuild/win32-x64': 0.27.3
fsevents@2.3.3:
optional: true
get-tsconfig@4.13.6:
dependencies:
resolve-pkg-maps: 1.0.0
resolve-pkg-maps@1.0.0: {}
tsx@4.21.0:
dependencies:
esbuild: 0.27.3
get-tsconfig: 4.13.6
optionalDependencies:
fsevents: 2.3.3
typescript@5.9.3: {}
undici-types@7.16.0: {}

View File

@@ -0,0 +1,92 @@
// All ADF text nodes share this type tag.
const TEXT_NODE = "text";

/** Wraps a plain string in an ADF text node. */
function textNode(text: string) {
  return { type: TEXT_NODE, text };
}

/**
 * Builds an ADF paragraph from consecutive source lines, joining them with
 * hardBreak nodes. Empty lines contribute only the break, and a paragraph
 * with no content omits the `content` key entirely.
 */
function paragraphNode(lines: string[]) {
  const content: Array<{ type: string; text?: string }> = [];
  for (const [index, line] of lines.entries()) {
    if (index > 0) {
      content.push({ type: "hardBreak" });
    }
    if (line.length > 0) {
      content.push(textNode(line));
    }
  }
  if (content.length === 0) {
    return { type: "paragraph" };
  }
  return { type: "paragraph", content };
}

/**
 * Converts a small Markdown subset — ATX headings, `-`/`*` bullet lists,
 * and paragraphs with hard line breaks — into an Atlassian Document Format
 * document. Unrecognized syntax is kept as plain paragraph text.
 */
export function markdownToAdf(input: string) {
  const lines = input.replace(/\r\n/g, "\n").split("\n");
  const content: Array<Record<string, unknown>> = [];
  const isBullet = (line: string) => /^[-*]\s+/.test(line);
  let cursor = 0;
  while (cursor < lines.length) {
    const line = lines[cursor]?.trimEnd() ?? "";
    // Blank lines separate blocks but emit nothing themselves.
    if (line.trim().length === 0) {
      cursor += 1;
      continue;
    }
    const heading = line.match(/^(#{1,6})\s+(.*)$/);
    if (heading) {
      content.push({
        type: "heading",
        attrs: { level: heading[1].length },
        content: [textNode(heading[2])],
      });
      cursor += 1;
      continue;
    }
    if (isBullet(line)) {
      // Consume the whole contiguous run of bullet lines into one list.
      const items: Array<Record<string, unknown>> = [];
      while (cursor < lines.length && isBullet(lines[cursor] ?? "")) {
        const itemText = (lines[cursor] ?? "").replace(/^[-*]\s+/, "");
        items.push({
          type: "listItem",
          content: [
            {
              type: "paragraph",
              content: [textNode(itemText)],
            },
          ],
        });
        cursor += 1;
      }
      content.push({ type: "bulletList", content: items });
      continue;
    }
    // Plain paragraph: gather raw lines until the next blank line.
    const paragraphLines: string[] = [];
    while (cursor < lines.length && (lines[cursor]?.trim().length ?? 0) > 0) {
      paragraphLines.push(lines[cursor] ?? "");
      cursor += 1;
    }
    content.push(paragraphNode(paragraphLines));
  }
  return { type: "doc", version: 1, content };
}

View File

@@ -0,0 +1,332 @@
import process from "node:process";
import { pathToFileURL } from "node:url";
import { Command } from "commander";
import { createConfluenceClient } from "./confluence.js";
import { loadConfig } from "./config.js";
import { readWorkspaceFile } from "./files.js";
import { runHealthCheck } from "./health.js";
import { createJiraClient } from "./jira.js";
import { writeOutput } from "./output.js";
import { runRawCommand } from "./raw.js";
import type { FetchLike, OutputFormat, Writer } from "./types.js";
// Injection points for tests and embedders; every field falls back to the
// matching process-global inside createRuntime.
type CliContext = {
  cwd?: string;
  env?: NodeJS.ProcessEnv;
  fetchImpl?: FetchLike;
  stdout?: Writer;
  stderr?: Writer;
};
/** Map the --format flag to an OutputFormat; anything but "text" means JSON. */
function resolveFormat(format: string | undefined): OutputFormat {
  if (format === "text") {
    return "text";
  }
  return "json";
}
/**
 * Resolve process-level dependencies (cwd, env, output streams, fetch) from
 * the optional CliContext, with lazy, memoized construction of the config
 * and the per-product API clients.
 */
function createRuntime(context: CliContext) {
  const cwd = context.cwd ?? process.cwd();
  const env = context.env ?? process.env;
  const stdout = context.stdout ?? process.stdout;
  const stderr = context.stderr ?? process.stderr;
  // Caches: each value is built at most once per runtime instance.
  let configCache: ReturnType<typeof loadConfig> | undefined;
  let jiraCache: ReturnType<typeof createJiraClient> | undefined;
  let confluenceCache: ReturnType<typeof createConfluenceClient> | undefined;
  function getConfig() {
    configCache ??= loadConfig(env, { cwd });
    return configCache;
  }
  function getJiraClient() {
    jiraCache ??= createJiraClient({
      config: getConfig(),
      fetchImpl: context.fetchImpl,
    });
    return jiraCache;
  }
  function getConfluenceClient() {
    confluenceCache ??= createConfluenceClient({
      config: getConfig(),
      fetchImpl: context.fetchImpl,
    });
    return confluenceCache;
  }
  // Read a workspace-relative body file when a path is given; undefined otherwise.
  async function readBodyFile(filePath: string | undefined) {
    if (!filePath) {
      return undefined;
    }
    return readWorkspaceFile(filePath, cwd);
  }
  return {
    cwd,
    stdout,
    stderr,
    readBodyFile,
    getConfig,
    getJiraClient,
    getConfluenceClient,
    fetchImpl: context.fetchImpl,
  };
}
/**
 * Assemble the commander program with every Atlassian subcommand.
 *
 * Each command parses its flags, calls the lazily built product client, and
 * prints a CommandOutput envelope via writeOutput. Mutating commands accept
 * --dry-run, and every command accepts --format json|text.
 */
export function buildProgram(context: CliContext = {}) {
  const runtime = createRuntime(context);
  const program = new Command()
    .name("atlassian")
    .description("Portable Atlassian CLI for multi-agent skills")
    .version("0.1.0");
  // Connectivity / configuration check.
  program
    .command("health")
    .description("Validate configuration and Atlassian connectivity")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runHealthCheck(runtime.getConfig(), runtime.fetchImpl);
      writeOutput(
        runtime.stdout,
        payload,
        resolveFormat(options.format),
      );
    });
  // --- Confluence commands ---
  program
    .command("conf-search")
    .requiredOption("--query <query>", "CQL search query")
    .option("--max-results <number>", "Maximum results to return", "50")
    .option("--start-at <number>", "Result offset", "0")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().searchPages({
        query: options.query,
        maxResults: Number(options.maxResults),
        startAt: Number(options.startAt),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-get")
    .requiredOption("--page <page>", "Confluence page ID")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().getPage(options.page);
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-create")
    .requiredOption("--title <title>", "Confluence page title")
    .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
    .option("--space <space>", "Confluence space ID")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().createPage({
        space: options.space,
        title: options.title,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-update")
    .requiredOption("--page <page>", "Confluence page ID")
    .requiredOption("--title <title>", "Confluence page title")
    .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().updatePage({
        pageId: options.page,
        title: options.title,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-comment")
    .requiredOption("--page <page>", "Confluence page ID")
    .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().commentPage({
        pageId: options.page,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-children")
    .requiredOption("--page <page>", "Confluence page ID")
    .option("--max-results <number>", "Maximum results to return", "50")
    .option("--start-at <number>", "Cursor/start token", "0")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().listChildren(
        options.page,
        Number(options.maxResults),
        Number(options.startAt),
      );
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  // Escape hatch: validated raw requests against allowlisted API paths.
  program
    .command("raw")
    .requiredOption("--product <product>", "jira or confluence")
    .requiredOption("--method <method>", "GET, POST, or PUT")
    .requiredOption("--path <path>", "Validated API path")
    .option("--body-file <path>", "Workspace-relative JSON file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runRawCommand(runtime.getConfig(), runtime.fetchImpl, {
        product: options.product,
        method: String(options.method).toUpperCase(),
        path: options.path,
        bodyFile: options.bodyFile,
        cwd: runtime.cwd,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  // --- Jira commands ---
  program
    .command("jira-search")
    .requiredOption("--jql <jql>", "JQL expression to execute")
    .option("--max-results <number>", "Maximum results to return", "50")
    .option("--start-at <number>", "Result offset", "0")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().searchIssues({
        jql: options.jql,
        maxResults: Number(options.maxResults),
        startAt: Number(options.startAt),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-get")
    .requiredOption("--issue <issue>", "Issue key")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().getIssue(options.issue);
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-create")
    .requiredOption("--type <type>", "Issue type name")
    .requiredOption("--summary <summary>", "Issue summary")
    .option("--project <project>", "Project key")
    .option("--description-file <path>", "Workspace-relative markdown/text file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().createIssue({
        project: options.project,
        type: options.type,
        summary: options.summary,
        description: await runtime.readBodyFile(options.descriptionFile),
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-update")
    .requiredOption("--issue <issue>", "Issue key")
    .option("--summary <summary>", "Updated summary")
    .option("--description-file <path>", "Workspace-relative markdown/text file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().updateIssue({
        issue: options.issue,
        summary: options.summary,
        description: await runtime.readBodyFile(options.descriptionFile),
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-comment")
    .requiredOption("--issue <issue>", "Issue key")
    .requiredOption("--body-file <path>", "Workspace-relative markdown/text file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().commentIssue({
        issue: options.issue,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-transitions")
    .requiredOption("--issue <issue>", "Issue key")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().getTransitions(options.issue);
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-transition")
    .requiredOption("--issue <issue>", "Issue key")
    .requiredOption("--transition <transition>", "Transition ID")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().transitionIssue({
        issue: options.issue,
        transition: options.transition,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  return program;
}
/** Parse argv and execute the matching command (entry point for embedders). */
export async function runCli(argv = process.argv, context: CliContext = {}) {
  const program = buildProgram(context);
  await program.parseAsync(argv);
}
// Only self-execute when this module is the script Node was started with,
// so importing the CLI (e.g. from tests) has no side effects.
const isDirectExecution =
  Boolean(process.argv[1]) && import.meta.url === pathToFileURL(process.argv[1]).href;
if (isDirectExecution) {
  runCli().catch((error: unknown) => {
    const message = error instanceof Error ? error.message : String(error);
    process.stderr.write(`${message}\n`);
    process.exitCode = 1;
  });
}

View File

@@ -0,0 +1,52 @@
import path from "node:path";
import { config as loadDotEnv } from "dotenv";
import type { AtlassianConfig } from "./types.js";
/** Drop every trailing slash so later URL joins behave predictably. */
function normalizeBaseUrl(value: string) {
  let end = value.length;
  while (end > 0 && value[end - 1] === "/") {
    end -= 1;
  }
  return value.slice(0, end);
}
/** Read a mandatory env var, trimmed; throw if unset or blank. */
function readRequired(env: NodeJS.ProcessEnv, key: string) {
  const raw = env[key];
  const trimmed = raw === undefined ? "" : raw.trim();
  if (trimmed === "") {
    throw new Error(`Missing required environment variable: ${key}`);
  }
  return trimmed;
}
/**
 * Load Atlassian connection settings from the environment, optionally
 * seeding it from a .env file in the workspace root.
 *
 * Existing env values win over .env entries (override: false). Throws when
 * ATLASSIAN_BASE_URL, ATLASSIAN_EMAIL, or ATLASSIAN_API_TOKEN is missing.
 */
export function loadConfig(
  env: NodeJS.ProcessEnv = process.env,
  options?: {
    cwd?: string;
  },
): AtlassianConfig {
  loadDotEnv({
    path: path.resolve(options?.cwd ?? process.cwd(), ".env"),
    processEnv: env as Record<string, string>,
    override: false,
  });
  const baseUrl = normalizeBaseUrl(readRequired(env, "ATLASSIAN_BASE_URL"));
  return {
    baseUrl,
    // Product-specific hosts fall back to the shared base URL.
    jiraBaseUrl: normalizeBaseUrl(env.ATLASSIAN_JIRA_BASE_URL?.trim() || baseUrl),
    confluenceBaseUrl: normalizeBaseUrl(env.ATLASSIAN_CONFLUENCE_BASE_URL?.trim() || baseUrl),
    email: readRequired(env, "ATLASSIAN_EMAIL"),
    apiToken: readRequired(env, "ATLASSIAN_API_TOKEN"),
    // Optional defaults used when --project / --space flags are omitted.
    defaultProject: env.ATLASSIAN_DEFAULT_PROJECT?.trim() || undefined,
    defaultSpace: env.ATLASSIAN_DEFAULT_SPACE?.trim() || undefined,
  };
}
/** Build an HTTP Basic Authorization header: base64("email:apiToken"). */
export function createBasicAuthHeader(config: {
  email: string;
  apiToken: string;
  [key: string]: unknown;
}) {
  const credentials = `${config.email}:${config.apiToken}`;
  const encoded = Buffer.from(credentials).toString("base64");
  return `Basic ${encoded}`;
}

View File

@@ -0,0 +1,292 @@
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
// Construction options: site config plus an optional fetch override for tests.
type ConfluenceClientOptions = {
  config: AtlassianConfig;
  fetchImpl?: FetchLike;
};
// Inputs for conf-search (CQL query with offset pagination).
type SearchInput = {
  query: string;
  maxResults: number;
  startAt: number;
};
// Inputs for conf-create; space falls back to ATLASSIAN_DEFAULT_SPACE.
type CreateInput = {
  space?: string;
  title: string;
  body: string;
  dryRun?: boolean;
};
// Inputs for conf-update (full-body replacement of an existing page).
type UpdateInput = {
  pageId: string;
  title: string;
  body: string;
  dryRun?: boolean;
};
// Inputs for conf-comment (footer comment in storage format).
type CommentInput = {
  pageId: string;
  body: string;
  dryRun?: boolean;
};
// NOTE(review): declared shape for normalized pages; not referenced in this
// file's visible code — confirm whether it is consumed elsewhere.
type PageSummary = {
  id: string;
  title: string;
  type: string;
  status?: string;
  spaceId?: string;
  url?: string;
};
/** Resolve path against base; the trailing "/" keeps any base path prefix. */
function buildUrl(baseUrl: string, path: string) {
  const resolved = new URL(path, `${baseUrl}/`);
  return resolved.toString();
}
/**
 * Flatten a raw Confluence page payload into the summary shape used by the
 * CLI output. Optional keys (status, spaceId, excerpt, url) are present only
 * when the source data provides them.
 */
function normalizePage(baseUrl: string, page: Record<string, unknown>, excerpt?: string) {
  const links = (page._links ?? {}) as Record<string, unknown>;
  const summary: Record<string, unknown> = {
    id: String(page.id ?? ""),
    title: String(page.title ?? ""),
    type: String(page.type ?? "page"),
  };
  if (page.status) {
    summary.status = String(page.status);
  }
  if (page.spaceId) {
    summary.spaceId = String(page.spaceId);
  }
  if (excerpt) {
    summary.excerpt = excerpt;
  }
  if (links.webui) {
    // webui is site-relative, so prepend the shared base URL.
    summary.url = `${baseUrl}${String(links.webui)}`;
  }
  return summary;
}
/**
 * Build a Confluence client bound to one site configuration.
 *
 * Reads use the v2 API where available (CQL search stays on the v1 REST
 * API). Mutating methods honor dryRun by returning the would-be request
 * instead of sending it.
 */
export function createConfluenceClient(options: ConfluenceClientOptions) {
  const config = options.config;
  // Fetch the current revision (storage body + version) prior to an update.
  async function getPageForUpdate(pageId: string) {
    return (await sendJsonRequest({
      config,
      fetchImpl: options.fetchImpl,
      url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${pageId}?body-format=storage`),
      method: "GET",
      errorPrefix: "Confluence request failed",
    })) as Record<string, unknown>;
  }
  return {
    // CQL search via the v1 REST API; results carry content + excerpt.
    async searchPages(input: SearchInput): Promise<CommandOutput<unknown>> {
      const url = new URL("/wiki/rest/api/search", `${config.confluenceBaseUrl}/`);
      url.searchParams.set("cql", input.query);
      url.searchParams.set("limit", String(input.maxResults));
      url.searchParams.set("start", String(input.startAt));
      const raw = (await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: url.toString(),
        method: "GET",
        errorPrefix: "Confluence request failed",
      })) as Record<string, unknown>;
      const results = Array.isArray(raw.results) ? raw.results : [];
      return {
        ok: true,
        data: {
          pages: results.map((entry) => {
            const result = entry as Record<string, unknown>;
            return normalizePage(
              config.baseUrl,
              (result.content ?? {}) as Record<string, unknown>,
              result.excerpt ? String(result.excerpt) : undefined,
            );
          }),
          startAt: Number(raw.start ?? input.startAt),
          maxResults: Number(raw.limit ?? input.maxResults),
          total: Number(raw.totalSize ?? raw.size ?? results.length),
        },
      };
    },
    // Fetch one page including its storage-format body and version number.
    async getPage(pageId: string): Promise<CommandOutput<unknown>> {
      const raw = (await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${pageId}?body-format=storage`),
        method: "GET",
        errorPrefix: "Confluence request failed",
      })) as Record<string, unknown>;
      const body = ((raw.body ?? {}) as Record<string, unknown>).storage as Record<string, unknown> | undefined;
      return {
        ok: true,
        data: {
          page: {
            ...normalizePage(config.baseUrl, raw),
            version: Number((((raw.version ?? {}) as Record<string, unknown>).number ?? 0)),
            body: body?.value ? String(body.value) : "",
          },
        },
        raw,
      };
    },
    // List direct children of a page.
    async listChildren(pageId: string, maxResults: number, startAt: number): Promise<CommandOutput<unknown>> {
      const url = new URL(`/wiki/api/v2/pages/${pageId}/direct-children`, `${config.confluenceBaseUrl}/`);
      url.searchParams.set("limit", String(maxResults));
      // NOTE(review): the v2 API paginates with an opaque cursor token, but a
      // numeric startAt is forwarded here — confirm follow-up pages work.
      url.searchParams.set("cursor", String(startAt));
      const raw = (await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: url.toString(),
        method: "GET",
        errorPrefix: "Confluence request failed",
      })) as Record<string, unknown>;
      const results = Array.isArray(raw.results) ? raw.results : [];
      const links = (raw._links ?? {}) as Record<string, unknown>;
      return {
        ok: true,
        data: {
          pages: results.map((page) => normalizePage(config.baseUrl, page as Record<string, unknown>)),
          nextCursor: links.next ? String(links.next) : null,
        },
      };
    },
    // Create a page in the given (or default) space with a storage-format body.
    async createPage(input: CreateInput): Promise<CommandOutput<unknown>> {
      const spaceId = input.space || config.defaultSpace;
      if (!spaceId) {
        throw new Error("conf-create requires --space or ATLASSIAN_DEFAULT_SPACE");
      }
      const request = {
        method: "POST" as const,
        url: buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/pages"),
        body: {
          spaceId,
          title: input.title,
          status: "current",
          body: {
            representation: "storage",
            value: input.body,
          },
        },
      };
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: request.url,
        method: request.method,
        body: request.body,
        errorPrefix: "Confluence request failed",
      });
      return {
        ok: true,
        data: raw,
      };
    },
    // Replace a page's title/body; version is read first and incremented.
    async updatePage(input: UpdateInput): Promise<CommandOutput<unknown>> {
      const currentPage = await getPageForUpdate(input.pageId);
      const version = (((currentPage.version ?? {}) as Record<string, unknown>).number ?? 0) as number;
      const spaceId = String(currentPage.spaceId ?? "");
      const request = {
        method: "PUT" as const,
        url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${input.pageId}`),
        body: {
          id: input.pageId,
          status: String(currentPage.status ?? "current"),
          title: input.title,
          spaceId,
          version: {
            number: Number(version) + 1,
          },
          body: {
            representation: "storage",
            value: input.body,
          },
        },
      };
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: request.url,
        method: request.method,
        body: request.body,
        errorPrefix: "Confluence request failed",
        // 409 means our version was stale (concurrent editor won the race).
        handleResponseError(response) {
          if (response.status === 409) {
            return new Error(`Confluence update conflict: page ${input.pageId} was updated by someone else`);
          }
          return undefined;
        },
      });
      return {
        ok: true,
        data: raw,
      };
    },
    // Add a footer comment to a page.
    async commentPage(input: CommentInput): Promise<CommandOutput<unknown>> {
      const request = {
        method: "POST" as const,
        url: buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/footer-comments"),
        body: {
          pageId: input.pageId,
          body: {
            representation: "storage",
            value: input.body,
          },
        },
      };
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await sendJsonRequest({
        config,
        fetchImpl: options.fetchImpl,
        url: request.url,
        method: request.method,
        body: request.body,
        errorPrefix: "Confluence request failed",
      });
      return {
        ok: true,
        data: raw,
      };
    },
  };
}

View File

@@ -0,0 +1,13 @@
import { readFile } from "node:fs/promises";
import path from "node:path";
/**
 * Read a UTF-8 file that must live inside the workspace rooted at cwd.
 *
 * Rejects any path that resolves outside the workspace (absolute escapes or
 * ".." traversal). Note: a bare startsWith("..") test would also reject a
 * legitimate sibling file literally named "..cfg", so the check matches only
 * an exact ".." segment or a leading "../" (with the platform separator).
 *
 * @param filePath workspace-relative (or absolute-but-inside) path
 * @param cwd workspace root used as the containment boundary
 * @throws Error when the resolved path escapes the workspace
 */
export async function readWorkspaceFile(filePath: string, cwd: string) {
  const resolved = path.resolve(cwd, filePath);
  const relative = path.relative(cwd, resolved);
  const escapesWorkspace =
    relative === ".." ||
    relative.startsWith(`..${path.sep}`) ||
    path.isAbsolute(relative);
  if (escapesWorkspace) {
    throw new Error(`--body-file must stay within the active workspace: ${filePath}`);
  }
  return readFile(resolved, "utf8");
}

View File

@@ -0,0 +1,69 @@
import { createJsonHeaders, createStatusError } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
// Outcome of probing one product endpoint.
type ProductHealth = {
  ok: boolean;
  status?: number;
  message?: string;
};
// Join path onto base; the trailing "/" preserves any base path segment.
function buildUrl(baseUrl: string, path: string) {
  return new URL(path, `${baseUrl}/`).toString();
}
/**
 * Probe Jira and Confluence with cheap authenticated GETs and report
 * per-product status alongside the effective configuration. HTTP and
 * transport failures are captured in the payload rather than thrown.
 */
export async function runHealthCheck(
  config: AtlassianConfig,
  fetchImpl: FetchLike | undefined,
): Promise<CommandOutput<unknown>> {
  const client = fetchImpl ?? globalThis.fetch;
  if (!client) {
    throw new Error("Fetch API is not available in this runtime");
  }
  async function probe(product: "Jira" | "Confluence", url: string): Promise<ProductHealth> {
    try {
      const response = await client(url, {
        method: "GET",
        headers: createJsonHeaders(config, false),
      });
      if (!response.ok) {
        // Reuse the shared status mapper for a human-readable hint.
        const error = createStatusError(`${product} health check failed`, response);
        return {
          ok: false,
          status: response.status,
          message: error.message,
        };
      }
      return {
        ok: true,
        status: response.status,
      };
    } catch (error: unknown) {
      // Transport-level failure (DNS, TLS, refused connection): no status.
      return {
        ok: false,
        message: error instanceof Error ? error.message : String(error),
      };
    }
  }
  const jira = await probe("Jira", buildUrl(config.jiraBaseUrl, "/rest/api/3/myself"));
  const confluence = await probe("Confluence", buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/spaces?limit=1"));
  return {
    ok: jira.ok && confluence.ok,
    data: {
      baseUrl: config.baseUrl,
      jiraBaseUrl: config.jiraBaseUrl,
      confluenceBaseUrl: config.confluenceBaseUrl,
      defaultProject: config.defaultProject,
      defaultSpace: config.defaultSpace,
      products: {
        jira,
        confluence,
      },
    },
  };
}

View File

@@ -0,0 +1,86 @@
import { createBasicAuthHeader } from "./config.js";
import type { AtlassianConfig, FetchLike } from "./types.js";
// Verbs permitted by the shared HTTP helper (intentionally no DELETE).
export type HttpMethod = "GET" | "POST" | "PUT";

/**
 * Build the header list for an Atlassian request: Basic credentials plus a
 * JSON Accept header; Content-Type is appended only when a JSON body will
 * actually be sent.
 */
export function createJsonHeaders(config: AtlassianConfig, includeJsonBody: boolean) {
  const base: Array<[string, string]> = [
    ["Accept", "application/json"],
    ["Authorization", createBasicAuthHeader(config)],
  ];
  return includeJsonBody
    ? [...base, ["Content-Type", "application/json"] as [string, string]]
    : base;
}
/**
 * Decode an Atlassian API response body.
 * 204 yields null; JSON content types are parsed (malformed JSON becomes a
 * descriptive error); everything else is returned as raw text.
 */
export async function parseResponse(response: Response) {
  if (response.status === 204) {
    return null;
  }
  const contentType = response.headers.get("content-type") ?? "";
  if (!contentType.includes("application/json")) {
    return response.text();
  }
  try {
    return await response.json();
  } catch {
    throw new Error("Malformed JSON response from Atlassian API");
  }
}
/**
 * Map a non-2xx response to an Error whose message starts with
 * "<prefix>: <status> <statusText>" and, for well-known statuses, ends with
 * an actionable hint.
 */
export function createStatusError(errorPrefix: string, response: Response) {
  const base = `${errorPrefix}: ${response.status} ${response.statusText}`;
  const hints: Record<number, string> = {
    401: "check ATLASSIAN_EMAIL and ATLASSIAN_API_TOKEN",
    403: "verify product permissions for this account",
    404: "verify the resource identifier or API path",
    429: "retry later or reduce request rate",
  };
  const hint = hints[response.status];
  return new Error(hint ? `${base} - ${hint}` : base);
}
/**
 * Perform one authenticated JSON request against an Atlassian API.
 *
 * Sends Basic-auth headers, serializes `body` when present, and maps non-2xx
 * responses to descriptive errors (callers may intercept specific statuses
 * via `handleResponseError`). Resolves to parsed JSON, raw text for non-JSON
 * bodies, or null for 204.
 */
export async function sendJsonRequest(options: {
  config: AtlassianConfig;
  fetchImpl?: FetchLike;
  url: string;
  method: HttpMethod;
  body?: unknown;
  errorPrefix: string;
  handleResponseError?: (response: Response) => Error | undefined;
}) {
  // Prefer the injected fetch (tests); fall back to the runtime global.
  const fetchImpl = options.fetchImpl ?? globalThis.fetch;
  if (!fetchImpl) {
    throw new Error("Fetch API is not available in this runtime");
  }
  const response = await fetchImpl(options.url, {
    method: options.method,
    // Content-Type is attached only when a JSON body is actually sent.
    headers: createJsonHeaders(options.config, options.body !== undefined),
    ...(options.body === undefined ? {} : { body: JSON.stringify(options.body) }),
  });
  if (!response.ok) {
    // Give the caller first chance to translate the failure (e.g. 409).
    const customError = options.handleResponseError?.(response);
    if (customError) {
      throw customError;
    }
    throw createStatusError(options.errorPrefix, response);
  }
  return parseResponse(response);
}

View File

@@ -0,0 +1,264 @@
import { markdownToAdf } from "./adf.js";
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike, JiraIssueSummary } from "./types.js";
// Field set requested from Jira so responses stay small and normalizable.
const ISSUE_FIELDS = ["summary", "issuetype", "status", "assignee", "created", "updated"] as const;
// Construction options: site config plus an optional fetch override for tests.
type JiraClientOptions = {
  config: AtlassianConfig;
  fetchImpl?: FetchLike;
};
// Inputs for jira-search (JQL with offset pagination).
type SearchInput = {
  jql: string;
  maxResults: number;
  startAt: number;
};
// Inputs for jira-create; project falls back to ATLASSIAN_DEFAULT_PROJECT.
type CreateInput = {
  project?: string;
  type: string;
  summary: string;
  description?: string;
  dryRun?: boolean;
};
// Inputs for jira-update; at least one of summary/description is required.
type UpdateInput = {
  issue: string;
  summary?: string;
  description?: string;
  dryRun?: boolean;
};
// Inputs for jira-comment (markdown body, converted to ADF before sending).
type CommentInput = {
  issue: string;
  body: string;
  dryRun?: boolean;
};
// Inputs for jira-transition; `transition` is a transition ID.
type TransitionInput = {
  issue: string;
  transition: string;
  dryRun?: boolean;
};
/**
 * Flatten a raw Jira issue payload into the summary shape used by the CLI,
 * including a browse URL built from the shared base URL.
 */
function normalizeIssue(config: AtlassianConfig, issue: Record<string, unknown>): JiraIssueSummary {
  const fields = (issue.fields ?? {}) as Record<string, unknown>;
  // Safely read a key from a possibly-missing nested object.
  const pick = (source: unknown, key: string) =>
    ((source ?? {}) as Record<string, unknown>)[key];
  const displayName = pick(fields.assignee, "displayName");
  return {
    key: String(issue.key ?? ""),
    summary: String(fields.summary ?? ""),
    issueType: String(pick(fields.issuetype, "name") ?? ""),
    status: String(pick(fields.status, "name") ?? ""),
    assignee: displayName ? String(displayName) : undefined,
    created: String(fields.created ?? ""),
    updated: String(fields.updated ?? ""),
    url: `${config.baseUrl}/browse/${issue.key ?? ""}`,
  };
}
/**
 * Describe a Jira request: resolve the path against the Jira base URL and
 * attach the body only when one is provided (the key is absent otherwise).
 */
function createRequest(config: AtlassianConfig, method: "GET" | "POST" | "PUT", path: string, body?: unknown) {
  const target = new URL(path, `${config.jiraBaseUrl}/`).toString();
  const request: { method: typeof method; url: string; body?: unknown } = {
    method,
    url: target,
  };
  if (body !== undefined) {
    request.body = body;
  }
  return request;
}
/**
 * Build a Jira client bound to one site configuration.
 * Markdown inputs are converted to ADF before sending; mutating methods
 * honor dryRun by returning the would-be request instead of sending it.
 */
export function createJiraClient(options: JiraClientOptions) {
  const fetchImpl = options.fetchImpl ?? globalThis.fetch;
  if (!fetchImpl) {
    throw new Error("Fetch API is not available in this runtime");
  }
  // Shared helper: resolve the URL and delegate to sendJsonRequest.
  async function send(method: "GET" | "POST" | "PUT", path: string, body?: unknown) {
    const request = createRequest(options.config, method, path, body);
    return sendJsonRequest({
      config: options.config,
      fetchImpl,
      url: request.url,
      method,
      body,
      errorPrefix: "Jira request failed",
    });
  }
  return {
    // JQL search with offset pagination.
    // NOTE(review): POST /rest/api/3/search — confirm against current Cloud
    // deprecation guidance (the /search/jql variant) for the target site.
    async searchIssues(input: SearchInput): Promise<CommandOutput<unknown>> {
      const raw = (await send("POST", "/rest/api/3/search", {
        jql: input.jql,
        maxResults: input.maxResults,
        startAt: input.startAt,
        fields: [...ISSUE_FIELDS],
      })) as Record<string, unknown>;
      const issues = Array.isArray(raw.issues) ? raw.issues : [];
      return {
        ok: true,
        data: {
          issues: issues.map((issue) => normalizeIssue(options.config, issue as Record<string, unknown>)),
          startAt: Number(raw.startAt ?? input.startAt),
          maxResults: Number(raw.maxResults ?? input.maxResults),
          total: Number(raw.total ?? issues.length),
        },
      };
    },
    // Fetch one issue restricted to the normalized field set.
    async getIssue(issue: string): Promise<CommandOutput<unknown>> {
      const url = new URL(`/rest/api/3/issue/${issue}`, `${options.config.jiraBaseUrl}/`);
      url.searchParams.set("fields", ISSUE_FIELDS.join(","));
      const raw = (await send("GET", `${url.pathname}${url.search}`)) as Record<string, unknown>;
      return {
        ok: true,
        data: {
          issue: normalizeIssue(options.config, raw),
        },
        raw,
      };
    },
    // List the transitions currently available on an issue.
    async getTransitions(issue: string): Promise<CommandOutput<unknown>> {
      const raw = (await send(
        "GET",
        `/rest/api/3/issue/${issue}/transitions`,
      )) as { transitions?: Array<Record<string, unknown>> };
      return {
        ok: true,
        data: {
          transitions: (raw.transitions ?? []).map((transition) => ({
            id: String(transition.id ?? ""),
            name: String(transition.name ?? ""),
            toStatus: String(((transition.to ?? {}) as Record<string, unknown>).name ?? ""),
            hasScreen: Boolean(transition.hasScreen),
          })),
        },
      };
    },
    // Create an issue; project falls back to the configured default.
    async createIssue(input: CreateInput): Promise<CommandOutput<unknown>> {
      const project = input.project || options.config.defaultProject;
      if (!project) {
        throw new Error("jira-create requires --project or ATLASSIAN_DEFAULT_PROJECT");
      }
      const request = createRequest(options.config, "POST", "/rest/api/3/issue", {
        fields: {
          project: { key: project },
          issuetype: { name: input.type },
          summary: input.summary,
          ...(input.description ? { description: markdownToAdf(input.description) } : {}),
        },
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await send("POST", "/rest/api/3/issue", request.body);
      return { ok: true, data: raw };
    },
    // Update summary and/or description; at least one must be given.
    async updateIssue(input: UpdateInput): Promise<CommandOutput<unknown>> {
      const fields: Record<string, unknown> = {};
      if (input.summary) {
        fields.summary = input.summary;
      }
      if (input.description) {
        fields.description = markdownToAdf(input.description);
      }
      if (Object.keys(fields).length === 0) {
        throw new Error("jira-update requires --summary and/or --description-file");
      }
      const request = createRequest(options.config, "PUT", `/rest/api/3/issue/${input.issue}`, {
        fields,
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      await send("PUT", `/rest/api/3/issue/${input.issue}`, request.body);
      return {
        ok: true,
        data: {
          issue: input.issue,
          updated: true,
        },
      };
    },
    // Add a comment (markdown body converted to ADF).
    async commentIssue(input: CommentInput): Promise<CommandOutput<unknown>> {
      const request = createRequest(options.config, "POST", `/rest/api/3/issue/${input.issue}/comment`, {
        body: markdownToAdf(input.body),
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await send("POST", `/rest/api/3/issue/${input.issue}/comment`, request.body);
      return {
        ok: true,
        data: raw,
      };
    },
    // Execute a workflow transition by its ID.
    async transitionIssue(input: TransitionInput): Promise<CommandOutput<unknown>> {
      const request = createRequest(
        options.config,
        "POST",
        `/rest/api/3/issue/${input.issue}/transitions`,
        {
          transition: {
            id: input.transition,
          },
        },
      );
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      await send("POST", `/rest/api/3/issue/${input.issue}/transitions`, request.body);
      return {
        ok: true,
        data: {
          issue: input.issue,
          transitioned: true,
          transition: input.transition,
        },
      };
    },
  };
}

View File

@@ -0,0 +1,44 @@
import type { CommandOutput, OutputFormat, Writer } from "./types.js";
/**
 * Render a CommandOutput payload as compact human-readable text for the
 * known data shapes (issue lists, a single issue, transition lists); any
 * other payload falls back to pretty-printed JSON.
 */
function renderText(payload: CommandOutput<unknown>) {
  const data = payload.data as Record<string, unknown>;
  const issueList = data?.issues;
  if (Array.isArray(issueList)) {
    const rows = issueList.map((entry) => {
      const issue = entry as Record<string, string>;
      return `${issue.key} [${issue.status}] ${issue.issueType} - ${issue.summary}`;
    });
    return rows.join("\n");
  }
  const single = data?.issue;
  if (single && typeof single === "object") {
    const issue = single as Record<string, string>;
    const parts = [
      issue.key,
      `${issue.issueType} | ${issue.status}`,
      issue.summary,
      issue.url,
    ];
    return parts.join("\n");
  }
  const transitionList = data?.transitions;
  if (Array.isArray(transitionList)) {
    const rows = transitionList.map((entry) => {
      const transition = entry as Record<string, string>;
      return `${transition.id} ${transition.name} -> ${transition.toStatus}`;
    });
    return rows.join("\n");
  }
  return JSON.stringify(payload, null, 2);
}

/**
 * Write the payload to the given writer, newline-terminated, either as text
 * (via renderText) or pretty-printed JSON (the default).
 */
export function writeOutput(
  writer: Writer,
  payload: CommandOutput<unknown>,
  format: OutputFormat = "json",
) {
  const rendered = format === "text" ? renderText(payload) : JSON.stringify(payload, null, 2);
  writer.write(`${rendered}\n`);
}

View File

@@ -0,0 +1,85 @@
import { readWorkspaceFile } from "./files.js";
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
// Allowlisted API roots per product; `raw` refuses anything outside them.
const JIRA_ALLOWED_PREFIXES = ["/rest/api/3/"] as const;
const CONFLUENCE_ALLOWED_PREFIXES = ["/wiki/api/v2/", "/wiki/rest/api/"] as const;

// Validated input for the `raw` escape-hatch command.
type RawInput = {
  product: "jira" | "confluence";
  method: string;
  path: string;
  bodyFile?: string;
  cwd: string;
  dryRun?: boolean;
};

/** Allowlist for the given product. */
function getAllowedPrefixes(product: RawInput["product"]) {
  return product === "jira" ? JIRA_ALLOWED_PREFIXES : CONFLUENCE_ALLOWED_PREFIXES;
}

/** Resolve the request URL against the product-specific base URL. */
function buildUrl(config: AtlassianConfig, product: RawInput["product"], path: string) {
  const baseUrl = product === "jira" ? config.jiraBaseUrl : config.confluenceBaseUrl;
  return new URL(path, `${baseUrl}/`).toString();
}

/**
 * Restrict raw requests to non-destructive verbs (no DELETE/PATCH).
 * @throws Error for any method other than GET/POST/PUT
 */
function validateMethod(method: string): asserts method is "GET" | "POST" | "PUT" {
  if (!["GET", "POST", "PUT"].includes(method)) {
    throw new Error("raw only allows GET, POST, and PUT");
  }
}

/**
 * Ensure the path stays inside the product allowlist.
 *
 * Beyond the prefix check, dot-segments are rejected: URL resolution
 * collapses ".." (including its percent-encoded form), so a path like
 * "/rest/api/3/../2/x" would pass the prefix test yet escape the
 * allowlisted API root once resolved by buildUrl.
 *
 * @throws Error when the path is outside the allowlist or contains
 *         traversal segments
 */
function validatePath(product: RawInput["product"], path: string) {
  const allowedPrefixes = getAllowedPrefixes(product);
  if (!allowedPrefixes.some((prefix) => path.startsWith(prefix))) {
    throw new Error(`raw path is not allowed for ${product}: ${path}`);
  }
  if (path.split("/").includes("..") || /%2e/i.test(path)) {
    throw new Error(`raw path must not contain traversal segments: ${path}`);
  }
}
// Load and parse an optional JSON request body from the workspace.
// Returns undefined when no body file was requested.
async function readRawBody(bodyFile: string | undefined, cwd: string) {
  if (!bodyFile) {
    return undefined;
  }
  const raw = await readWorkspaceFile(bodyFile, cwd);
  return JSON.parse(raw) as unknown;
}
// Execute (or dry-run) an allow-listed raw Atlassian API request.
export async function runRawCommand(
  config: AtlassianConfig,
  fetchImpl: FetchLike | undefined,
  input: RawInput,
): Promise<CommandOutput<unknown>> {
  // Fail fast before touching the filesystem or network.
  validateMethod(input.method);
  validatePath(input.product, input.path);

  const body = await readRawBody(input.bodyFile, input.cwd);
  const url = buildUrl(config, input.product, input.path);
  const request =
    body === undefined
      ? { method: input.method, url }
      : { method: input.method, url, body };

  // Dry runs echo the would-be request so callers can inspect it safely.
  if (input.dryRun) {
    return { ok: true, dryRun: true, data: request };
  }

  const data = await sendJsonRequest({
    config,
    fetchImpl,
    url,
    method: input.method,
    body,
    errorPrefix: "Raw request failed",
  });
  return { ok: true, data };
}

View File

@@ -0,0 +1,35 @@
// Resolved Atlassian connection settings (built by loadConfig from env/.env).
export type AtlassianConfig = {
  baseUrl: string; // site root, trailing slashes stripped
  jiraBaseUrl: string; // falls back to baseUrl when not overridden
  confluenceBaseUrl: string; // falls back to baseUrl when not overridden
  email: string; // account email for basic auth
  apiToken: string; // API token paired with the email
  defaultProject?: string; // fallback Jira project key
  defaultSpace?: string; // fallback Confluence space ID
};
// Uniform envelope every CLI command prints.
export type CommandOutput<T> = {
  ok: boolean;
  data: T;
  dryRun?: boolean; // present (true) when the request was only echoed
  raw?: unknown; // original API response, when preserved
};
// Normalized Jira issue shape used by list/get commands.
export type JiraIssueSummary = {
  key: string;
  summary: string;
  issueType: string;
  status: string;
  assignee?: string;
  created: string;
  updated: string;
  url: string;
};
// Minimal sink interface satisfied by process.stdout and test buffers.
export type Writer = {
  write(chunk: string | Uint8Array): unknown;
};
// Injectable fetch implementation for tests.
export type FetchLike = typeof fetch;
export type OutputFormat = "json" | "text";

View File

@@ -0,0 +1,15 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": true,
"skipLibCheck": true,
"resolveJsonModule": true,
"types": ["node"],
"outDir": "dist"
},
"include": ["src/**/*.ts", "scripts/**/*.ts", "tests/**/*.ts"]
}

View File

@@ -0,0 +1,22 @@
{
"name": "atlassian-skill-scripts",
"version": "1.0.0",
"description": "Shared runtime for the Atlassian skill",
"type": "module",
"scripts": {
"atlassian": "tsx src/cli.ts",
"test": "node --import tsx --test tests/*.test.ts",
"typecheck": "tsc --noEmit",
"sync:agents": "tsx scripts/sync-agents.ts"
},
"dependencies": {
"commander": "^13.1.0",
"dotenv": "^16.4.7"
},
"devDependencies": {
"@types/node": "^24.3.0",
"tsx": "^4.20.5",
"typescript": "^5.9.2"
},
"packageManager": "pnpm@10.18.1+sha512.77a884a165cbba2d8d1c19e3b4880eee6d2fcabd0d879121e282196b80042351d5eb3ca0935fa599da1dc51265cc68816ad2bddd2a2de5ea9fdf92adbec7cd34"
}

View File

@@ -0,0 +1,361 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
importers:
.:
dependencies:
commander:
specifier: ^13.1.0
version: 13.1.0
dotenv:
specifier: ^16.4.7
version: 16.6.1
devDependencies:
'@types/node':
specifier: ^24.3.0
version: 24.12.0
tsx:
specifier: ^4.20.5
version: 4.21.0
typescript:
specifier: ^5.9.2
version: 5.9.3
packages:
'@esbuild/aix-ppc64@0.27.3':
resolution: {integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==}
engines: {node: '>=18'}
cpu: [ppc64]
os: [aix]
'@esbuild/android-arm64@0.27.3':
resolution: {integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [android]
'@esbuild/android-arm@0.27.3':
resolution: {integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==}
engines: {node: '>=18'}
cpu: [arm]
os: [android]
'@esbuild/android-x64@0.27.3':
resolution: {integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==}
engines: {node: '>=18'}
cpu: [x64]
os: [android]
'@esbuild/darwin-arm64@0.27.3':
resolution: {integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [darwin]
'@esbuild/darwin-x64@0.27.3':
resolution: {integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==}
engines: {node: '>=18'}
cpu: [x64]
os: [darwin]
'@esbuild/freebsd-arm64@0.27.3':
resolution: {integrity: sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==}
engines: {node: '>=18'}
cpu: [arm64]
os: [freebsd]
'@esbuild/freebsd-x64@0.27.3':
resolution: {integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==}
engines: {node: '>=18'}
cpu: [x64]
os: [freebsd]
'@esbuild/linux-arm64@0.27.3':
resolution: {integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==}
engines: {node: '>=18'}
cpu: [arm64]
os: [linux]
'@esbuild/linux-arm@0.27.3':
resolution: {integrity: sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==}
engines: {node: '>=18'}
cpu: [arm]
os: [linux]
'@esbuild/linux-ia32@0.27.3':
resolution: {integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==}
engines: {node: '>=18'}
cpu: [ia32]
os: [linux]
'@esbuild/linux-loong64@0.27.3':
resolution: {integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==}
engines: {node: '>=18'}
cpu: [loong64]
os: [linux]
'@esbuild/linux-mips64el@0.27.3':
resolution: {integrity: sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==}
engines: {node: '>=18'}
cpu: [mips64el]
os: [linux]
'@esbuild/linux-ppc64@0.27.3':
resolution: {integrity: sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==}
engines: {node: '>=18'}
cpu: [ppc64]
os: [linux]
'@esbuild/linux-riscv64@0.27.3':
resolution: {integrity: sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==}
engines: {node: '>=18'}
cpu: [riscv64]
os: [linux]
'@esbuild/linux-s390x@0.27.3':
resolution: {integrity: sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==}
engines: {node: '>=18'}
cpu: [s390x]
os: [linux]
'@esbuild/linux-x64@0.27.3':
resolution: {integrity: sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==}
engines: {node: '>=18'}
cpu: [x64]
os: [linux]
'@esbuild/netbsd-arm64@0.27.3':
resolution: {integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==}
engines: {node: '>=18'}
cpu: [arm64]
os: [netbsd]
'@esbuild/netbsd-x64@0.27.3':
resolution: {integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==}
engines: {node: '>=18'}
cpu: [x64]
os: [netbsd]
'@esbuild/openbsd-arm64@0.27.3':
resolution: {integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==}
engines: {node: '>=18'}
cpu: [arm64]
os: [openbsd]
'@esbuild/openbsd-x64@0.27.3':
resolution: {integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==}
engines: {node: '>=18'}
cpu: [x64]
os: [openbsd]
'@esbuild/openharmony-arm64@0.27.3':
resolution: {integrity: sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==}
engines: {node: '>=18'}
cpu: [arm64]
os: [openharmony]
'@esbuild/sunos-x64@0.27.3':
resolution: {integrity: sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==}
engines: {node: '>=18'}
cpu: [x64]
os: [sunos]
'@esbuild/win32-arm64@0.27.3':
resolution: {integrity: sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==}
engines: {node: '>=18'}
cpu: [arm64]
os: [win32]
'@esbuild/win32-ia32@0.27.3':
resolution: {integrity: sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==}
engines: {node: '>=18'}
cpu: [ia32]
os: [win32]
'@esbuild/win32-x64@0.27.3':
resolution: {integrity: sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==}
engines: {node: '>=18'}
cpu: [x64]
os: [win32]
'@types/node@24.12.0':
resolution: {integrity: sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==}
commander@13.1.0:
resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==}
engines: {node: '>=18'}
dotenv@16.6.1:
resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==}
engines: {node: '>=12'}
esbuild@0.27.3:
resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==}
engines: {node: '>=18'}
hasBin: true
fsevents@2.3.3:
resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
os: [darwin]
get-tsconfig@4.13.6:
resolution: {integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==}
resolve-pkg-maps@1.0.0:
resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==}
tsx@4.21.0:
resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==}
engines: {node: '>=18.0.0'}
hasBin: true
typescript@5.9.3:
resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
engines: {node: '>=14.17'}
hasBin: true
undici-types@7.16.0:
resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==}
snapshots:
'@esbuild/aix-ppc64@0.27.3':
optional: true
'@esbuild/android-arm64@0.27.3':
optional: true
'@esbuild/android-arm@0.27.3':
optional: true
'@esbuild/android-x64@0.27.3':
optional: true
'@esbuild/darwin-arm64@0.27.3':
optional: true
'@esbuild/darwin-x64@0.27.3':
optional: true
'@esbuild/freebsd-arm64@0.27.3':
optional: true
'@esbuild/freebsd-x64@0.27.3':
optional: true
'@esbuild/linux-arm64@0.27.3':
optional: true
'@esbuild/linux-arm@0.27.3':
optional: true
'@esbuild/linux-ia32@0.27.3':
optional: true
'@esbuild/linux-loong64@0.27.3':
optional: true
'@esbuild/linux-mips64el@0.27.3':
optional: true
'@esbuild/linux-ppc64@0.27.3':
optional: true
'@esbuild/linux-riscv64@0.27.3':
optional: true
'@esbuild/linux-s390x@0.27.3':
optional: true
'@esbuild/linux-x64@0.27.3':
optional: true
'@esbuild/netbsd-arm64@0.27.3':
optional: true
'@esbuild/netbsd-x64@0.27.3':
optional: true
'@esbuild/openbsd-arm64@0.27.3':
optional: true
'@esbuild/openbsd-x64@0.27.3':
optional: true
'@esbuild/openharmony-arm64@0.27.3':
optional: true
'@esbuild/sunos-x64@0.27.3':
optional: true
'@esbuild/win32-arm64@0.27.3':
optional: true
'@esbuild/win32-ia32@0.27.3':
optional: true
'@esbuild/win32-x64@0.27.3':
optional: true
'@types/node@24.12.0':
dependencies:
undici-types: 7.16.0
commander@13.1.0: {}
dotenv@16.6.1: {}
esbuild@0.27.3:
optionalDependencies:
'@esbuild/aix-ppc64': 0.27.3
'@esbuild/android-arm': 0.27.3
'@esbuild/android-arm64': 0.27.3
'@esbuild/android-x64': 0.27.3
'@esbuild/darwin-arm64': 0.27.3
'@esbuild/darwin-x64': 0.27.3
'@esbuild/freebsd-arm64': 0.27.3
'@esbuild/freebsd-x64': 0.27.3
'@esbuild/linux-arm': 0.27.3
'@esbuild/linux-arm64': 0.27.3
'@esbuild/linux-ia32': 0.27.3
'@esbuild/linux-loong64': 0.27.3
'@esbuild/linux-mips64el': 0.27.3
'@esbuild/linux-ppc64': 0.27.3
'@esbuild/linux-riscv64': 0.27.3
'@esbuild/linux-s390x': 0.27.3
'@esbuild/linux-x64': 0.27.3
'@esbuild/netbsd-arm64': 0.27.3
'@esbuild/netbsd-x64': 0.27.3
'@esbuild/openbsd-arm64': 0.27.3
'@esbuild/openbsd-x64': 0.27.3
'@esbuild/openharmony-arm64': 0.27.3
'@esbuild/sunos-x64': 0.27.3
'@esbuild/win32-arm64': 0.27.3
'@esbuild/win32-ia32': 0.27.3
'@esbuild/win32-x64': 0.27.3
fsevents@2.3.3:
optional: true
get-tsconfig@4.13.6:
dependencies:
resolve-pkg-maps: 1.0.0
resolve-pkg-maps@1.0.0: {}
tsx@4.21.0:
dependencies:
esbuild: 0.27.3
get-tsconfig: 4.13.6
optionalDependencies:
fsevents: 2.3.3
typescript@5.9.3: {}
undici-types@7.16.0: {}

View File

@@ -0,0 +1,60 @@
import { cp, mkdir, readFile, rm, writeFile } from "node:fs/promises";
import path from "node:path";
import { fileURLToPath } from "node:url";
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Shared runtime lives at <skill>/shared/scripts; the skill root is two levels up.
const SHARED_SCRIPTS_DIR = path.resolve(__dirname, "..");
const ATLASSIAN_SKILL_DIR = path.resolve(SHARED_SCRIPTS_DIR, "..", "..");
// Per-agent directories that receive a mirrored copy of the runtime.
const AGENTS = ["codex", "claude-code", "cursor", "opencode"] as const;
// Entries copied verbatim; package.json is rewritten separately in syncAgent.
const ENTRIES_TO_COPY = ["pnpm-lock.yaml", "tsconfig.json", "src"] as const;
// Replace `target` with a fresh recursive copy of `source`.
async function replaceEntry(source: string, target: string) {
  // Remove any stale copy first so deletions in `source` propagate.
  await rm(target, { force: true, recursive: true });
  await cp(source, target, { recursive: true });
}
// Mirror the shared runtime into one agent's scripts directory and write a
// trimmed package.json that exposes only the commands agents need.
async function syncAgent(agent: (typeof AGENTS)[number]) {
  const destination = path.join(ATLASSIAN_SKILL_DIR, agent, "scripts");
  await mkdir(destination, { recursive: true });

  // Copy the lockfile, tsconfig, and sources verbatim.
  for (const entry of ENTRIES_TO_COPY) {
    const from = path.join(SHARED_SCRIPTS_DIR, entry);
    const to = path.join(destination, entry);
    await replaceEntry(from, to);
  }

  // Rewrite package.json, keeping only the run/typecheck scripts.
  const manifestPath = path.join(SHARED_SCRIPTS_DIR, "package.json");
  const manifest = JSON.parse(await readFile(manifestPath, "utf8")) as {
    scripts?: Record<string, string>;
    [key: string]: unknown;
  };
  manifest.scripts = {
    atlassian: manifest.scripts?.atlassian ?? "tsx src/cli.ts",
    typecheck: manifest.scripts?.typecheck ?? "tsc --noEmit",
  };
  const serialized = `${JSON.stringify(manifest, null, 2)}\n`;
  await writeFile(path.join(destination, "package.json"), serialized, "utf8");
}
// Sync every agent sequentially; any failure rejects and flips the exit code.
async function main() {
  for (const agent of AGENTS) {
    await syncAgent(agent);
  }
  console.log(`Synced runtime bundle into ${AGENTS.length} agent script directories.`);
}
main().catch((error: unknown) => {
  // Print a terse message and signal failure without an abrupt process.exit().
  const message = error instanceof Error ? error.message : String(error);
  console.error(message);
  process.exitCode = 1;
});

View File

@@ -0,0 +1,92 @@
const TEXT_NODE = "text";

// Wrap a plain string in an ADF text node.
function textNode(text: string) {
  return { type: TEXT_NODE, text };
}
// Build an ADF paragraph from pre-split lines: lines are joined with
// hardBreak nodes, and an empty line contributes only the break. A paragraph
// with no content at all omits the `content` key entirely.
function paragraphNode(lines: string[]) {
  const content: Array<{ type: string; text?: string }> = [];
  for (let i = 0; i < lines.length; i += 1) {
    if (i > 0) {
      content.push({ type: "hardBreak" });
    }
    const line = lines[i];
    if (line.length > 0) {
      content.push({ type: "text", text: line });
    }
  }
  if (content.length === 0) {
    return { type: "paragraph" };
  }
  return { type: "paragraph", content };
}
// Convert a small subset of Markdown (ATX headings, "-"/"*" bullet lists,
// and plain paragraphs with hard line breaks) into an Atlassian Document
// Format (ADF) document. Unrecognized syntax is treated as paragraph text.
export function markdownToAdf(input: string) {
  const lines = input.replace(/\r\n/g, "\n").split("\n");
  const blocks: Array<Record<string, unknown>> = [];
  let cursor = 0;

  const asText = (text: string) => ({ type: "text", text });

  while (cursor < lines.length) {
    const line = lines[cursor]?.trimEnd() ?? "";

    // Blank lines separate blocks and are otherwise ignored.
    if (line.trim().length === 0) {
      cursor += 1;
      continue;
    }

    // ATX heading: one to six '#' characters, whitespace, then text.
    const headingMatch = line.match(/^(#{1,6})\s+(.*)$/);
    if (headingMatch) {
      blocks.push({
        type: "heading",
        attrs: { level: headingMatch[1].length },
        content: [asText(headingMatch[2])],
      });
      cursor += 1;
      continue;
    }

    // Bullet list: consecutive lines starting with "-" or "*".
    if (/^[-*]\s+/.test(line)) {
      const items: Array<Record<string, unknown>> = [];
      while (cursor < lines.length && /^[-*]\s+/.test(lines[cursor] ?? "")) {
        const itemText = (lines[cursor] ?? "").replace(/^[-*]\s+/, "");
        items.push({
          type: "listItem",
          content: [{ type: "paragraph", content: [asText(itemText)] }],
        });
        cursor += 1;
      }
      blocks.push({ type: "bulletList", content: items });
      continue;
    }

    // Paragraph: gather consecutive non-blank lines, joined by hardBreaks.
    const paragraphLines: string[] = [];
    while (cursor < lines.length && (lines[cursor]?.trim().length ?? 0) > 0) {
      paragraphLines.push(lines[cursor] ?? "");
      cursor += 1;
    }
    const content: Array<{ type: string; text?: string }> = [];
    paragraphLines.forEach((text, index) => {
      if (index > 0) {
        content.push({ type: "hardBreak" });
      }
      if (text.length > 0) {
        content.push(asText(text));
      }
    });
    blocks.push({
      type: "paragraph",
      ...(content.length > 0 ? { content } : {}),
    });
  }

  return { type: "doc", version: 1, content: blocks };
}

View File

@@ -0,0 +1,332 @@
import process from "node:process";
import { pathToFileURL } from "node:url";
import { Command } from "commander";
import { createConfluenceClient } from "./confluence.js";
import { loadConfig } from "./config.js";
import { readWorkspaceFile } from "./files.js";
import { runHealthCheck } from "./health.js";
import { createJiraClient } from "./jira.js";
import { writeOutput } from "./output.js";
import { runRawCommand } from "./raw.js";
import type { FetchLike, OutputFormat, Writer } from "./types.js";
// Injection points for tests; every field falls back to the real process
// counterpart inside createRuntime.
type CliContext = {
  cwd?: string;
  env?: NodeJS.ProcessEnv;
  fetchImpl?: FetchLike;
  stdout?: Writer;
  stderr?: Writer;
};
// Anything other than an explicit "text" collapses to the JSON default.
function resolveFormat(format: string | undefined): OutputFormat {
  if (format === "text") {
    return "text";
  }
  return "json";
}
// Build the lazily-initialized service container shared by all commands.
// Config and API clients are created on first use and then memoized, so a
// command that never touches Jira or Confluence never constructs that client.
function createRuntime(context: CliContext) {
  const cwd = context.cwd ?? process.cwd();
  const env = context.env ?? process.env;
  const stdout = context.stdout ?? process.stdout;
  const stderr = context.stderr ?? process.stderr;

  let cachedConfig: ReturnType<typeof loadConfig> | undefined;
  let cachedJira: ReturnType<typeof createJiraClient> | undefined;
  let cachedConfluence: ReturnType<typeof createConfluenceClient> | undefined;

  function getConfig() {
    if (cachedConfig === undefined) {
      cachedConfig = loadConfig(env, { cwd });
    }
    return cachedConfig;
  }

  function getJiraClient() {
    if (cachedJira === undefined) {
      cachedJira = createJiraClient({
        config: getConfig(),
        fetchImpl: context.fetchImpl,
      });
    }
    return cachedJira;
  }

  function getConfluenceClient() {
    if (cachedConfluence === undefined) {
      cachedConfluence = createConfluenceClient({
        config: getConfig(),
        fetchImpl: context.fetchImpl,
      });
    }
    return cachedConfluence;
  }

  // Resolve an optional workspace-relative file into its contents.
  async function readBodyFile(filePath: string | undefined) {
    if (!filePath) {
      return undefined;
    }
    return readWorkspaceFile(filePath, cwd);
  }

  return {
    cwd,
    stdout,
    stderr,
    readBodyFile,
    getConfig,
    getJiraClient,
    getConfluenceClient,
    fetchImpl: context.fetchImpl,
  };
}
// Assemble the commander program with all Jira/Confluence subcommands.
// Conventions shared by every command: --format selects json|text output,
// and mutating commands accept --dry-run to echo the request instead of
// sending it. All work is delegated to the lazily-built runtime clients.
export function buildProgram(context: CliContext = {}) {
  const runtime = createRuntime(context);
  const program = new Command()
    .name("atlassian")
    .description("Portable Atlassian CLI for multi-agent skills")
    .version("0.1.0");
  // Connectivity / configuration diagnostics.
  program
    .command("health")
    .description("Validate configuration and Atlassian connectivity")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runHealthCheck(runtime.getConfig(), runtime.fetchImpl);
      writeOutput(
        runtime.stdout,
        payload,
        resolveFormat(options.format),
      );
    });
  // Confluence commands.
  program
    .command("conf-search")
    .requiredOption("--query <query>", "CQL search query")
    .option("--max-results <number>", "Maximum results to return", "50")
    .option("--start-at <number>", "Result offset", "0")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().searchPages({
        query: options.query,
        maxResults: Number(options.maxResults),
        startAt: Number(options.startAt),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-get")
    .requiredOption("--page <page>", "Confluence page ID")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().getPage(options.page);
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-create")
    .requiredOption("--title <title>", "Confluence page title")
    .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
    .option("--space <space>", "Confluence space ID")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().createPage({
        space: options.space,
        title: options.title,
        // --body-file is a required option, so the cast is safe here.
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-update")
    .requiredOption("--page <page>", "Confluence page ID")
    .requiredOption("--title <title>", "Confluence page title")
    .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().updatePage({
        pageId: options.page,
        title: options.title,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-comment")
    .requiredOption("--page <page>", "Confluence page ID")
    .requiredOption("--body-file <path>", "Workspace-relative storage-format body file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getConfluenceClient().commentPage({
        pageId: options.page,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("conf-children")
    .requiredOption("--page <page>", "Confluence page ID")
    .option("--max-results <number>", "Maximum results to return", "50")
    .option("--start-at <number>", "Cursor/start token", "0")
    .action(async (options) => {
      // NOTE(review): the option help calls this a "Cursor/start token", but
      // Confluence v2 children pagination cursors are opaque strings; coercing
      // through Number() would turn a real cursor into NaN — confirm against
      // listChildren before relying on pagination here.
      const payload = await runtime.getConfluenceClient().listChildren(
        options.page,
        Number(options.maxResults),
        Number(options.startAt),
      );
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  // Escape hatch for endpoints without a dedicated command; the path is
  // allow-listed inside runRawCommand.
  program
    .command("raw")
    .requiredOption("--product <product>", "jira or confluence")
    .requiredOption("--method <method>", "GET, POST, or PUT")
    .requiredOption("--path <path>", "Validated API path")
    .option("--body-file <path>", "Workspace-relative JSON file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runRawCommand(runtime.getConfig(), runtime.fetchImpl, {
        product: options.product,
        method: String(options.method).toUpperCase(),
        path: options.path,
        bodyFile: options.bodyFile,
        cwd: runtime.cwd,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  // Jira commands.
  program
    .command("jira-search")
    .requiredOption("--jql <jql>", "JQL expression to execute")
    .option("--max-results <number>", "Maximum results to return", "50")
    .option("--start-at <number>", "Result offset", "0")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().searchIssues({
        jql: options.jql,
        maxResults: Number(options.maxResults),
        startAt: Number(options.startAt),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-get")
    .requiredOption("--issue <issue>", "Issue key")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().getIssue(options.issue);
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-create")
    .requiredOption("--type <type>", "Issue type name")
    .requiredOption("--summary <summary>", "Issue summary")
    .option("--project <project>", "Project key")
    .option("--description-file <path>", "Workspace-relative markdown/text file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().createIssue({
        project: options.project,
        type: options.type,
        summary: options.summary,
        description: await runtime.readBodyFile(options.descriptionFile),
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-update")
    .requiredOption("--issue <issue>", "Issue key")
    .option("--summary <summary>", "Updated summary")
    .option("--description-file <path>", "Workspace-relative markdown/text file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().updateIssue({
        issue: options.issue,
        summary: options.summary,
        description: await runtime.readBodyFile(options.descriptionFile),
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-comment")
    .requiredOption("--issue <issue>", "Issue key")
    .requiredOption("--body-file <path>", "Workspace-relative markdown/text file")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().commentIssue({
        issue: options.issue,
        body: (await runtime.readBodyFile(options.bodyFile)) as string,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-transitions")
    .requiredOption("--issue <issue>", "Issue key")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().getTransitions(options.issue);
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  program
    .command("jira-transition")
    .requiredOption("--issue <issue>", "Issue key")
    .requiredOption("--transition <transition>", "Transition ID")
    .option("--dry-run", "Print the request without sending it")
    .option("--format <format>", "Output format", "json")
    .action(async (options) => {
      const payload = await runtime.getJiraClient().transitionIssue({
        issue: options.issue,
        transition: options.transition,
        dryRun: Boolean(options.dryRun),
      });
      writeOutput(runtime.stdout, payload, resolveFormat(options.format));
    });
  return program;
}
// Entry point used by both direct execution and embedding tests.
export async function runCli(argv = process.argv, context: CliContext = {}) {
  await buildProgram(context).parseAsync(argv);
}
// Run the CLI only when this module is the executed entry script, not when
// imported; this mirrors the CommonJS `require.main === module` idiom for ESM.
const isDirectExecution =
  Boolean(process.argv[1]) && import.meta.url === pathToFileURL(process.argv[1]).href;
if (isDirectExecution) {
  runCli().catch((error: unknown) => {
    // Surface the message on stderr and fail the process without hard-exiting.
    const message = error instanceof Error ? error.message : String(error);
    process.stderr.write(`${message}\n`);
    process.exitCode = 1;
  });
}

View File

@@ -0,0 +1,52 @@
import path from "node:path";
import { config as loadDotEnv } from "dotenv";
import type { AtlassianConfig } from "./types.js";
// Strip any trailing slashes so URLs can be joined predictably.
function normalizeBaseUrl(value: string) {
  let end = value.length;
  while (end > 0 && value[end - 1] === "/") {
    end -= 1;
  }
  return value.slice(0, end);
}
// Fetch a mandatory env var, trimming whitespace; throws when absent or blank.
function readRequired(env: NodeJS.ProcessEnv, key: string) {
  const raw = env[key];
  const value = raw === undefined ? undefined : raw.trim();
  if (!value) {
    throw new Error(`Missing required environment variable: ${key}`);
  }
  return value;
}
// Load Atlassian settings from the environment, first hydrating `env` from a
// .env file in `options.cwd` (existing variables win because override=false).
export function loadConfig(
  env: NodeJS.ProcessEnv = process.env,
  options?: {
    cwd?: string;
  },
): AtlassianConfig {
  const envFile = path.resolve(options?.cwd ?? process.cwd(), ".env");
  loadDotEnv({
    path: envFile,
    processEnv: env as Record<string, string>,
    override: false,
  });

  const baseUrl = normalizeBaseUrl(readRequired(env, "ATLASSIAN_BASE_URL"));
  const jiraOverride = env.ATLASSIAN_JIRA_BASE_URL?.trim();
  const confluenceOverride = env.ATLASSIAN_CONFLUENCE_BASE_URL?.trim();

  return {
    baseUrl,
    // Product-specific hosts fall back to the shared base URL.
    jiraBaseUrl: normalizeBaseUrl(jiraOverride || baseUrl),
    confluenceBaseUrl: normalizeBaseUrl(confluenceOverride || baseUrl),
    email: readRequired(env, "ATLASSIAN_EMAIL"),
    apiToken: readRequired(env, "ATLASSIAN_API_TOKEN"),
    defaultProject: env.ATLASSIAN_DEFAULT_PROJECT?.trim() || undefined,
    defaultSpace: env.ATLASSIAN_DEFAULT_SPACE?.trim() || undefined,
  };
}
// Build the HTTP Basic Authorization header value from email + API token.
export function createBasicAuthHeader(config: {
  email: string;
  apiToken: string;
  [key: string]: unknown;
}) {
  const credentials = `${config.email}:${config.apiToken}`;
  const encoded = Buffer.from(credentials).toString("base64");
  return `Basic ${encoded}`;
}

View File

@@ -0,0 +1,292 @@
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
type ConfluenceClientOptions = {
  config: AtlassianConfig;
  fetchImpl?: FetchLike; // injectable for tests
};
// CQL search parameters.
type SearchInput = {
  query: string;
  maxResults: number;
  startAt: number;
};
// Page creation parameters; `space` falls back to config.defaultSpace.
type CreateInput = {
  space?: string;
  title: string;
  body: string; // Confluence storage-format body
  dryRun?: boolean;
};
type UpdateInput = {
  pageId: string;
  title: string;
  body: string; // Confluence storage-format body
  dryRun?: boolean;
};
type CommentInput = {
  pageId: string;
  body: string; // Confluence storage-format body
  dryRun?: boolean;
};
// Normalized page shape returned by search/list commands.
type PageSummary = {
  id: string;
  title: string;
  type: string;
  status?: string;
  spaceId?: string;
  url?: string;
};
// Resolve an API path against the Confluence base URL.
function buildUrl(baseUrl: string, path: string) {
  const resolved = new URL(path, `${baseUrl}/`);
  return resolved.toString();
}
// Project a raw Confluence page payload onto the compact summary shape,
// omitting optional keys that are absent rather than emitting undefined.
function normalizePage(baseUrl: string, page: Record<string, unknown>, excerpt?: string) {
  const links = (page._links ?? {}) as Record<string, unknown>;
  const summary: Record<string, unknown> = {
    id: String(page.id ?? ""),
    title: String(page.title ?? ""),
    type: String(page.type ?? "page"),
  };
  if (page.status) {
    summary.status = String(page.status);
  }
  if (page.spaceId) {
    summary.spaceId = String(page.spaceId);
  }
  if (excerpt) {
    summary.excerpt = excerpt;
  }
  if (links.webui) {
    summary.url = `${baseUrl}${String(links.webui)}`;
  }
  return summary;
}
export function createConfluenceClient(options: ConfluenceClientOptions) {
const config = options.config;
async function getPageForUpdate(pageId: string) {
return (await sendJsonRequest({
config,
fetchImpl: options.fetchImpl,
url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${pageId}?body-format=storage`),
method: "GET",
errorPrefix: "Confluence request failed",
})) as Record<string, unknown>;
}
return {
async searchPages(input: SearchInput): Promise<CommandOutput<unknown>> {
const url = new URL("/wiki/rest/api/search", `${config.confluenceBaseUrl}/`);
url.searchParams.set("cql", input.query);
url.searchParams.set("limit", String(input.maxResults));
url.searchParams.set("start", String(input.startAt));
const raw = (await sendJsonRequest({
config,
fetchImpl: options.fetchImpl,
url: url.toString(),
method: "GET",
errorPrefix: "Confluence request failed",
})) as Record<string, unknown>;
const results = Array.isArray(raw.results) ? raw.results : [];
return {
ok: true,
data: {
pages: results.map((entry) => {
const result = entry as Record<string, unknown>;
return normalizePage(
config.baseUrl,
(result.content ?? {}) as Record<string, unknown>,
result.excerpt ? String(result.excerpt) : undefined,
);
}),
startAt: Number(raw.start ?? input.startAt),
maxResults: Number(raw.limit ?? input.maxResults),
total: Number(raw.totalSize ?? raw.size ?? results.length),
},
};
},
async getPage(pageId: string): Promise<CommandOutput<unknown>> {
const raw = (await sendJsonRequest({
config,
fetchImpl: options.fetchImpl,
url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${pageId}?body-format=storage`),
method: "GET",
errorPrefix: "Confluence request failed",
})) as Record<string, unknown>;
const body = ((raw.body ?? {}) as Record<string, unknown>).storage as Record<string, unknown> | undefined;
return {
ok: true,
data: {
page: {
...normalizePage(config.baseUrl, raw),
version: Number((((raw.version ?? {}) as Record<string, unknown>).number ?? 0)),
body: body?.value ? String(body.value) : "",
},
},
raw,
};
},
async listChildren(pageId: string, maxResults: number, startAt: number): Promise<CommandOutput<unknown>> {
const url = new URL(`/wiki/api/v2/pages/${pageId}/direct-children`, `${config.confluenceBaseUrl}/`);
url.searchParams.set("limit", String(maxResults));
url.searchParams.set("cursor", String(startAt));
const raw = (await sendJsonRequest({
config,
fetchImpl: options.fetchImpl,
url: url.toString(),
method: "GET",
errorPrefix: "Confluence request failed",
})) as Record<string, unknown>;
const results = Array.isArray(raw.results) ? raw.results : [];
const links = (raw._links ?? {}) as Record<string, unknown>;
return {
ok: true,
data: {
pages: results.map((page) => normalizePage(config.baseUrl, page as Record<string, unknown>)),
nextCursor: links.next ? String(links.next) : null,
},
};
},
// Create a new Confluence page with storage-format markup.
// Resolves the target space from --space or ATLASSIAN_DEFAULT_SPACE; in
// dry-run mode the planned request is returned without hitting the API.
async createPage(input: CreateInput): Promise<CommandOutput<unknown>> {
  const targetSpace = input.space || config.defaultSpace;
  if (!targetSpace) {
    throw new Error("conf-create requires --space or ATLASSIAN_DEFAULT_SPACE");
  }
  const plannedRequest = {
    method: "POST" as const,
    url: buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/pages"),
    body: {
      spaceId: targetSpace,
      title: input.title,
      status: "current",
      body: {
        representation: "storage",
        value: input.body,
      },
    },
  };
  if (input.dryRun) {
    return { ok: true, dryRun: true, data: plannedRequest };
  }
  const created = await sendJsonRequest({
    config,
    fetchImpl: options.fetchImpl,
    url: plannedRequest.url,
    method: plannedRequest.method,
    body: plannedRequest.body,
    errorPrefix: "Confluence request failed",
  });
  return { ok: true, data: created };
},
// Update an existing page using a read-modify-write cycle with optimistic
// locking: fetch the current revision, then submit version.number + 1.
// A 409 from Confluence is surfaced as an explicit conflict error.
async updatePage(input: UpdateInput): Promise<CommandOutput<unknown>> {
  const currentPage = await getPageForUpdate(input.pageId);
  // Missing version blocks default to 0 so the write becomes version 1.
  const version = (((currentPage.version ?? {}) as Record<string, unknown>).number ?? 0) as number;
  const spaceId = String(currentPage.spaceId ?? "");
  const request = {
    method: "PUT" as const,
    url: buildUrl(config.confluenceBaseUrl, `/wiki/api/v2/pages/${input.pageId}`),
    body: {
      id: input.pageId,
      status: String(currentPage.status ?? "current"),
      title: input.title,
      spaceId,
      version: {
        number: Number(version) + 1,
      },
      body: {
        representation: "storage",
        value: input.body,
      },
    },
  };
  // Dry-run: surface the planned PUT (including the bumped version) unsent.
  if (input.dryRun) {
    return {
      ok: true,
      dryRun: true,
      data: request,
    };
  }
  const raw = await sendJsonRequest({
    config,
    fetchImpl: options.fetchImpl,
    url: request.url,
    method: request.method,
    body: request.body,
    errorPrefix: "Confluence request failed",
    // 409 means another writer bumped the version between our read and write.
    handleResponseError(response) {
      if (response.status === 409) {
        return new Error(`Confluence update conflict: page ${input.pageId} was updated by someone else`);
      }
      return undefined;
    },
  });
  return {
    ok: true,
    data: raw,
  };
},
// Post a footer comment on a page; in dry-run mode the planned request is
// returned without being sent.
async commentPage(input: CommentInput): Promise<CommandOutput<unknown>> {
  const plannedRequest = {
    method: "POST" as const,
    url: buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/footer-comments"),
    body: {
      pageId: input.pageId,
      body: {
        representation: "storage",
        value: input.body,
      },
    },
  };
  if (input.dryRun) {
    return { ok: true, dryRun: true, data: plannedRequest };
  }
  const created = await sendJsonRequest({
    config,
    fetchImpl: options.fetchImpl,
    url: plannedRequest.url,
    method: plannedRequest.method,
    body: plannedRequest.body,
    errorPrefix: "Confluence request failed",
  });
  return { ok: true, data: created };
},
};
}

View File

@@ -0,0 +1,13 @@
import { readFile } from "node:fs/promises";
import path from "node:path";
// Read a UTF-8 file that must live inside the active workspace (cwd).
// Rejects paths that resolve outside cwd (e.g. "../secrets") while still
// allowing files whose names merely begin with dots (e.g. "..config", which
// the previous `startsWith("..")` check rejected incorrectly).
export async function readWorkspaceFile(filePath: string, cwd: string) {
  const resolved = path.resolve(cwd, filePath);
  const relative = path.relative(cwd, resolved);
  // A path escapes the workspace when its relative form is exactly "..",
  // starts with a "../" segment (".." + separator on this platform), or is
  // absolute (e.g. a different drive on Windows).
  const escapesWorkspace =
    relative === ".." ||
    relative.startsWith(`..${path.sep}`) ||
    path.isAbsolute(relative);
  if (escapesWorkspace) {
    throw new Error(`--body-file must stay within the active workspace: ${filePath}`);
  }
  return readFile(resolved, "utf8");
}

View File

@@ -0,0 +1,69 @@
import { createJsonHeaders, createStatusError } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
// Result of probing one product. `status` is present only when an HTTP
// response was received; `message` carries the failure description.
type ProductHealth = {
  ok: boolean;
  status?: number;
  message?: string;
};
// Join a path onto a base URL, tolerating a missing trailing slash on base.
function buildUrl(baseUrl: string, path: string) {
  const resolved = new URL(path, `${baseUrl}/`);
  return resolved.toString();
}
// Probe Jira and Confluence reachability with one lightweight GET each and
// report per-product health plus the effective configuration. Network and
// HTTP failures are captured into the result rather than thrown.
export async function runHealthCheck(
  config: AtlassianConfig,
  fetchImpl: FetchLike | undefined,
): Promise<CommandOutput<unknown>> {
  const client = fetchImpl ?? globalThis.fetch;
  if (!client) {
    throw new Error("Fetch API is not available in this runtime");
  }
  const probe = async (product: "Jira" | "Confluence", url: string): Promise<ProductHealth> => {
    try {
      const response = await client(url, {
        method: "GET",
        headers: createJsonHeaders(config, false),
      });
      if (response.ok) {
        return { ok: true, status: response.status };
      }
      const failure = createStatusError(`${product} health check failed`, response);
      return { ok: false, status: response.status, message: failure.message };
    } catch (cause: unknown) {
      return {
        ok: false,
        message: cause instanceof Error ? cause.message : String(cause),
      };
    }
  };
  // Probe sequentially so request order (and test expectations) stay stable.
  const jiraHealth = await probe("Jira", buildUrl(config.jiraBaseUrl, "/rest/api/3/myself"));
  const confluenceHealth = await probe(
    "Confluence",
    buildUrl(config.confluenceBaseUrl, "/wiki/api/v2/spaces?limit=1"),
  );
  return {
    ok: jiraHealth.ok && confluenceHealth.ok,
    data: {
      baseUrl: config.baseUrl,
      jiraBaseUrl: config.jiraBaseUrl,
      confluenceBaseUrl: config.confluenceBaseUrl,
      defaultProject: config.defaultProject,
      defaultSpace: config.defaultSpace,
      products: {
        jira: jiraHealth,
        confluence: confluenceHealth,
      },
    },
  };
}

View File

@@ -0,0 +1,86 @@
import { createBasicAuthHeader } from "./config.js";
import type { AtlassianConfig, FetchLike } from "./types.js";
// HTTP verbs this client issues (raw.ts enforces the same set).
export type HttpMethod = "GET" | "POST" | "PUT";
// Build the default header tuples for Atlassian REST calls; a JSON
// Content-Type is appended only when a request body will be sent.
export function createJsonHeaders(config: AtlassianConfig, includeJsonBody: boolean) {
  const contentType: Array<[string, string]> = includeJsonBody
    ? [["Content-Type", "application/json"]]
    : [];
  return [
    ["Accept", "application/json"],
    ["Authorization", createBasicAuthHeader(config)],
    ...contentType,
  ] as Array<[string, string]>;
}
// Decode an Atlassian API response body: null for 204 No Content, parsed
// JSON when the Content-Type says so, raw text otherwise. A JSON content
// type with an unparseable body becomes an explicit error.
export async function parseResponse(response: Response) {
  if (response.status === 204) {
    return null;
  }
  const declaredType = response.headers.get("content-type") ?? "";
  if (!declaredType.includes("application/json")) {
    return response.text();
  }
  try {
    return await response.json();
  } catch {
    throw new Error("Malformed JSON response from Atlassian API");
  }
}
// Translate an HTTP failure into an Error with an actionable hint for the
// common Atlassian failure statuses; other statuses get the bare message.
export function createStatusError(errorPrefix: string, response: Response) {
  const base = `${errorPrefix}: ${response.status} ${response.statusText}`;
  const hints: Record<number, string> = {
    401: "check ATLASSIAN_EMAIL and ATLASSIAN_API_TOKEN",
    403: "verify product permissions for this account",
    404: "verify the resource identifier or API path",
    429: "retry later or reduce request rate",
  };
  const hint = hints[response.status];
  return new Error(hint ? `${base} - ${hint}` : base);
}
// Perform one JSON request against the Atlassian API. Non-OK responses are
// converted to descriptive errors by createStatusError, unless the caller's
// handleResponseError returns a custom Error for that response first.
export async function sendJsonRequest(options: {
  config: AtlassianConfig;
  fetchImpl?: FetchLike;
  url: string;
  method: HttpMethod;
  body?: unknown;
  errorPrefix: string;
  handleResponseError?: (response: Response) => Error | undefined;
}) {
  const client = options.fetchImpl ?? globalThis.fetch;
  if (!client) {
    throw new Error("Fetch API is not available in this runtime");
  }
  const hasBody = options.body !== undefined;
  const init: RequestInit = {
    method: options.method,
    headers: createJsonHeaders(options.config, hasBody),
  };
  if (hasBody) {
    init.body = JSON.stringify(options.body);
  }
  const response = await client(options.url, init);
  if (response.ok) {
    return parseResponse(response);
  }
  throw options.handleResponseError?.(response) ?? createStatusError(options.errorPrefix, response);
}

View File

@@ -0,0 +1,264 @@
import { markdownToAdf } from "./adf.js";
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike, JiraIssueSummary } from "./types.js";
// Jira fields requested on read paths so responses stay small and the
// normalized output shape stays stable.
const ISSUE_FIELDS = ["summary", "issuetype", "status", "assignee", "created", "updated"] as const;
type JiraClientOptions = {
  config: AtlassianConfig;
  // Injectable for tests; defaults to globalThis.fetch.
  fetchImpl?: FetchLike;
};
// Inputs for jira-search (JQL plus explicit pagination).
type SearchInput = {
  jql: string;
  maxResults: number;
  startAt: number;
};
// Inputs for jira-create; project falls back to the configured default.
type CreateInput = {
  project?: string;
  type: string;
  summary: string;
  description?: string;
  dryRun?: boolean;
};
// Inputs for jira-update; at least one mutable field must be provided.
type UpdateInput = {
  issue: string;
  summary?: string;
  description?: string;
  dryRun?: boolean;
};
// Inputs for jira-comment (markdown body, converted to ADF on send).
type CommentInput = {
  issue: string;
  body: string;
  dryRun?: boolean;
};
// Inputs for jira-transition; `transition` is the transition id.
type TransitionInput = {
  issue: string;
  transition: string;
  dryRun?: boolean;
};
// Flatten a raw Jira issue payload into the compact summary the CLI prints,
// defaulting every missing field to an empty string (assignee to undefined).
function normalizeIssue(config: AtlassianConfig, issue: Record<string, unknown>): JiraIssueSummary {
  const fields = (issue.fields ?? {}) as Record<string, unknown>;
  const nested = (name: string) => (fields[name] ?? {}) as Record<string, unknown>;
  const assigneeName = nested("assignee").displayName;
  return {
    key: String(issue.key ?? ""),
    summary: String(fields.summary ?? ""),
    issueType: String(nested("issuetype").name ?? ""),
    status: String(nested("status").name ?? ""),
    assignee: assigneeName ? String(assigneeName) : undefined,
    created: String(fields.created ?? ""),
    updated: String(fields.updated ?? ""),
    url: `${config.baseUrl}/browse/${issue.key ?? ""}`,
  };
}
// Build a {method, url[, body]} request descriptor rooted at the Jira base
// URL; the body key is omitted entirely when no body is supplied.
function createRequest(config: AtlassianConfig, method: "GET" | "POST" | "PUT", path: string, body?: unknown) {
  const target = new URL(path, `${config.jiraBaseUrl}/`);
  const descriptor: { method: typeof method; url: string; body?: unknown } = {
    method,
    url: target.toString(),
  };
  if (body !== undefined) {
    descriptor.body = body;
  }
  return descriptor;
}
// Build the Jira command client. Every method returns a CommandOutput
// envelope; write commands honor dryRun by returning the planned request
// without sending it. Markdown inputs are converted to ADF before upload.
export function createJiraClient(options: JiraClientOptions) {
  const fetchImpl = options.fetchImpl ?? globalThis.fetch;
  if (!fetchImpl) {
    throw new Error("Fetch API is not available in this runtime");
  }
  // Shared helper: resolve the path against jiraBaseUrl and delegate HTTP
  // plus error mapping to sendJsonRequest.
  async function send(method: "GET" | "POST" | "PUT", path: string, body?: unknown) {
    const request = createRequest(options.config, method, path, body);
    return sendJsonRequest({
      config: options.config,
      fetchImpl,
      url: request.url,
      method,
      body,
      errorPrefix: "Jira request failed",
    });
  }
  return {
    // POST /search with the fixed ISSUE_FIELDS list; normalizes each hit.
    async searchIssues(input: SearchInput): Promise<CommandOutput<unknown>> {
      const raw = (await send("POST", "/rest/api/3/search", {
        jql: input.jql,
        maxResults: input.maxResults,
        startAt: input.startAt,
        fields: [...ISSUE_FIELDS],
      })) as Record<string, unknown>;
      const issues = Array.isArray(raw.issues) ? raw.issues : [];
      return {
        ok: true,
        data: {
          issues: issues.map((issue) => normalizeIssue(options.config, issue as Record<string, unknown>)),
          // Echo server-side pagination when present, else the inputs.
          startAt: Number(raw.startAt ?? input.startAt),
          maxResults: Number(raw.maxResults ?? input.maxResults),
          total: Number(raw.total ?? issues.length),
        },
      };
    },
    // GET a single issue (restricted to ISSUE_FIELDS); raw payload included.
    async getIssue(issue: string): Promise<CommandOutput<unknown>> {
      const url = new URL(`/rest/api/3/issue/${issue}`, `${options.config.jiraBaseUrl}/`);
      url.searchParams.set("fields", ISSUE_FIELDS.join(","));
      const raw = (await send("GET", `${url.pathname}${url.search}`)) as Record<string, unknown>;
      return {
        ok: true,
        data: {
          issue: normalizeIssue(options.config, raw),
        },
        raw,
      };
    },
    // List the workflow transitions currently available for an issue.
    async getTransitions(issue: string): Promise<CommandOutput<unknown>> {
      const raw = (await send(
        "GET",
        `/rest/api/3/issue/${issue}/transitions`,
      )) as { transitions?: Array<Record<string, unknown>> };
      return {
        ok: true,
        data: {
          transitions: (raw.transitions ?? []).map((transition) => ({
            id: String(transition.id ?? ""),
            name: String(transition.name ?? ""),
            toStatus: String(((transition.to ?? {}) as Record<string, unknown>).name ?? ""),
            hasScreen: Boolean(transition.hasScreen),
          })),
        },
      };
    },
    // Create an issue; project falls back to ATLASSIAN_DEFAULT_PROJECT and
    // the optional markdown description is converted to ADF.
    async createIssue(input: CreateInput): Promise<CommandOutput<unknown>> {
      const project = input.project || options.config.defaultProject;
      if (!project) {
        throw new Error("jira-create requires --project or ATLASSIAN_DEFAULT_PROJECT");
      }
      const request = createRequest(options.config, "POST", "/rest/api/3/issue", {
        fields: {
          project: { key: project },
          issuetype: { name: input.type },
          summary: input.summary,
          ...(input.description ? { description: markdownToAdf(input.description) } : {}),
        },
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await send("POST", "/rest/api/3/issue", request.body);
      return { ok: true, data: raw };
    },
    // Update summary and/or description; rejects an empty field set so the
    // CLI fails fast instead of issuing a no-op PUT.
    async updateIssue(input: UpdateInput): Promise<CommandOutput<unknown>> {
      const fields: Record<string, unknown> = {};
      if (input.summary) {
        fields.summary = input.summary;
      }
      if (input.description) {
        fields.description = markdownToAdf(input.description);
      }
      if (Object.keys(fields).length === 0) {
        throw new Error("jira-update requires --summary and/or --description-file");
      }
      const request = createRequest(options.config, "PUT", `/rest/api/3/issue/${input.issue}`, {
        fields,
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      await send("PUT", `/rest/api/3/issue/${input.issue}`, request.body);
      return {
        ok: true,
        data: {
          issue: input.issue,
          updated: true,
        },
      };
    },
    // Add a comment; the markdown body is converted to ADF.
    async commentIssue(input: CommentInput): Promise<CommandOutput<unknown>> {
      const request = createRequest(options.config, "POST", `/rest/api/3/issue/${input.issue}/comment`, {
        body: markdownToAdf(input.body),
      });
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      const raw = await send("POST", `/rest/api/3/issue/${input.issue}/comment`, request.body);
      return {
        ok: true,
        data: raw,
      };
    },
    // Move an issue through a workflow transition by transition id.
    async transitionIssue(input: TransitionInput): Promise<CommandOutput<unknown>> {
      const request = createRequest(
        options.config,
        "POST",
        `/rest/api/3/issue/${input.issue}/transitions`,
        {
          transition: {
            id: input.transition,
          },
        },
      );
      if (input.dryRun) {
        return {
          ok: true,
          dryRun: true,
          data: request,
        };
      }
      await send("POST", `/rest/api/3/issue/${input.issue}/transitions`, request.body);
      return {
        ok: true,
        data: {
          issue: input.issue,
          transitioned: true,
          transition: input.transition,
        },
      };
    },
  };
}

View File

@@ -0,0 +1,44 @@
import type { CommandOutput, OutputFormat, Writer } from "./types.js";
// Render a command payload as terse text for --format text. Recognizes the
// Jira listing shapes (issues, single issue, transitions); anything else
// falls back to pretty-printed JSON of the whole envelope.
function renderText(payload: CommandOutput<unknown>) {
  const data = payload.data as Record<string, unknown>;
  if (Array.isArray(data?.issues)) {
    const lines: string[] = [];
    for (const entry of data.issues) {
      const issue = entry as Record<string, string>;
      lines.push(`${issue.key} [${issue.status}] ${issue.issueType} - ${issue.summary}`);
    }
    return lines.join("\n");
  }
  if (data?.issue && typeof data.issue === "object") {
    const issue = data.issue as Record<string, string>;
    return `${issue.key}\n${issue.issueType} | ${issue.status}\n${issue.summary}\n${issue.url}`;
  }
  if (Array.isArray(data?.transitions)) {
    const lines: string[] = [];
    for (const entry of data.transitions) {
      const transition = entry as Record<string, string>;
      lines.push(`${transition.id} ${transition.name} -> ${transition.toStatus}`);
    }
    return lines.join("\n");
  }
  return JSON.stringify(payload, null, 2);
}
export function writeOutput(
writer: Writer,
payload: CommandOutput<unknown>,
format: OutputFormat = "json",
) {
const body = format === "text" ? renderText(payload) : JSON.stringify(payload, null, 2);
writer.write(`${body}\n`);
}

View File

@@ -0,0 +1,85 @@
import { readWorkspaceFile } from "./files.js";
import { sendJsonRequest } from "./http.js";
import type { AtlassianConfig, CommandOutput, FetchLike } from "./types.js";
// Allow-listed API path prefixes the raw escape-hatch command may touch.
const JIRA_ALLOWED_PREFIXES = ["/rest/api/3/"] as const;
const CONFLUENCE_ALLOWED_PREFIXES = ["/wiki/api/v2/", "/wiki/rest/api/"] as const;
// Parsed inputs for the raw command.
type RawInput = {
  product: "jira" | "confluence";
  // Validated later; only GET/POST/PUT are accepted.
  method: string;
  // Must fall under one of the allow-listed prefixes for the product.
  path: string;
  // Optional JSON body file, resolved inside the workspace (cwd).
  bodyFile?: string;
  cwd: string;
  // When set, the planned request is returned without being sent.
  dryRun?: boolean;
};
// Map a product name to its allow-listed API path prefixes.
function getAllowedPrefixes(product: RawInput["product"]) {
  if (product === "jira") {
    return JIRA_ALLOWED_PREFIXES;
  }
  return CONFLUENCE_ALLOWED_PREFIXES;
}
// Resolve an API path against the product-specific base URL.
function buildUrl(config: AtlassianConfig, product: RawInput["product"], path: string) {
  const root = product === "jira" ? config.jiraBaseUrl : config.confluenceBaseUrl;
  const resolved = new URL(path, `${root}/`);
  return resolved.toString();
}
// Narrow an arbitrary method string to the verbs raw supports; everything
// else (DELETE, PATCH, ...) is rejected.
function validateMethod(method: string): asserts method is "GET" | "POST" | "PUT" {
  const allowed = ["GET", "POST", "PUT"];
  if (allowed.includes(method)) {
    return;
  }
  throw new Error("raw only allows GET, POST, and PUT");
}
// Enforce the raw-command path allowlist for the given product.
// The prefix check runs on a dot-segment-normalized form of the path: the
// original checked the raw string, so an input like
// "/rest/api/3/../../admin" passed the prefix test and was then resolved
// OUTSIDE the allowlist when buildUrl constructed the real URL. Normalizing
// here mirrors exactly what URL resolution will do later.
function validatePath(product: RawInput["product"], path: string) {
  const allowedPrefixes = getAllowedPrefixes(product);
  // URL resolution collapses "." / ".." segments (and backslashes in
  // special schemes); the placeholder origin is discarded afterwards.
  const normalized = new URL(path, "https://allowlist.invalid").pathname;
  if (!allowedPrefixes.some((prefix) => normalized.startsWith(prefix))) {
    throw new Error(`raw path is not allowed for ${product}: ${path}`);
  }
}
// Load and JSON-parse the optional --body-file from inside the workspace;
// returns undefined when no body file was supplied.
async function readRawBody(bodyFile: string | undefined, cwd: string) {
  if (!bodyFile) {
    return undefined;
  }
  const text = await readWorkspaceFile(bodyFile, cwd);
  return JSON.parse(text) as unknown;
}
// Execute (or dry-run) an arbitrary allow-listed Atlassian REST call.
// Validates the method and path first, reads the optional JSON body from
// the workspace, and either returns the planned request (dryRun) or sends it.
export async function runRawCommand(
  config: AtlassianConfig,
  fetchImpl: FetchLike | undefined,
  input: RawInput,
): Promise<CommandOutput<unknown>> {
  validateMethod(input.method);
  validatePath(input.product, input.path);
  const body = await readRawBody(input.bodyFile, input.cwd);
  const planned: { method: "GET" | "POST" | "PUT"; url: string; body?: unknown } = {
    method: input.method,
    url: buildUrl(config, input.product, input.path),
  };
  if (body !== undefined) {
    planned.body = body;
  }
  if (input.dryRun) {
    return { ok: true, dryRun: true, data: planned };
  }
  const data = await sendJsonRequest({
    config,
    fetchImpl,
    url: planned.url,
    method: input.method,
    body,
    errorPrefix: "Raw request failed",
  });
  return { ok: true, data };
}

View File

@@ -0,0 +1,35 @@
// Resolved connection settings shared by every command.
export type AtlassianConfig = {
  baseUrl: string; // site root, e.g. https://example.atlassian.net
  jiraBaseUrl: string; // base for /rest/api/3 requests
  confluenceBaseUrl: string; // base for /wiki API requests
  email: string; // account email for Basic auth
  apiToken: string; // API token paired with the email
  defaultProject?: string; // fallback Jira project key
  defaultSpace?: string; // fallback Confluence space
};
// Uniform envelope every command prints.
export type CommandOutput<T> = {
  ok: boolean; // overall success flag
  data: T; // normalized command payload
  dryRun?: boolean; // set when only the planned request was produced
  raw?: unknown; // optional raw API payload for callers that need it
};
// Flattened Jira issue used by search/get output.
export type JiraIssueSummary = {
  key: string;
  summary: string;
  issueType: string;
  status: string;
  assignee?: string; // display name; absent when unassigned
  created: string;
  updated: string;
  url: string; // browse link built from baseUrl
};
// Minimal output sink (compatible with process.stdout / in-memory writers).
export type Writer = {
  write(chunk: string | Uint8Array): unknown;
};
// Injectable fetch implementation, mainly for tests.
export type FetchLike = typeof fetch;
// Rendering modes supported by writeOutput.
export type OutputFormat = "json" | "text";

View File

@@ -0,0 +1,14 @@
import test from "node:test";
import assert from "node:assert/strict";
import { spawnSync } from "node:child_process";
// Smoke test: run the CLI via tsx in a child process and verify --help
// exits 0 and prints the banner plus a usage section.
test("CLI help prints the Atlassian skill banner", () => {
  const result = spawnSync("pnpm", ["exec", "tsx", "src/cli.ts", "--help"], {
    cwd: new URL("../", import.meta.url),
    encoding: "utf8",
  });
  assert.equal(result.status, 0, result.stderr);
  assert.match(result.stdout, /Portable Atlassian CLI for multi-agent skills/);
  assert.match(result.stdout, /Usage:/);
});

View File

@@ -0,0 +1,38 @@
import test from "node:test";
import assert from "node:assert/strict";
import { createBasicAuthHeader, loadConfig } from "../src/config.js";
// loadConfig should trim the trailing slash and reuse the site URL for both
// products when no product-specific overrides are given.
test("loadConfig derives Jira and Confluence base URLs from ATLASSIAN_BASE_URL", () => {
  const config = loadConfig({
    ATLASSIAN_BASE_URL: "https://example.atlassian.net/",
    ATLASSIAN_EMAIL: "dev@example.com",
    ATLASSIAN_API_TOKEN: "secret-token",
    ATLASSIAN_DEFAULT_PROJECT: "ENG",
  });
  assert.deepEqual(config, {
    baseUrl: "https://example.atlassian.net",
    jiraBaseUrl: "https://example.atlassian.net",
    confluenceBaseUrl: "https://example.atlassian.net",
    email: "dev@example.com",
    apiToken: "secret-token",
    defaultProject: "ENG",
    defaultSpace: undefined,
  });
});
// Basic auth header must be base64("email:token"), the Atlassian Cloud form.
test("createBasicAuthHeader encodes email and API token for Atlassian Cloud", () => {
  const header = createBasicAuthHeader({
    baseUrl: "https://example.atlassian.net",
    jiraBaseUrl: "https://example.atlassian.net",
    confluenceBaseUrl: "https://example.atlassian.net",
    email: "dev@example.com",
    apiToken: "secret-token",
  });
  assert.equal(
    header,
    `Basic ${Buffer.from("dev@example.com:secret-token").toString("base64")}`,
  );
});

View File

@@ -0,0 +1,351 @@
import test from "node:test";
import assert from "node:assert/strict";
import { createTempWorkspace, jsonResponse, runCli } from "./helpers.js";
// Shared env satisfying loadConfig for every test in this file.
const baseEnv = {
  ATLASSIAN_BASE_URL: "https://example.atlassian.net",
  ATLASSIAN_EMAIL: "dev@example.com",
  ATLASSIAN_API_TOKEN: "secret-token",
};
// conf-search: CQL goes to the v1 search endpoint; results normalize to pages.
test("conf-search uses CQL search and normalizes page results", async () => {
  const calls: Array<{ url: string; init: RequestInit | undefined }> = [];
  const fetchImpl: typeof fetch = async (input, init) => {
    calls.push({ url: typeof input === "string" ? input : input.toString(), init });
    return jsonResponse({
      results: [
        {
          content: {
            id: "123",
            type: "page",
            title: "Runbook",
            _links: { webui: "/spaces/OPS/pages/123/Runbook" },
          },
          excerpt: "Operational runbook",
        },
      ],
      start: 5,
      limit: 1,
      size: 1,
      totalSize: 7,
    });
  };
  const result = await runCli({
    args: ["conf-search", "--query", "title ~ \"Runbook\"", "--max-results", "1", "--start-at", "5"],
    env: baseEnv,
    fetchImpl,
  });
  assert.equal(calls.length, 1);
  assert.equal(
    calls[0]?.url,
    "https://example.atlassian.net/wiki/rest/api/search?cql=title+%7E+%22Runbook%22&limit=1&start=5",
  );
  assert.equal(calls[0]?.init?.method, "GET");
  assert.deepEqual(JSON.parse(result.stdout), {
    ok: true,
    data: {
      pages: [
        {
          id: "123",
          title: "Runbook",
          type: "page",
          excerpt: "Operational runbook",
          url: "https://example.atlassian.net/spaces/OPS/pages/123/Runbook",
        },
      ],
      startAt: 5,
      maxResults: 1,
      total: 7,
    },
  });
});
// conf-get: normalized page details plus the untouched raw payload.
test("conf-get returns normalized page details plus raw payload", async () => {
  const rawPage = {
    id: "123",
    status: "current",
    title: "Runbook",
    spaceId: "OPS",
    version: { number: 4 },
    body: {
      storage: {
        value: "<p>Runbook</p>",
        representation: "storage",
      },
    },
    _links: {
      webui: "/spaces/OPS/pages/123/Runbook",
    },
  };
  const result = await runCli({
    args: ["conf-get", "--page", "123"],
    env: baseEnv,
    fetchImpl: async () => jsonResponse(rawPage),
  });
  assert.deepEqual(JSON.parse(result.stdout), {
    ok: true,
    data: {
      page: {
        id: "123",
        title: "Runbook",
        type: "page",
        status: "current",
        spaceId: "OPS",
        version: 4,
        body: "<p>Runbook</p>",
        url: "https://example.atlassian.net/spaces/OPS/pages/123/Runbook",
      },
    },
    raw: rawPage,
  });
});
// conf-children: pagination flags map to limit/cursor query parameters.
test("conf-children returns normalized direct children with pagination", async () => {
  const result = await runCli({
    args: ["conf-children", "--page", "123", "--max-results", "2", "--start-at", "1"],
    env: baseEnv,
    fetchImpl: async (input) => {
      const url = typeof input === "string" ? input : input.toString();
      assert.equal(
        url,
        "https://example.atlassian.net/wiki/api/v2/pages/123/direct-children?limit=2&cursor=1",
      );
      return jsonResponse({
        results: [
          {
            id: "124",
            title: "Child page",
            type: "page",
            status: "current",
            spaceId: "OPS",
            _links: { webui: "/spaces/OPS/pages/124/Child+page" },
          },
        ],
      });
    },
  });
  assert.deepEqual(JSON.parse(result.stdout), {
    ok: true,
    data: {
      pages: [
        {
          id: "124",
          title: "Child page",
          type: "page",
          status: "current",
          spaceId: "OPS",
          url: "https://example.atlassian.net/spaces/OPS/pages/124/Child+page",
        },
      ],
      nextCursor: null,
    },
  });
});
// conf-create --dry-run: planned POST body only, no network traffic implied.
test("conf-create dry-run emits a storage-format request body", async () => {
  const workspace = createTempWorkspace();
  try {
    workspace.write("page.storage.html", "<p>Runbook</p>");
    const result = await runCli({
      args: [
        "conf-create",
        "--title",
        "Runbook",
        "--body-file",
        "page.storage.html",
        "--dry-run",
      ],
      cwd: workspace.cwd,
      env: {
        ...baseEnv,
        ATLASSIAN_DEFAULT_SPACE: "OPS",
      },
    });
    assert.deepEqual(JSON.parse(result.stdout), {
      ok: true,
      dryRun: true,
      data: {
        method: "POST",
        url: "https://example.atlassian.net/wiki/api/v2/pages",
        body: {
          spaceId: "OPS",
          title: "Runbook",
          status: "current",
          body: {
            representation: "storage",
            value: "<p>Runbook</p>",
          },
        },
      },
    });
  } finally {
    workspace.cleanup();
  }
});
// conf-update --dry-run: one GET for the current version, planned PUT bumps it.
test("conf-update fetches the current page version and increments it for dry-run", async () => {
  const workspace = createTempWorkspace();
  const calls: Array<{ url: string; init: RequestInit | undefined }> = [];
  try {
    workspace.write("page.storage.html", "<p>Updated</p>");
    const result = await runCli({
      args: [
        "conf-update",
        "--page",
        "123",
        "--title",
        "Runbook",
        "--body-file",
        "page.storage.html",
        "--dry-run",
      ],
      cwd: workspace.cwd,
      env: baseEnv,
      fetchImpl: async (input, init) => {
        calls.push({ url: typeof input === "string" ? input : input.toString(), init });
        return jsonResponse({
          id: "123",
          spaceId: "OPS",
          status: "current",
          title: "Runbook",
          version: { number: 4 },
          body: {
            storage: {
              value: "<p>Old</p>",
              representation: "storage",
            },
          },
        });
      },
    });
    assert.equal(calls.length, 1);
    assert.equal(
      calls[0]?.url,
      "https://example.atlassian.net/wiki/api/v2/pages/123?body-format=storage",
    );
    assert.deepEqual(JSON.parse(result.stdout), {
      ok: true,
      dryRun: true,
      data: {
        method: "PUT",
        url: "https://example.atlassian.net/wiki/api/v2/pages/123",
        body: {
          id: "123",
          status: "current",
          title: "Runbook",
          spaceId: "OPS",
          version: {
            number: 5,
          },
          body: {
            representation: "storage",
            value: "<p>Updated</p>",
          },
        },
      },
    });
  } finally {
    workspace.cleanup();
  }
});
// conf-comment --dry-run: planned POST against the footer-comments endpoint.
test("conf-comment dry-run targets footer comments", async () => {
  const workspace = createTempWorkspace();
  try {
    workspace.write("comment.storage.html", "<p>Looks good</p>");
    const result = await runCli({
      args: ["conf-comment", "--page", "123", "--body-file", "comment.storage.html", "--dry-run"],
      cwd: workspace.cwd,
      env: baseEnv,
    });
    assert.deepEqual(JSON.parse(result.stdout), {
      ok: true,
      dryRun: true,
      data: {
        method: "POST",
        url: "https://example.atlassian.net/wiki/api/v2/footer-comments",
        body: {
          pageId: "123",
          body: {
            representation: "storage",
            value: "<p>Looks good</p>",
          },
        },
      },
    });
  } finally {
    workspace.cleanup();
  }
});
// conf-update: a 409 on the PUT becomes an explicit version-conflict error.
test("conf-update surfaces version conflicts clearly", async () => {
  const workspace = createTempWorkspace();
  try {
    workspace.write("page.storage.html", "<p>Updated</p>");
    await assert.rejects(
      runCli({
        args: [
          "conf-update",
          "--page",
          "123",
          "--title",
          "Runbook",
          "--body-file",
          "page.storage.html",
        ],
        cwd: workspace.cwd,
        env: baseEnv,
        fetchImpl: async (input, init) => {
          const url = typeof input === "string" ? input : input.toString();
          if (url.endsWith("?body-format=storage")) {
            return jsonResponse({
              id: "123",
              spaceId: "OPS",
              status: "current",
              title: "Runbook",
              version: { number: 4 },
              body: {
                storage: {
                  value: "<p>Old</p>",
                  representation: "storage",
                },
              },
            });
          }
          assert.equal(init?.method, "PUT");
          return new Response(JSON.stringify({ message: "Conflict" }), {
            status: 409,
            statusText: "Conflict",
            headers: { "content-type": "application/json" },
          });
        },
      }),
      /Confluence update conflict: page 123 was updated by someone else/,
    );
  } finally {
    workspace.cleanup();
  }
});

View File

@@ -0,0 +1,72 @@
import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from "node:fs";
import { tmpdir } from "node:os";
import path from "node:path";
import { buildProgram } from "../src/cli.js";
// Inputs for an in-process CLI invocation.
type RunCliOptions = {
  args: string[]; // argv without the node/script prefix
  cwd?: string; // workspace used when commands read files (e.g. --body-file)
  env?: NodeJS.ProcessEnv; // environment passed through to buildProgram
  fetchImpl?: typeof fetch; // stubbed network layer for the commands
};
// In-memory Writer that captures everything written, for asserting on CLI
// output. Byte chunks are decoded as UTF-8.
class MemoryWriter {
  private readonly chunks: string[] = [];
  write(chunk: string | Uint8Array) {
    const text = typeof chunk === "string" ? chunk : Buffer.from(chunk).toString("utf8");
    this.chunks.push(text);
    return true;
  }
  toString() {
    return this.chunks.join("");
  }
}
// Drive the CLI in-process with injected env/cwd/fetch, capturing stdout and
// stderr as strings for assertions.
export async function runCli(options: RunCliOptions) {
  const stdout = new MemoryWriter();
  const stderr = new MemoryWriter();
  const program = buildProgram({
    cwd: options.cwd,
    env: options.env,
    fetchImpl: options.fetchImpl,
    stdout,
    stderr,
  });
  // { from: "user" } marks args as user-supplied (no node/script entries).
  await program.parseAsync(options.args, { from: "user" });
  return {
    stdout: stdout.toString(),
    stderr: stderr.toString(),
  };
}
// Create a throwaway workspace directory with helpers to write fixture
// files (creating parent directories) and to delete the whole tree.
export function createTempWorkspace() {
  const cwd = mkdtempSync(path.join(tmpdir(), "atlassian-skill-"));
  const write = (relativePath: string, contents: string) => {
    const target = path.join(cwd, relativePath);
    mkdirSync(path.dirname(target), { recursive: true });
    writeFileSync(target, contents, "utf8");
    return target;
  };
  const cleanup = () => {
    rmSync(cwd, { recursive: true, force: true });
  };
  return { cwd, cleanup, write };
}
// Wrap a payload in a JSON Response for fetch stubs; defaults to status 200
// with a JSON content type, while letting init override or extend headers.
export function jsonResponse(payload: unknown, init?: ResponseInit) {
  const headers = {
    "content-type": "application/json",
    ...(init?.headers ?? {}),
  };
  return new Response(JSON.stringify(payload), {
    status: init?.status ?? 200,
    statusText: init?.statusText,
    headers,
  });
}

View File

@@ -0,0 +1,131 @@
import test from "node:test";
import assert from "node:assert/strict";
import { sendJsonRequest } from "../src/http.js";
import { runCli } from "./helpers.js";
// Config/env fixtures shared by the health and HTTP-layer tests below.
const baseConfig = {
  baseUrl: "https://example.atlassian.net",
  jiraBaseUrl: "https://example.atlassian.net",
  confluenceBaseUrl: "https://example.atlassian.net",
  email: "dev@example.com",
  apiToken: "secret-token",
};
const baseEnv = {
  ATLASSIAN_BASE_URL: "https://example.atlassian.net",
  ATLASSIAN_EMAIL: "dev@example.com",
  ATLASSIAN_API_TOKEN: "secret-token",
};
// health: one probe per product; a Confluence failure does not hide Jira's
// success, and the overall ok flag reflects the combination.
test("health probes Jira and Confluence independently", async () => {
  const calls: string[] = [];
  const result = await runCli({
    args: ["health"],
    env: baseEnv,
    fetchImpl: async (input) => {
      const url = typeof input === "string" ? input : input.toString();
      calls.push(url);
      if (url.endsWith("/rest/api/3/myself")) {
        return new Response(JSON.stringify({ accountId: "1" }), {
          status: 200,
          headers: { "content-type": "application/json" },
        });
      }
      return new Response(JSON.stringify({ message: "Forbidden" }), {
        status: 403,
        statusText: "Forbidden",
        headers: { "content-type": "application/json" },
      });
    },
  });
  assert.deepEqual(calls, [
    "https://example.atlassian.net/rest/api/3/myself",
    "https://example.atlassian.net/wiki/api/v2/spaces?limit=1",
  ]);
  assert.deepEqual(JSON.parse(result.stdout), {
    ok: false,
    data: {
      baseUrl: "https://example.atlassian.net",
      jiraBaseUrl: "https://example.atlassian.net",
      confluenceBaseUrl: "https://example.atlassian.net",
      products: {
        jira: {
          ok: true,
          status: 200,
        },
        confluence: {
          ok: false,
          status: 403,
          message:
            "Confluence health check failed: 403 Forbidden - verify product permissions for this account",
        },
      },
    },
  });
});
// Each well-known failure status must map to its actionable hint text.
test("sendJsonRequest maps 401, 403, 404, and 429 to actionable messages", async () => {
  const cases = [
    {
      status: 401,
      statusText: "Unauthorized",
      expected: /401 Unauthorized - check ATLASSIAN_EMAIL and ATLASSIAN_API_TOKEN/,
    },
    {
      status: 403,
      statusText: "Forbidden",
      expected: /403 Forbidden - verify product permissions for this account/,
    },
    {
      status: 404,
      statusText: "Not Found",
      expected: /404 Not Found - verify the resource identifier or API path/,
    },
    {
      status: 429,
      statusText: "Too Many Requests",
      expected: /429 Too Many Requests - retry later or reduce request rate/,
    },
  ] as const;
  for (const entry of cases) {
    await assert.rejects(
      sendJsonRequest({
        config: baseConfig,
        url: "https://example.atlassian.net/rest/api/3/issue/ENG-1",
        method: "GET",
        errorPrefix: "Jira request failed",
        fetchImpl: async () =>
          new Response(JSON.stringify({ message: entry.statusText }), {
            status: entry.status,
            statusText: entry.statusText,
            headers: { "content-type": "application/json" },
          }),
      }),
      entry.expected,
    );
  }
});
// A 200 with broken JSON must raise a dedicated malformed-response error.
test("sendJsonRequest reports malformed JSON responses clearly", async () => {
  await assert.rejects(
    sendJsonRequest({
      config: baseConfig,
      url: "https://example.atlassian.net/rest/api/3/issue/ENG-1",
      method: "GET",
      errorPrefix: "Jira request failed",
      fetchImpl: async () =>
        new Response("{", {
          status: 200,
          headers: { "content-type": "application/json" },
        }),
    }),
    /Malformed JSON response from Atlassian API/,
  );
});

View File

@@ -0,0 +1,321 @@
import test from "node:test";
import assert from "node:assert/strict";
import { markdownToAdf } from "../src/adf.js";
import { createTempWorkspace, jsonResponse, runCli } from "./helpers.js";
const baseEnv = {
ATLASSIAN_BASE_URL: "https://example.atlassian.net",
ATLASSIAN_EMAIL: "dev@example.com",
ATLASSIAN_API_TOKEN: "secret-token",
};
test("jira-search emits normalized results and uses pagination inputs", async () => {
const calls: Array<{ url: string; init: RequestInit | undefined }> = [];
const fetchImpl: typeof fetch = async (input, init) => {
const url = typeof input === "string" ? input : input.toString();
calls.push({ url, init });
return jsonResponse({
startAt: 10,
maxResults: 2,
total: 25,
issues: [
{
key: "ENG-1",
fields: {
summary: "Add Jira search command",
issuetype: { name: "Story" },
status: { name: "In Progress" },
assignee: { displayName: "Ada Lovelace" },
created: "2026-03-01T00:00:00.000Z",
updated: "2026-03-02T00:00:00.000Z",
},
},
],
});
};
const result = await runCli({
args: ["jira-search", "--jql", "project = ENG", "--max-results", "2", "--start-at", "10"],
env: baseEnv,
fetchImpl,
});
assert.equal(calls.length, 1);
assert.equal(calls[0]?.url, "https://example.atlassian.net/rest/api/3/search");
assert.equal(calls[0]?.init?.method, "POST");
assert.match(String(calls[0]?.init?.headers), /Authorization/);
assert.deepEqual(JSON.parse(String(calls[0]?.init?.body)), {
jql: "project = ENG",
maxResults: 2,
startAt: 10,
fields: ["summary", "issuetype", "status", "assignee", "created", "updated"],
});
assert.deepEqual(JSON.parse(result.stdout), {
ok: true,
data: {
issues: [
{
key: "ENG-1",
summary: "Add Jira search command",
issueType: "Story",
status: "In Progress",
assignee: "Ada Lovelace",
created: "2026-03-01T00:00:00.000Z",
updated: "2026-03-02T00:00:00.000Z",
url: "https://example.atlassian.net/browse/ENG-1",
},
],
startAt: 10,
maxResults: 2,
total: 25,
},
});
});
// jira-get should surface the normalized issue alongside the untouched Jira
// response under `raw`.
test("jira-get returns normalized fields plus the raw Jira payload", async () => {
  const issueFixture = {
    key: "ENG-42",
    fields: {
      summary: "Ship v1",
      issuetype: { name: "Task" },
      status: { name: "Done" },
      assignee: { displayName: "Grace Hopper" },
      created: "2026-03-03T00:00:00.000Z",
      updated: "2026-03-04T00:00:00.000Z",
    },
  };
  const result = await runCli({
    args: ["jira-get", "--issue", "ENG-42"],
    env: baseEnv,
    fetchImpl: async () => jsonResponse(issueFixture),
  });
  assert.deepEqual(JSON.parse(result.stdout), {
    ok: true,
    data: {
      issue: {
        key: "ENG-42",
        summary: "Ship v1",
        issueType: "Task",
        status: "Done",
        assignee: "Grace Hopper",
        created: "2026-03-03T00:00:00.000Z",
        updated: "2026-03-04T00:00:00.000Z",
        url: "https://example.atlassian.net/browse/ENG-42",
      },
    },
    raw: issueFixture,
  });
});
// Pins the ADF shapes produced for the three supported markdown constructs:
// heading, paragraph, and bullet list.
test("markdownToAdf converts headings, paragraphs, and bullet lists", () => {
  const markdown = "# Summary\n\nBuild the Jira skill.\n\n- Search\n- Comment";
  // Small builder to keep the bullet-list expectation readable.
  const listItem = (text: string) => ({
    type: "listItem",
    content: [{ type: "paragraph", content: [{ type: "text", text }] }],
  });
  assert.deepEqual(markdownToAdf(markdown), {
    type: "doc",
    version: 1,
    content: [
      {
        type: "heading",
        attrs: { level: 1 },
        content: [{ type: "text", text: "Summary" }],
      },
      {
        type: "paragraph",
        content: [{ type: "text", text: "Build the Jira skill." }],
      },
      {
        type: "bulletList",
        content: [listItem("Search"), listItem("Comment")],
      },
    ],
  });
});
// --dry-run must build the Jira create payload without touching the network:
// the stub fetch flips `called`, and asserting it stayed false proves no HTTP
// request was made. The description file is resolved relative to the workspace
// cwd and converted to ADF.
test("jira-create dry-run emits an ADF request body without calling Jira", async () => {
  const workspace = createTempWorkspace();
  try {
    workspace.write("description.md", "# New story\n\n- one\n- two");
    // Sentinel: any fetch during a dry-run is a test failure.
    let called = false;
    const result = await runCli({
      args: [
        "jira-create",
        "--type",
        "Story",
        "--summary",
        "Create the Atlassian skill",
        "--description-file",
        "description.md",
        "--dry-run",
      ],
      cwd: workspace.cwd,
      env: {
        ...baseEnv,
        // Project key comes from the environment rather than a CLI flag here.
        ATLASSIAN_DEFAULT_PROJECT: "ENG",
      },
      fetchImpl: async () => {
        called = true;
        return jsonResponse({});
      },
    });
    assert.equal(called, false);
    assert.deepEqual(JSON.parse(result.stdout), {
      ok: true,
      dryRun: true,
      data: {
        method: "POST",
        url: "https://example.atlassian.net/rest/api/3/issue",
        body: {
          fields: {
            project: { key: "ENG" },
            issuetype: { name: "Story" },
            summary: "Create the Atlassian skill",
            // Expected ADF is derived via the same converter the CLI uses, so
            // this pins the wiring rather than the ADF shape itself.
            description: markdownToAdf("# New story\n\n- one\n- two"),
          },
        },
      },
    });
  } finally {
    // Remove the temp workspace even when an assertion above throws.
    workspace.cleanup();
  }
});
// Exercises the three mutating Jira commands in --dry-run mode in a single
// test so they can share one temp workspace. Each command emits its request
// preview (method, URL, body) instead of calling Jira; no fetchImpl is
// supplied because none of these invocations should reach the network.
test("jira-update, jira-comment, and jira-transition dry-runs build the expected Jira requests", async () => {
  const workspace = createTempWorkspace();
  try {
    // Body files consumed by jira-update and jira-comment respectively.
    workspace.write("issue.md", "Updated description");
    workspace.write("comment.md", "Comment body");
    const update = await runCli({
      args: [
        "jira-update",
        "--issue",
        "ENG-9",
        "--summary",
        "Updated summary",
        "--description-file",
        "issue.md",
        "--dry-run",
      ],
      cwd: workspace.cwd,
      env: baseEnv,
    });
    const comment = await runCli({
      args: ["jira-comment", "--issue", "ENG-9", "--body-file", "comment.md", "--dry-run"],
      cwd: workspace.cwd,
      env: baseEnv,
    });
    const transition = await runCli({
      args: ["jira-transition", "--issue", "ENG-9", "--transition", "31", "--dry-run"],
      cwd: workspace.cwd,
      env: baseEnv,
    });
    // jira-update previews a PUT to the issue resource; markdown body files
    // are converted to ADF via the same converter the CLI uses.
    assert.deepEqual(JSON.parse(update.stdout), {
      ok: true,
      dryRun: true,
      data: {
        method: "PUT",
        url: "https://example.atlassian.net/rest/api/3/issue/ENG-9",
        body: {
          fields: {
            summary: "Updated summary",
            description: markdownToAdf("Updated description"),
          },
        },
      },
    });
    // jira-comment previews a POST to the issue's comment collection.
    assert.deepEqual(JSON.parse(comment.stdout), {
      ok: true,
      dryRun: true,
      data: {
        method: "POST",
        url: "https://example.atlassian.net/rest/api/3/issue/ENG-9/comment",
        body: {
          body: markdownToAdf("Comment body"),
        },
      },
    });
    // jira-transition previews a POST carrying only the transition id.
    assert.deepEqual(JSON.parse(transition.stdout), {
      ok: true,
      dryRun: true,
      data: {
        method: "POST",
        url: "https://example.atlassian.net/rest/api/3/issue/ENG-9/transitions",
        body: {
          transition: {
            id: "31",
          },
        },
      },
    });
  } finally {
    // Remove the shared temp workspace regardless of assertion outcome.
    workspace.cleanup();
  }
});
// Transition options come back trimmed to id, name, target status, and the
// has-screen flag.
test("jira-transitions returns normalized transition options", async () => {
  const result = await runCli({
    args: ["jira-transitions", "--issue", "ENG-9"],
    env: baseEnv,
    fetchImpl: async () =>
      jsonResponse({
        transitions: [
          {
            id: "21",
            name: "Start Progress",
            to: { name: "In Progress" },
            hasScreen: false,
          },
        ],
      }),
  });
  assert.deepEqual(JSON.parse(result.stdout), {
    ok: true,
    data: {
      transitions: [
        {
          id: "21",
          name: "Start Progress",
          toStatus: "In Progress",
          hasScreen: false,
        },
      ],
    },
  });
});

View File

@@ -0,0 +1,129 @@
import test from "node:test";
import assert from "node:assert/strict";
import { createTempWorkspace, jsonResponse, runCli } from "./helpers.js";
// Minimal Atlassian credentials consumed by the CLI under test. The token is a
// dummy value; tests stub fetch, so no real network calls are made.
const baseEnv = {
  ATLASSIAN_BASE_URL: "https://example.atlassian.net",
  ATLASSIAN_EMAIL: "dev@example.com",
  ATLASSIAN_API_TOKEN: "secret-token",
};
// DELETE is deliberately outside the raw command's method allow-list.
test("raw rejects DELETE requests", async () => {
  const invocation = runCli({
    args: ["raw", "--product", "jira", "--method", "DELETE", "--path", "/rest/api/3/issue/ENG-1"],
    env: baseEnv,
  });
  await assert.rejects(invocation, /raw only allows GET, POST, and PUT/);
});
// Each product only accepts its own API prefix: Jira paths must not target the
// Confluence (/wiki) surface and vice versa.
test("raw rejects unsupported Jira and Confluence prefixes", async () => {
  const mismatchedCases = [
    { product: "jira", path: "/wiki/api/v2/pages", message: /raw path is not allowed for jira/ },
    {
      product: "confluence",
      path: "/rest/api/3/issue/ENG-1",
      message: /raw path is not allowed for confluence/,
    },
  ];
  for (const { product, path, message } of mismatchedCases) {
    await assert.rejects(
      runCli({
        args: ["raw", "--product", product, "--method", "GET", "--path", path],
        env: baseEnv,
      }),
      message,
    );
  }
});
// Happy-path raw GET: once the path passes the allow-list, the request is sent
// with a JSON Accept header plus Basic auth derived from email:token, and the
// response body is echoed back under `data`.
test("raw GET executes against the validated Jira endpoint", async () => {
  let call: { url: string; init: RequestInit | undefined } | undefined;
  const result = await runCli({
    args: ["raw", "--product", "jira", "--method", "GET", "--path", "/rest/api/3/issue/ENG-1"],
    env: baseEnv,
    fetchImpl: async (input, init) => {
      call = { url: typeof input === "string" ? input : input.toString(), init };
      return jsonResponse({ id: "10001", key: "ENG-1" });
    },
  });
  // deepEqual on the tuple-array header form pins the exact header names,
  // values, and ordering (Accept before Authorization).
  assert.deepEqual(call, {
    url: "https://example.atlassian.net/rest/api/3/issue/ENG-1",
    init: {
      method: "GET",
      headers: [
        ["Accept", "application/json"],
        ["Authorization", `Basic ${Buffer.from("dev@example.com:secret-token").toString("base64")}`],
      ],
    },
  });
  assert.deepEqual(JSON.parse(result.stdout), {
    ok: true,
    data: {
      id: "10001",
      key: "ENG-1",
    },
  });
});
// Two safety properties of raw POST dry-runs: (1) a workspace-relative
// --body-file is read and echoed in the request preview without any network
// call; (2) a path that escapes the workspace (../outside.json) is rejected
// before any file read happens.
test("raw POST dry-run reads only workspace-scoped body files", async () => {
  const workspace = createTempWorkspace();
  try {
    workspace.write("payload.json", "{\"title\":\"Runbook\"}");
    const result = await runCli({
      args: [
        "raw",
        "--product",
        "confluence",
        "--method",
        "POST",
        "--path",
        "/wiki/api/v2/pages",
        "--body-file",
        "payload.json",
        "--dry-run",
      ],
      cwd: workspace.cwd,
      env: baseEnv,
    });
    // The preview parses the body file as JSON rather than passing raw text.
    assert.deepEqual(JSON.parse(result.stdout), {
      ok: true,
      dryRun: true,
      data: {
        method: "POST",
        url: "https://example.atlassian.net/wiki/api/v2/pages",
        body: {
          title: "Runbook",
        },
      },
    });
    // Traversal outside the workspace must fail even in dry-run mode.
    await assert.rejects(
      runCli({
        args: [
          "raw",
          "--product",
          "confluence",
          "--method",
          "POST",
          "--path",
          "/wiki/api/v2/pages",
          "--body-file",
          "../outside.json",
          "--dry-run",
        ],
        cwd: workspace.cwd,
        env: baseEnv,
      }),
      /--body-file must stay within the active workspace/,
    );
  } finally {
    // Remove the temp workspace even if an assertion above throws.
    workspace.cleanup();
  }
});

View File

@@ -0,0 +1,15 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": true,
"skipLibCheck": true,
"resolveJsonModule": true,
"types": ["node"],
"outDir": "dist"
},
"include": ["src/**/*.ts", "scripts/**/*.ts", "tests/**/*.ts"]
}

View File

@@ -47,7 +47,10 @@ If the user has already specified a reviewer CLI and model (e.g., "create a plan
- For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription) - For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription)
- Accept any model string the user provides - Accept any model string the user provides
Store the chosen `REVIEWER_CLI` and `REVIEWER_MODEL` for Phase 6 (Iterative Plan Review). 3. **Max review rounds for the plan?** (default: 10)
- If the user does not provide a value, set `MAX_ROUNDS=10`.
Store the chosen `REVIEWER_CLI`, `REVIEWER_MODEL`, and `MAX_ROUNDS` for Phase 6 (Iterative Plan Review).
### Phase 4: Design (REQUIRED SUB-SKILL) ### Phase 4: Design (REQUIRED SUB-SKILL)
- Invoke `superpowers:brainstorming` explicitly. - Invoke `superpowers:brainstorming` explicitly.
@@ -61,7 +64,7 @@ Store the chosen `REVIEWER_CLI` and `REVIEWER_MODEL` for Phase 6 (Iterative Plan
### Phase 6: Iterative Plan Review ### Phase 6: Iterative Plan Review
Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (max 5 rounds). Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (default max 10 rounds).
**Skip this phase entirely if reviewer was set to `skip`.** **Skip this phase entirely if reviewer was set to `skip`.**
@@ -86,10 +89,60 @@ Resolve the shared reviewer helper from the installed Claude Code skills directo
REVIEWER_RUNTIME=~/.claude/skills/reviewer-runtime/run-review.sh REVIEWER_RUNTIME=~/.claude/skills/reviewer-runtime/run-review.sh
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Plan to Temp File #### Step 2: Write Plan to Temp File
Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`. Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`.
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script:
@@ -113,8 +166,21 @@ codex exec \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -123,17 +189,34 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/plan-${REVIEW_ID}.md and review the implementation plan. Focus on: "Review the implementation plan below. Focus on:
$(cat /tmp/plan-${REVIEW_ID}.md)
1. Correctness — Will this plan achieve the stated goals? 1. Correctness — Will this plan achieve the stated goals?
2. Risks — What could go wrong? Edge cases? Data loss? 2. Risks — What could go wrong? Edge cases? Data loss?
3. Missing steps — Is anything forgotten? 3. Missing steps — Is anything forgotten?
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -151,8 +234,21 @@ cursor-agent -p \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
``` ```
@@ -166,13 +262,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/plan-review-${REVIEW_ID}.sh \ --command-file /tmp/plan-review-${REVIEW_ID}.sh \
--stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \
--status-file /tmp/plan-review-${REVIEW_ID}.status --status-file /tmp/plan-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/plan-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -182,6 +281,13 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/plan-review-${REVIEW_ID}.runner.out /tmp/plan-review-${REVIEW_ID}.md
```
Fallback is allowed only when the helper is missing or not executable.
#### Step 4: Read Review & Check Verdict #### Step 4: Read Review & Check Verdict
@@ -198,17 +304,19 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
[Reviewer feedback] [Reviewer feedback]
``` ```
3. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** → proceed to Phase 7 (Initialize workspace) 5. Check verdict:
- **VERDICT: REVISE** → go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings → proceed to Phase 7 (Initialize workspace)
- No clear verdict but positive / no actionable items → treat as approved - **VERDICT: APPROVED** with only `P3` findings → optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding → go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` → treat as approved
- Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` → surface status log and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` → surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (5) reached → proceed with warning - Max rounds (`MAX_ROUNDS`) reached → present the outcome to the user for a manual decision (proceed or stop)
#### Step 5: Revise the Plan #### Step 5: Revise the Plan
Address each issue the reviewer raised. Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`. Address the reviewer findings in priority order (`P0``P1``P2`, then `P3` when practical). Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`.
Summarize revisions for the user: Summarize revisions for the user:
@@ -219,7 +327,9 @@ Summarize revisions for the user:
If a revision contradicts the user's explicit requirements, skip it and note it for the user. If a revision contradicts the user's explicit requirements, skip it and note it for the user.
#### Step 6: Re-submit to Reviewer (Rounds 2-5) #### Step 6: Re-submit to Reviewer (Rounds 2-N)
Rewrite `/tmp/plan-review-${REVIEW_ID}.sh` for the next round. The script should contain the reviewer invocation only; do not run it directly.
**If `REVIEWER_CLI` is `codex`:** **If `REVIEWER_CLI` is `codex`:**
@@ -233,8 +343,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -249,15 +359,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've revised the plan. Updated version is in /tmp/plan-${REVIEW_ID}.md. I've revised the plan. Updated version is below.
$(cat /tmp/plan-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review the full plan. If solid, end with: VERDICT: APPROVED Re-review the full plan using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -275,8 +388,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md
@@ -295,7 +408,7 @@ Return to Step 4.
**Status:** Approved after N round(s) **Status:** Approved after N round(s)
[or] [or]
**Status:** Max rounds (5) reached — not fully approved **Status:** Max rounds (`MAX_ROUNDS`) reached — not fully approved
[Final feedback / remaining concerns] [Final feedback / remaining concerns]
``` ```
@@ -347,6 +460,28 @@ When handing off to execution, instruct:
Private plan files under `~/.claude/plans/` are planning artifacts and must not be used as execution source of truth. Private plan files under `~/.claude/plans/` are planning artifacts and must not be used as execution source of truth.
### Phase 10: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from the installed Claude Code skills directory:
```bash
TELEGRAM_NOTIFY_RUNTIME=~/.claude/skills/reviewer-runtime/notify-telegram.sh
```
On every terminal outcome for the create-plan run (approved, max rounds reached, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "create-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
**ALWAYS update `story-tracker.md` before/after each story. NEVER proceed with stale tracker state.** **ALWAYS update `story-tracker.md` before/after each story. NEVER proceed with stale tracker state.**
@@ -385,6 +520,7 @@ After completing any story:
- [ ] `.gitignore` ignore-rule commit was created if needed - [ ] `.gitignore` ignore-rule commit was created if needed
- [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/` - [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/`
- [ ] Reviewer configured or explicitly skipped - [ ] Reviewer configured or explicitly skipped
- [ ] Max review rounds confirmed (default: 10)
- [ ] Plan review completed (approved or max rounds) — or skipped - [ ] Plan review completed (approved or max rounds) — or skipped
- [ ] `original-plan.md` copied from `~/.claude/plans/` plan file - [ ] `original-plan.md` copied from `~/.claude/plans/` plan file
- [ ] `final-transcript.md` present - [ ] `final-transcript.md` present
@@ -392,6 +528,7 @@ After completing any story:
- [ ] `story-tracker.md` created with all stories as `pending` - [ ] `story-tracker.md` created with all stories as `pending`
- [ ] `continuation-runbook.md` present - [ ] `continuation-runbook.md` present
- [ ] Handoff explicitly says to read runbook first and execute from plan folder - [ ] Handoff explicitly says to read runbook first and execute from plan folder
- [ ] Telegram notification attempted if configured
## Exit Triggers for Question Phase ## Exit Triggers for Question Phase
User says: "ready", "done", "let's plan", "proceed", "enough questions" User says: "ready", "done", "let's plan", "proceed", "enough questions"

View File

@@ -72,7 +72,10 @@ If the user has already specified a reviewer CLI and model (e.g., "create a plan
- For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription) - For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription)
- Accept any model string the user provides - Accept any model string the user provides
Store the chosen `REVIEWER_CLI` and `REVIEWER_MODEL` for Phase 6 (Iterative Plan Review). 3. **Max review rounds for the plan?** (default: 10)
- If the user does not provide a value, set `MAX_ROUNDS=10`.
Store the chosen `REVIEWER_CLI`, `REVIEWER_MODEL`, and `MAX_ROUNDS` for Phase 6 (Iterative Plan Review).
### Phase 4: Design (REQUIRED SUB-SKILL) ### Phase 4: Design (REQUIRED SUB-SKILL)
@@ -84,7 +87,7 @@ Invoke `superpowers:writing-plans`, then break work into milestones and bite-siz
### Phase 6: Iterative Plan Review ### Phase 6: Iterative Plan Review
Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (max 5 rounds). Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (default max 10 rounds).
**Skip this phase entirely if reviewer was set to `skip`.** **Skip this phase entirely if reviewer was set to `skip`.**
@@ -109,10 +112,60 @@ Resolve the shared reviewer helper from the installed Codex skills directory:
REVIEWER_RUNTIME=~/.codex/skills/reviewer-runtime/run-review.sh REVIEWER_RUNTIME=~/.codex/skills/reviewer-runtime/run-review.sh
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Plan to Temp File #### Step 2: Write Plan to Temp File
Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`. Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`.
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script:
@@ -136,8 +189,21 @@ codex exec \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -146,17 +212,34 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/plan-${REVIEW_ID}.md and review the implementation plan. Focus on: "Review the implementation plan below. Focus on:
$(cat /tmp/plan-${REVIEW_ID}.md)
1. Correctness — Will this plan achieve the stated goals? 1. Correctness — Will this plan achieve the stated goals?
2. Risks — What could go wrong? Edge cases? Data loss? 2. Risks — What could go wrong? Edge cases? Data loss?
3. Missing steps — Is anything forgotten? 3. Missing steps — Is anything forgotten?
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -174,8 +257,21 @@ cursor-agent -p \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
``` ```
@@ -189,13 +285,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/plan-review-${REVIEW_ID}.sh \ --command-file /tmp/plan-review-${REVIEW_ID}.sh \
--stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \
--status-file /tmp/plan-review-${REVIEW_ID}.status --status-file /tmp/plan-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/plan-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -205,6 +304,11 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/plan-review-${REVIEW_ID}.runner.out /tmp/plan-review-${REVIEW_ID}.md
```
Fallback is allowed only when the helper is missing or not executable. Fallback is allowed only when the helper is missing or not executable.
@@ -223,17 +327,19 @@ Fallback is allowed only when the helper is missing or not executable.
[Reviewer feedback] [Reviewer feedback]
``` ```
3. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** → proceed to Phase 7 (Initialize workspace) 5. Check verdict:
- **VERDICT: REVISE** → go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings → proceed to Phase 7 (Initialize workspace)
- No clear verdict but positive / no actionable items → treat as approved - **VERDICT: APPROVED** with only `P3` findings → optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding → go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` → treat as approved
- Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` → surface status log and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` → surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (5) reached → proceed with warning - Max rounds (`MAX_ROUNDS`) reached → present the outcome to the user for a manual decision (proceed or stop)
#### Step 5: Revise the Plan #### Step 5: Revise the Plan
Address each issue the reviewer raised. Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`. Address the reviewer findings in priority order (`P0``P1``P2`, then `P3` when practical). Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`.
Summarize revisions for the user: Summarize revisions for the user:
@@ -244,7 +350,9 @@ Summarize revisions for the user:
If a revision contradicts the user's explicit requirements, skip it and note it for the user. If a revision contradicts the user's explicit requirements, skip it and note it for the user.
#### Step 6: Re-submit to Reviewer (Rounds 2-5) #### Step 6: Re-submit to Reviewer (Rounds 2-N)
Rewrite `/tmp/plan-review-${REVIEW_ID}.sh` for the next round. The script should contain the reviewer invocation only; do not run it directly.
**If `REVIEWER_CLI` is `codex`:** **If `REVIEWER_CLI` is `codex`:**
@@ -258,8 +366,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -274,15 +382,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've revised the plan. Updated version is in /tmp/plan-${REVIEW_ID}.md. I've revised the plan. Updated version is below.
$(cat /tmp/plan-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review the full plan. If solid, end with: VERDICT: APPROVED Re-review the full plan using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -300,8 +411,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md
@@ -320,7 +431,7 @@ Return to Step 4.
**Status:** Approved after N round(s) **Status:** Approved after N round(s)
[or] [or]
**Status:** Max rounds (5) reached — not fully approved **Status:** Max rounds (`MAX_ROUNDS`) reached — not fully approved
[Final feedback / remaining concerns] [Final feedback / remaining concerns]
``` ```
@@ -367,19 +478,42 @@ Always instruct the executing agent:
Do not rely on planner-private files during implementation. Do not rely on planner-private files during implementation.
### Phase 10: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from the installed Codex skills directory:
```bash
TELEGRAM_NOTIFY_RUNTIME=~/.codex/skills/reviewer-runtime/notify-telegram.sh
```
On every terminal outcome for the create-plan run (approved, max rounds reached, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "create-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Quick Reference ## Quick Reference
| Phase | Action | Required Output | | Phase | Action | Required Output |
|---|---|---| |---|---|---|
| 1 | Analyze codebase/context | Constraints and known patterns | | 1 | Analyze codebase/context | Constraints and known patterns |
| 2 | Gather requirements (one question at a time) | Confirmed scope and success criteria | | 2 | Gather requirements (one question at a time) | Confirmed scope and success criteria |
| 3 | Configure reviewer CLI and model | `REVIEWER_CLI` and `REVIEWER_MODEL` (or `skip`) | | 3 | Configure reviewer CLI and model | `REVIEWER_CLI`, `REVIEWER_MODEL`, `MAX_ROUNDS` (or `skip`) |
| 4 | Invoke `superpowers:brainstorming` | Chosen design approach | | 4 | Invoke `superpowers:brainstorming` | Chosen design approach |
| 5 | Invoke `superpowers:writing-plans` | Milestones and bite-sized stories | | 5 | Invoke `superpowers:writing-plans` | Milestones and bite-sized stories |
| 6 | Iterative plan review (max 5 rounds) | Reviewer approval or max-rounds warning | | 6 | Iterative plan review (max `MAX_ROUNDS` rounds) | Reviewer approval or max-rounds warning |
| 7 | Initialize `ai_plan/` + `.gitignore` | Local planning workspace ready | | 7 | Initialize `ai_plan/` + `.gitignore` | Local planning workspace ready |
| 8 | Build plan package from templates | Full plan folder with required files | | 8 | Build plan package from templates | Full plan folder with required files |
| 9 | Handoff with runbook-first instruction | Resumable execution context | | 9 | Handoff with runbook-first instruction | Resumable execution context |
| 10 | Send Telegram notification | User notified or notification status reported |
## Execution Rules to Include in Plan (MANDATORY) ## Execution Rules to Include in Plan (MANDATORY)
@@ -406,6 +540,7 @@ Do not rely on planner-private files during implementation.
- Handoff without explicit "read runbook first" direction. - Handoff without explicit "read runbook first" direction.
- Skipping the reviewer phase without explicit user opt-out. - Skipping the reviewer phase without explicit user opt-out.
- Not capturing the Codex session ID for resume in subsequent review rounds. - Not capturing the Codex session ID for resume in subsequent review rounds.
- Using any notification path other than Telegram.
## Rationalizations and Counters ## Rationalizations and Counters
@@ -433,6 +568,7 @@ Do not rely on planner-private files during implementation.
- [ ] `.gitignore` ignore-rule commit was created if needed - [ ] `.gitignore` ignore-rule commit was created if needed
- [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/` - [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/`
- [ ] Reviewer configured or explicitly skipped - [ ] Reviewer configured or explicitly skipped
- [ ] Max review rounds confirmed (default: 10)
- [ ] Plan review completed (approved or max rounds) — or skipped - [ ] Plan review completed (approved or max rounds) — or skipped
- [ ] `original-plan.md` present - [ ] `original-plan.md` present
- [ ] `final-transcript.md` present - [ ] `final-transcript.md` present
@@ -440,3 +576,4 @@ Do not rely on planner-private files during implementation.
- [ ] `story-tracker.md` present - [ ] `story-tracker.md` present
- [ ] `continuation-runbook.md` present - [ ] `continuation-runbook.md` present
- [ ] Handoff explicitly says to read runbook first and execute from plan folder - [ ] Handoff explicitly says to read runbook first and execute from plan folder
- [ ] Telegram notification attempted if configured

View File

@@ -73,7 +73,10 @@ If the user has already specified a reviewer CLI and model (e.g., "create a plan
- For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription) - For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription)
- Accept any model string the user provides - Accept any model string the user provides
Store the chosen `REVIEWER_CLI` and `REVIEWER_MODEL` for Phase 6 (Iterative Plan Review). 3. **Max review rounds for the plan?** (default: 10)
- If the user does not provide a value, set `MAX_ROUNDS=10`.
Store the chosen `REVIEWER_CLI`, `REVIEWER_MODEL`, and `MAX_ROUNDS` for Phase 6 (Iterative Plan Review).
### Phase 4: Design (REQUIRED SUB-SKILL) ### Phase 4: Design (REQUIRED SUB-SKILL)
@@ -86,7 +89,7 @@ Story IDs: `S-{milestone}{sequence}`.
### Phase 6: Iterative Plan Review ### Phase 6: Iterative Plan Review
Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (max 5 rounds). Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (default max 10 rounds).
**Skip this phase entirely if reviewer was set to `skip`.** **Skip this phase entirely if reviewer was set to `skip`.**
@@ -115,10 +118,60 @@ else
fi fi
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Plan to Temp File #### Step 2: Write Plan to Temp File
Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`. Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`.
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script:
@@ -142,8 +195,21 @@ codex exec \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -152,17 +218,34 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/plan-${REVIEW_ID}.md and review the implementation plan. Focus on: "Review the implementation plan below. Focus on:
$(cat /tmp/plan-${REVIEW_ID}.md)
1. Correctness — Will this plan achieve the stated goals? 1. Correctness — Will this plan achieve the stated goals?
2. Risks — What could go wrong? Edge cases? Data loss? 2. Risks — What could go wrong? Edge cases? Data loss?
3. Missing steps — Is anything forgotten? 3. Missing steps — Is anything forgotten?
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -180,8 +263,21 @@ cursor-agent -p \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
``` ```
@@ -201,13 +297,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/plan-review-${REVIEW_ID}.sh \ --command-file /tmp/plan-review-${REVIEW_ID}.sh \
--stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \
--status-file /tmp/plan-review-${REVIEW_ID}.status --status-file /tmp/plan-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/plan-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -217,6 +316,11 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/plan-review-${REVIEW_ID}.runner.out /tmp/plan-review-${REVIEW_ID}.md
```
#### Step 4: Read Review & Check Verdict #### Step 4: Read Review & Check Verdict
@@ -233,17 +337,19 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
[Reviewer feedback] [Reviewer feedback]
``` ```
3. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** → proceed to Phase 7 (Initialize workspace) 5. Check verdict:
- **VERDICT: REVISE** → go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings → proceed to Phase 7 (Initialize workspace)
- No clear verdict but positive / no actionable items → treat as approved - **VERDICT: APPROVED** with only `P3` findings → optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding → go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` → treat as approved
- Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` → surface status log and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` → surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (5) reached → proceed with warning - Max rounds (`MAX_ROUNDS`) reached → present the outcome to the user for a manual decision (proceed or stop)
#### Step 5: Revise the Plan #### Step 5: Revise the Plan
Address each issue the reviewer raised. Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`. Address the reviewer findings in priority order (`P0``P1``P2`, then `P3` when practical). Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`.
Summarize revisions for the user: Summarize revisions for the user:
@@ -254,7 +360,9 @@ Summarize revisions for the user:
If a revision contradicts the user's explicit requirements, skip it and note it for the user. If a revision contradicts the user's explicit requirements, skip it and note it for the user.
#### Step 6: Re-submit to Reviewer (Rounds 2-5) #### Step 6: Re-submit to Reviewer (Rounds 2-N)
Rewrite `/tmp/plan-review-${REVIEW_ID}.sh` for the next round. The script should contain the reviewer invocation only; do not run it directly.
**If `REVIEWER_CLI` is `codex`:** **If `REVIEWER_CLI` is `codex`:**
@@ -268,8 +376,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -284,15 +392,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've revised the plan. Updated version is in /tmp/plan-${REVIEW_ID}.md. I've revised the plan. Updated version is below.
$(cat /tmp/plan-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review the full plan. If solid, end with: VERDICT: APPROVED Re-review the full plan using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -310,8 +421,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md
@@ -330,7 +441,7 @@ Return to Step 4.
**Status:** Approved after N round(s) **Status:** Approved after N round(s)
[or] [or]
**Status:** Max rounds (5) reached — not fully approved **Status:** Max rounds (`MAX_ROUNDS`) reached — not fully approved
[Final feedback / remaining concerns] [Final feedback / remaining concerns]
``` ```
@@ -377,19 +488,46 @@ Always instruct the executing agent:
Do not rely on planner-private files during implementation. Do not rely on planner-private files during implementation.
### Phase 10: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from Cursor's installed skills directory:
```bash
if [ -x .cursor/skills/reviewer-runtime/notify-telegram.sh ]; then
TELEGRAM_NOTIFY_RUNTIME=.cursor/skills/reviewer-runtime/notify-telegram.sh
else
TELEGRAM_NOTIFY_RUNTIME=~/.cursor/skills/reviewer-runtime/notify-telegram.sh
fi
```
On every terminal outcome for the create-plan run (approved, max rounds reached, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "create-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Quick Reference ## Quick Reference
| Phase | Action | Required Output | | Phase | Action | Required Output |
|---|---|---| |---|---|---|
| 1 | Analyze codebase/context | Constraints and known patterns | | 1 | Analyze codebase/context | Constraints and known patterns |
| 2 | Gather requirements (one question at a time) | Confirmed scope and success criteria | | 2 | Gather requirements (one question at a time) | Confirmed scope and success criteria |
| 3 | Configure reviewer CLI and model | `REVIEWER_CLI` and `REVIEWER_MODEL` (or `skip`) | | 3 | Configure reviewer CLI and model | `REVIEWER_CLI`, `REVIEWER_MODEL`, `MAX_ROUNDS` (or `skip`) |
| 4 | Invoke `superpowers:brainstorming` | Chosen design approach | | 4 | Invoke `superpowers:brainstorming` | Chosen design approach |
| 5 | Invoke `superpowers:writing-plans` | Milestones and bite-sized stories | | 5 | Invoke `superpowers:writing-plans` | Milestones and bite-sized stories |
| 6 | Iterative plan review (max 5 rounds) | Reviewer approval or max-rounds warning | | 6 | Iterative plan review (max `MAX_ROUNDS` rounds) | Reviewer approval or max-rounds warning |
| 7 | Initialize `ai_plan/` + `.gitignore` | Local planning workspace ready | | 7 | Initialize `ai_plan/` + `.gitignore` | Local planning workspace ready |
| 8 | Build plan package from templates | Full plan folder with required files | | 8 | Build plan package from templates | Full plan folder with required files |
| 9 | Handoff with runbook-first instruction | Resumable execution context | | 9 | Handoff with runbook-first instruction | Resumable execution context |
| 10 | Send Telegram notification | User notified or notification status reported |
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
@@ -430,6 +568,7 @@ After completing any story:
- Omitting one or more required files in the plan package. - Omitting one or more required files in the plan package.
- Handoff without explicit "read runbook first" direction. - Handoff without explicit "read runbook first" direction.
- Skipping the reviewer phase without explicit user opt-out. - Skipping the reviewer phase without explicit user opt-out.
- Using any notification path other than Telegram.
## Red Flags - Stop and Correct ## Red Flags - Stop and Correct
@@ -447,6 +586,7 @@ After completing any story:
- [ ] `.gitignore` ignore-rule commit was created if needed - [ ] `.gitignore` ignore-rule commit was created if needed
- [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/` - [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/`
- [ ] Reviewer configured or explicitly skipped - [ ] Reviewer configured or explicitly skipped
- [ ] Max review rounds confirmed (default: 10)
- [ ] Plan review completed (approved or max rounds) — or skipped - [ ] Plan review completed (approved or max rounds) — or skipped
- [ ] `original-plan.md` present - [ ] `original-plan.md` present
- [ ] `final-transcript.md` present - [ ] `final-transcript.md` present
@@ -454,6 +594,7 @@ After completing any story:
- [ ] `story-tracker.md` present - [ ] `story-tracker.md` present
- [ ] `continuation-runbook.md` present - [ ] `continuation-runbook.md` present
- [ ] Handoff explicitly says to read runbook first and execute from plan folder - [ ] Handoff explicitly says to read runbook first and execute from plan folder
- [ ] Telegram notification attempted if configured
## Exit Triggers for Question Phase ## Exit Triggers for Question Phase
User says: "ready", "done", "let's plan", "proceed", "enough questions" User says: "ready", "done", "let's plan", "proceed", "enough questions"

View File

@@ -59,7 +59,10 @@ If the user has already specified a reviewer CLI and model (e.g., "create a plan
- For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription) - For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription)
- Accept any model string the user provides - Accept any model string the user provides
Store the chosen `REVIEWER_CLI` and `REVIEWER_MODEL` for Phase 7 (Iterative Plan Review). 3. **Max review rounds for the plan?** (default: 10)
- If the user does not provide a value, set `MAX_ROUNDS=10`.
Store the chosen `REVIEWER_CLI`, `REVIEWER_MODEL`, and `MAX_ROUNDS` for Phase 7 (Iterative Plan Review).
### Phase 5: Design (REQUIRED SUB-SKILL) ### Phase 5: Design (REQUIRED SUB-SKILL)
@@ -78,7 +81,7 @@ Story IDs: `S-{milestone}{sequence}`.
### Phase 7: Iterative Plan Review ### Phase 7: Iterative Plan Review
Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (max 5 rounds). Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (default max 10 rounds).
**Skip this phase entirely if reviewer was set to `skip`.** **Skip this phase entirely if reviewer was set to `skip`.**
@@ -103,10 +106,60 @@ Resolve the shared reviewer helper from the installed OpenCode skills directory:
REVIEWER_RUNTIME=~/.config/opencode/skills/reviewer-runtime/run-review.sh REVIEWER_RUNTIME=~/.config/opencode/skills/reviewer-runtime/run-review.sh
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Plan to Temp File #### Step 2: Write Plan to Temp File
Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`. Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`.
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script:
@@ -130,8 +183,21 @@ codex exec \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -140,17 +206,34 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/plan-${REVIEW_ID}.md and review the implementation plan. Focus on: "Review the implementation plan below. Focus on:
$(cat /tmp/plan-${REVIEW_ID}.md)
1. Correctness — Will this plan achieve the stated goals? 1. Correctness — Will this plan achieve the stated goals?
2. Risks — What could go wrong? Edge cases? Data loss? 2. Risks — What could go wrong? Edge cases? Data loss?
3. Missing steps — Is anything forgotten? 3. Missing steps — Is anything forgotten?
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -168,8 +251,21 @@ cursor-agent -p \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
``` ```
@@ -183,13 +279,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/plan-review-${REVIEW_ID}.sh \ --command-file /tmp/plan-review-${REVIEW_ID}.sh \
--stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \
--status-file /tmp/plan-review-${REVIEW_ID}.status --status-file /tmp/plan-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/plan-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -199,6 +298,11 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/plan-review-${REVIEW_ID}.runner.out /tmp/plan-review-${REVIEW_ID}.md
```
#### Step 4: Read Review & Check Verdict #### Step 4: Read Review & Check Verdict
@@ -215,17 +319,19 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
[Reviewer feedback] [Reviewer feedback]
``` ```
3. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** → proceed to Phase 8 (Initialize workspace) 5. Check verdict:
- **VERDICT: REVISE** → go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings → proceed to Phase 8 (Initialize workspace)
- No clear verdict but positive / no actionable items → treat as approved - **VERDICT: APPROVED** with only `P3` findings → optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding → go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` → treat as approved
- Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` → surface status log and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` → surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (5) reached → proceed with warning - Max rounds (`MAX_ROUNDS`) reached → present the outcome to the user for a manual decision (proceed or stop)
#### Step 5: Revise the Plan #### Step 5: Revise the Plan
Address each issue the reviewer raised. Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`. Address the reviewer findings in priority order (`P0``P1``P2`, then `P3` when practical). Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`.
Summarize revisions for the user: Summarize revisions for the user:
@@ -236,7 +342,9 @@ Summarize revisions for the user:
If a revision contradicts the user's explicit requirements, skip it and note it for the user. If a revision contradicts the user's explicit requirements, skip it and note it for the user.
#### Step 6: Re-submit to Reviewer (Rounds 2-5) #### Step 6: Re-submit to Reviewer (Rounds 2-N)
Rewrite `/tmp/plan-review-${REVIEW_ID}.sh` for the next round. The script should contain the reviewer invocation only; do not run it directly.
**If `REVIEWER_CLI` is `codex`:** **If `REVIEWER_CLI` is `codex`:**
@@ -250,8 +358,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -266,15 +374,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've revised the plan. Updated version is in /tmp/plan-${REVIEW_ID}.md. I've revised the plan. Updated version is below.
$(cat /tmp/plan-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review the full plan. If solid, end with: VERDICT: APPROVED Re-review the full plan using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -292,8 +403,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md
@@ -312,7 +423,7 @@ Return to Step 4.
**Status:** Approved after N round(s) **Status:** Approved after N round(s)
[or] [or]
**Status:** Max rounds (5) reached — not fully approved **Status:** Max rounds (`MAX_ROUNDS`) reached — not fully approved
[Final feedback / remaining concerns] [Final feedback / remaining concerns]
``` ```
@@ -357,6 +468,28 @@ Use templates from this skill's `templates/` folder.
Always instruct the executing agent: Always instruct the executing agent:
> Read `ai_plan/YYYY-MM-DD-<short-title>/continuation-runbook.md` first, then execute from `ai_plan` files only. > Read `ai_plan/YYYY-MM-DD-<short-title>/continuation-runbook.md` first, then execute from `ai_plan` files only.
### Phase 11: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from the installed OpenCode skills directory:
```bash
TELEGRAM_NOTIFY_RUNTIME=~/.config/opencode/skills/reviewer-runtime/notify-telegram.sh
```
On every terminal outcome for the create-plan run (approved, max rounds reached, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "create-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
Before starting any story: Before starting any story:
@@ -393,6 +526,7 @@ After completing any story:
- [ ] `.gitignore` ignore-rule commit was created if needed - [ ] `.gitignore` ignore-rule commit was created if needed
- [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/` - [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/`
- [ ] Reviewer configured or explicitly skipped - [ ] Reviewer configured or explicitly skipped
- [ ] Max review rounds confirmed (default: 10)
- [ ] Plan review completed (approved or max rounds) — or skipped - [ ] Plan review completed (approved or max rounds) — or skipped
- [ ] `original-plan.md` present - [ ] `original-plan.md` present
- [ ] `final-transcript.md` present - [ ] `final-transcript.md` present
@@ -400,6 +534,7 @@ After completing any story:
- [ ] `story-tracker.md` created with all stories as `pending` - [ ] `story-tracker.md` created with all stories as `pending`
- [ ] `continuation-runbook.md` present - [ ] `continuation-runbook.md` present
- [ ] Handoff explicitly says to read runbook first and execute from plan folder - [ ] Handoff explicitly says to read runbook first and execute from plan folder
- [ ] Telegram notification attempted if configured
## Exit Triggers for Question Phase ## Exit Triggers for Question Phase
User says: "ready", "done", "let's plan", "proceed", "enough questions" User says: "ready", "done", "let's plan", "proceed", "enough questions"

View File

@@ -151,6 +151,20 @@ Resolve the shared runtime helper path before writing the command script:
REVIEWER_RUNTIME=~/.claude/skills/reviewer-runtime/run-review.sh REVIEWER_RUNTIME=~/.claude/skills/reviewer-runtime/run-review.sh
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Review Payload #### Step 2: Write Review Payload
Write to `/tmp/milestone-${REVIEW_ID}.md`: Write to `/tmp/milestone-${REVIEW_ID}.md`:
@@ -176,6 +190,42 @@ Write to `/tmp/milestone-${REVIEW_ID}.md`:
[test output with pass/fail counts] [test output with pass/fail counts]
``` ```
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script:
@@ -201,8 +251,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -211,7 +275,9 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/milestone-${REVIEW_ID}.md and review this milestone implementation. "Review this milestone implementation using the following spec, acceptance criteria, git diff, and verification output:
$(cat /tmp/milestone-${REVIEW_ID}.md)
Evaluate: Evaluate:
1. Correctness — Does the implementation match the milestone spec? 1. Correctness — Does the implementation match the milestone spec?
@@ -220,10 +286,25 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -243,8 +324,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -258,13 +353,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/milestone-review-${REVIEW_ID}.sh \ --command-file /tmp/milestone-review-${REVIEW_ID}.sh \
--stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \
--status-file /tmp/milestone-review-${REVIEW_ID}.status --status-file /tmp/milestone-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/milestone-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -274,6 +372,11 @@ jq -r '.result' /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/milestone-review-${REVIEW_ID}.runner.out /tmp/milestone-review-${REVIEW_ID}.md
```
Fallback is allowed only when the helper is missing or not executable. Fallback is allowed only when the helper is missing or not executable.
@@ -292,17 +395,19 @@ Fallback is allowed only when the helper is missing or not executable.
[Reviewer feedback] [Reviewer feedback]
``` ```
4. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** -> proceed to Phase 4 Step 6 (commit & approve) 5. Check verdict:
- **VERDICT: REVISE** -> go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings -> proceed to Phase 4 Step 6 (commit & approve)
- No clear verdict but positive / no actionable items -> treat as approved - **VERDICT: APPROVED** with only `P3` findings -> optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding -> go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` -> treat as approved
- Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` -> surface status log, note any `stall-warning` heartbeat lines as non-terminal operator hints, and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` -> surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop) - Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop)
#### Step 5: Address Feedback & Re-verify #### Step 5: Address Feedback & Re-verify
1. Address each issue the reviewer raised (do NOT commit yet). 1. Address the reviewer findings in priority order (`P0` -> `P1` -> `P2`, then `P3` when practical) (do NOT commit yet).
2. Re-run verification (lint/typecheck/tests) — all must pass. 2. Re-run verification (lint/typecheck/tests) — all must pass.
3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output. 3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output.
@@ -331,8 +436,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -347,16 +452,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've addressed your feedback. Updated diff and verification output are in /tmp/milestone-${REVIEW_ID}.md. I've addressed your feedback. Updated diff and verification output are below.
$(cat /tmp/milestone-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read \ --strict-mcp-config \
> /tmp/milestone-review-${REVIEW_ID}.md --setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -374,8 +481,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -437,6 +544,28 @@ Present summary:
**Branch:** implement/<plan-folder-name> (merged and deleted) **Branch:** implement/<plan-folder-name> (merged and deleted)
``` ```
### Phase 8: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from the installed Claude Code skills directory:
```bash
TELEGRAM_NOTIFY_RUNTIME=~/.claude/skills/reviewer-runtime/notify-telegram.sh
```
On every terminal outcome for the implement-plan run (fully completed, stopped after max rounds, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "implement-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
**ALWAYS update `story-tracker.md` before/after each story. NEVER proceed with stale tracker state.** **ALWAYS update `story-tracker.md` before/after each story. NEVER proceed with stale tracker state.**
@@ -471,3 +600,4 @@ Note: Commit hashes are backfilled into story Notes after the milestone commit (
- [ ] Final test suite passes - [ ] Final test suite passes
- [ ] Worktree branch merged to parent and worktree deleted - [ ] Worktree branch merged to parent and worktree deleted
- [ ] Story tracker updated with final status - [ ] Story tracker updated with final status
- [ ] Telegram notification attempted if configured

View File

@@ -184,6 +184,20 @@ Resolve the shared runtime helper path before writing the command script:
REVIEWER_RUNTIME=~/.codex/skills/reviewer-runtime/run-review.sh REVIEWER_RUNTIME=~/.codex/skills/reviewer-runtime/run-review.sh
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Review Payload #### Step 2: Write Review Payload
Write to `/tmp/milestone-${REVIEW_ID}.md`: Write to `/tmp/milestone-${REVIEW_ID}.md`:
@@ -209,6 +223,42 @@ Write to `/tmp/milestone-${REVIEW_ID}.md`:
[test output with pass/fail counts] [test output with pass/fail counts]
``` ```
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script:
@@ -234,8 +284,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -244,7 +308,9 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/milestone-${REVIEW_ID}.md and review this milestone implementation. "Review this milestone implementation using the following spec, acceptance criteria, git diff, and verification output:
$(cat /tmp/milestone-${REVIEW_ID}.md)
Evaluate: Evaluate:
1. Correctness — Does the implementation match the milestone spec? 1. Correctness — Does the implementation match the milestone spec?
@@ -253,10 +319,25 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -276,8 +357,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -291,13 +386,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/milestone-review-${REVIEW_ID}.sh \ --command-file /tmp/milestone-review-${REVIEW_ID}.sh \
--stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \
--status-file /tmp/milestone-review-${REVIEW_ID}.status --status-file /tmp/milestone-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/milestone-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -307,6 +405,11 @@ jq -r '.result' /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/milestone-review-${REVIEW_ID}.runner.out /tmp/milestone-review-${REVIEW_ID}.md
```
Fallback is allowed only when the helper is missing or not executable. Fallback is allowed only when the helper is missing or not executable.
@@ -325,17 +428,19 @@ Fallback is allowed only when the helper is missing or not executable.
[Reviewer feedback] [Reviewer feedback]
``` ```
4. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** -> proceed to Phase 4 Step 6 (commit & approve) 5. Check verdict:
- **VERDICT: REVISE** -> go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings -> proceed to Phase 4 Step 6 (commit & approve)
- No clear verdict but positive / no actionable items -> treat as approved - **VERDICT: APPROVED** with only `P3` findings -> optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding -> go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` -> treat as approved
- Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` -> surface status log, note any `stall-warning` heartbeat lines as non-terminal operator hints, and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` -> surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop) - Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop)
#### Step 5: Address Feedback & Re-verify #### Step 5: Address Feedback & Re-verify
1. Address each issue the reviewer raised (do NOT commit yet). 1. Address the reviewer findings in priority order (`P0` -> `P1` -> `P2`, then `P3` when practical) (do NOT commit yet).
2. Re-run verification (lint/typecheck/tests) — all must pass. 2. Re-run verification (lint/typecheck/tests) — all must pass.
3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output. 3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output.
@@ -364,8 +469,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -380,16 +485,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've addressed your feedback. Updated diff and verification output are in /tmp/milestone-${REVIEW_ID}.md. I've addressed your feedback. Updated diff and verification output are below.
$(cat /tmp/milestone-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read \ --strict-mcp-config \
> /tmp/milestone-review-${REVIEW_ID}.md --setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -407,8 +514,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -470,6 +577,28 @@ Present summary:
**Branch:** implement/<plan-folder-name> (merged and deleted) **Branch:** implement/<plan-folder-name> (merged and deleted)
``` ```
### Phase 8: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from the installed Codex skills directory:
```bash
TELEGRAM_NOTIFY_RUNTIME=~/.codex/skills/reviewer-runtime/notify-telegram.sh
```
On every terminal outcome for the implement-plan run (fully completed, stopped after max rounds, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "implement-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Quick Reference ## Quick Reference
| Phase | Action | Required Output | | Phase | Action | Required Output |
@@ -481,6 +610,7 @@ Present summary:
| 5 | Milestone review loop (per milestone) | Reviewer approval or max rounds + user override | | 5 | Milestone review loop (per milestone) | Reviewer approval or max rounds + user override |
| 6 | Invoke `superpowers:finishing-a-development-branch` | Branch merged to parent, worktree deleted | | 6 | Invoke `superpowers:finishing-a-development-branch` | Branch merged to parent, worktree deleted |
| 7 | Final report | Summary presented | | 7 | Final report | Summary presented |
| 8 | Send Telegram notification | User notified or notification status reported |
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
@@ -508,6 +638,7 @@ Note: Commit hashes are backfilled into story Notes after the milestone commit (
- Not capturing the Codex session ID for resume in subsequent review rounds. - Not capturing the Codex session ID for resume in subsequent review rounds.
- Forgetting to update `story-tracker.md` between stories. - Forgetting to update `story-tracker.md` between stories.
- Creating a new worktree when one already exists for a resumed plan. - Creating a new worktree when one already exists for a resumed plan.
- Using any notification path other than Telegram.
## Rationalizations and Counters ## Rationalizations and Counters
@@ -545,3 +676,4 @@ Note: Commit hashes are backfilled into story Notes after the milestone commit (
- [ ] Final test suite passes - [ ] Final test suite passes
- [ ] Worktree branch merged to parent and worktree deleted - [ ] Worktree branch merged to parent and worktree deleted
- [ ] Story tracker updated with final status - [ ] Story tracker updated with final status
- [ ] Telegram notification attempted if configured

View File

@@ -188,6 +188,20 @@ else
fi fi
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Review Payload #### Step 2: Write Review Payload
Write to `/tmp/milestone-${REVIEW_ID}.md`: Write to `/tmp/milestone-${REVIEW_ID}.md`:
@@ -213,6 +227,42 @@ Write to `/tmp/milestone-${REVIEW_ID}.md`:
[test output with pass/fail counts] [test output with pass/fail counts]
``` ```
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script:
@@ -238,8 +288,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -248,7 +312,9 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/milestone-${REVIEW_ID}.md and review this milestone implementation. "Review this milestone implementation using the following spec, acceptance criteria, git diff, and verification output:
$(cat /tmp/milestone-${REVIEW_ID}.md)
Evaluate: Evaluate:
1. Correctness — Does the implementation match the milestone spec? 1. Correctness — Does the implementation match the milestone spec?
@@ -257,10 +323,25 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -280,8 +361,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -301,13 +396,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/milestone-review-${REVIEW_ID}.sh \ --command-file /tmp/milestone-review-${REVIEW_ID}.sh \
--stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \
--status-file /tmp/milestone-review-${REVIEW_ID}.status --status-file /tmp/milestone-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/milestone-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -317,6 +415,11 @@ jq -r '.result' /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/milestone-review-${REVIEW_ID}.runner.out /tmp/milestone-review-${REVIEW_ID}.md
```
Fallback is allowed only when the helper is missing or not executable. Fallback is allowed only when the helper is missing or not executable.
@@ -335,17 +438,19 @@ Fallback is allowed only when the helper is missing or not executable.
[Reviewer feedback] [Reviewer feedback]
``` ```
4. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** -> proceed to Phase 4 Step 6 (commit & approve) 5. Check verdict:
- **VERDICT: REVISE** -> go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings -> proceed to Phase 4 Step 6 (commit & approve)
- No clear verdict but positive / no actionable items -> treat as approved - **VERDICT: APPROVED** with only `P3` findings -> optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding -> go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` -> treat as approved
- Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` -> surface status log, note any `stall-warning` heartbeat lines as non-terminal operator hints, and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` -> surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop) - Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop)
#### Step 5: Address Feedback & Re-verify #### Step 5: Address Feedback & Re-verify
1. Address each issue the reviewer raised (do NOT commit yet). 1. Address the reviewer findings in priority order (`P0` -> `P1` -> `P2`, then `P3` when practical) (do NOT commit yet).
2. Re-run verification (lint/typecheck/tests) — all must pass. 2. Re-run verification (lint/typecheck/tests) — all must pass.
3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output. 3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output.
@@ -374,8 +479,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -390,16 +495,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've addressed your feedback. Updated diff and verification output are in /tmp/milestone-${REVIEW_ID}.md. I've addressed your feedback. Updated diff and verification output are below.
$(cat /tmp/milestone-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read \ --strict-mcp-config \
> /tmp/milestone-review-${REVIEW_ID}.md --setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -417,8 +524,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -480,6 +587,32 @@ Present summary:
**Branch:** implement/<plan-folder-name> (merged and deleted) **Branch:** implement/<plan-folder-name> (merged and deleted)
``` ```
### Phase 8: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from Cursor's installed skills directory:
```bash
if [ -x .cursor/skills/reviewer-runtime/notify-telegram.sh ]; then
TELEGRAM_NOTIFY_RUNTIME=.cursor/skills/reviewer-runtime/notify-telegram.sh
else
TELEGRAM_NOTIFY_RUNTIME=~/.cursor/skills/reviewer-runtime/notify-telegram.sh
fi
```
On every terminal outcome for the implement-plan run (fully completed, stopped after max rounds, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "implement-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Quick Reference ## Quick Reference
| Phase | Action | Required Output | | Phase | Action | Required Output |
@@ -491,6 +624,7 @@ Present summary:
| 5 | Milestone review loop (per milestone) | Reviewer approval or max rounds + user override | | 5 | Milestone review loop (per milestone) | Reviewer approval or max rounds + user override |
| 6 | Invoke `superpowers:finishing-a-development-branch` | Branch merged to parent, worktree deleted | | 6 | Invoke `superpowers:finishing-a-development-branch` | Branch merged to parent, worktree deleted |
| 7 | Final report | Summary presented | | 7 | Final report | Summary presented |
| 8 | Send Telegram notification | User notified or notification status reported |
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
@@ -516,6 +650,7 @@ Note: Commit hashes are backfilled into story Notes after the milestone commit (
- Skipping worktree setup and working directly on the main branch. - Skipping worktree setup and working directly on the main branch.
- Forgetting to update `story-tracker.md` between stories. - Forgetting to update `story-tracker.md` between stories.
- Creating a new worktree when one already exists for a resumed plan. - Creating a new worktree when one already exists for a resumed plan.
- Using any notification path other than Telegram.
## Red Flags - Stop and Correct ## Red Flags - Stop and Correct
@@ -544,3 +679,4 @@ Note: Commit hashes are backfilled into story Notes after the milestone commit (
- [ ] Final test suite passes - [ ] Final test suite passes
- [ ] Worktree branch merged to parent and worktree deleted - [ ] Worktree branch merged to parent and worktree deleted
- [ ] Story tracker updated with final status - [ ] Story tracker updated with final status
- [ ] Telegram notification attempted if configured

View File

@@ -169,6 +169,20 @@ Resolve the shared runtime helper path before writing the command script:
REVIEWER_RUNTIME=~/.config/opencode/skills/reviewer-runtime/run-review.sh REVIEWER_RUNTIME=~/.config/opencode/skills/reviewer-runtime/run-review.sh
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Review Payload #### Step 2: Write Review Payload
Write to `/tmp/milestone-${REVIEW_ID}.md`: Write to `/tmp/milestone-${REVIEW_ID}.md`:
@@ -194,6 +208,42 @@ Write to `/tmp/milestone-${REVIEW_ID}.md`:
[test output with pass/fail counts] [test output with pass/fail counts]
``` ```
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script:
@@ -219,8 +269,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -229,7 +293,9 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/milestone-${REVIEW_ID}.md and review this milestone implementation. "Review this milestone implementation using the following spec, acceptance criteria, git diff, and verification output:
$(cat /tmp/milestone-${REVIEW_ID}.md)
Evaluate: Evaluate:
1. Correctness — Does the implementation match the milestone spec? 1. Correctness — Does the implementation match the milestone spec?
@@ -238,10 +304,25 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -261,8 +342,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -276,13 +371,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/milestone-review-${REVIEW_ID}.sh \ --command-file /tmp/milestone-review-${REVIEW_ID}.sh \
--stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \
--status-file /tmp/milestone-review-${REVIEW_ID}.status --status-file /tmp/milestone-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/milestone-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -292,6 +390,11 @@ jq -r '.result' /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/milestone-review-${REVIEW_ID}.runner.out /tmp/milestone-review-${REVIEW_ID}.md
```
Fallback is allowed only when the helper is missing or not executable. Fallback is allowed only when the helper is missing or not executable.
@@ -310,17 +413,19 @@ Fallback is allowed only when the helper is missing or not executable.
[Reviewer feedback] [Reviewer feedback]
``` ```
4. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** -> proceed to Phase 5 Step 6 (commit & approve) 5. Check verdict:
- **VERDICT: REVISE** -> go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings -> proceed to Phase 5 Step 6 (commit & approve)
- No clear verdict but positive / no actionable items -> treat as approved - **VERDICT: APPROVED** with only `P3` findings -> optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding -> go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` -> treat as approved
- Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` -> surface status log, note any `stall-warning` heartbeat lines as non-terminal operator hints, and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` -> surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop) - Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop)
#### Step 5: Address Feedback & Re-verify #### Step 5: Address Feedback & Re-verify
1. Address each issue the reviewer raised (do NOT commit yet). 1. Address the reviewer findings in priority order (`P0` -> `P1` -> `P2`, then `P3` when practical) (do NOT commit yet).
2. Re-run verification (lint/typecheck/tests) — all must pass. 2. Re-run verification (lint/typecheck/tests) — all must pass.
3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output. 3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output.
@@ -349,8 +454,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -365,16 +470,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've addressed your feedback. Updated diff and verification output are in /tmp/milestone-${REVIEW_ID}.md. I've addressed your feedback. Updated diff and verification output are below.
$(cat /tmp/milestone-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read \ --strict-mcp-config \
> /tmp/milestone-review-${REVIEW_ID}.md --setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -392,8 +499,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -455,6 +562,28 @@ Present summary:
**Branch:** implement/<plan-folder-name> (merged and deleted) **Branch:** implement/<plan-folder-name> (merged and deleted)
``` ```
### Phase 9: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from the installed OpenCode skills directory:
```bash
TELEGRAM_NOTIFY_RUNTIME=~/.config/opencode/skills/reviewer-runtime/notify-telegram.sh
```
On every terminal outcome for the implement-plan run (fully completed, stopped after max rounds, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "implement-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
Before starting any story: Before starting any story:
@@ -487,3 +616,4 @@ Note: Commit hashes are backfilled into story Notes after the milestone commit (
- [ ] Final test suite passes - [ ] Final test suite passes
- [ ] Worktree branch merged to parent and worktree deleted - [ ] Worktree branch merged to parent and worktree deleted
- [ ] Story tracker updated with final status - [ ] Story tracker updated with final status
- [ ] Telegram notification attempted if configured

View File

@@ -0,0 +1,99 @@
#!/usr/bin/env bash
# notify-telegram.sh — send a notification message through the Telegram Bot API.
# Settings come from CLI flags (see usage) with environment-variable fallbacks.
set -euo pipefail

# Defaults for the Bot API endpoint and message formatting mode.
DEFAULT_API_BASE_URL="https://api.telegram.org"
DEFAULT_PARSE_MODE="HTML"
# Upper bound applied to MESSAGE before sending (enforced in parse_args).
MAX_MESSAGE_LENGTH=4096

# Runtime configuration: seeded from the environment, overridable via flags.
BOT_TOKEN=${TELEGRAM_BOT_TOKEN:-}
CHAT_ID=${TELEGRAM_CHAT_ID:-}
API_BASE_URL=${TELEGRAM_API_BASE_URL:-$DEFAULT_API_BASE_URL}
PARSE_MODE=${TELEGRAM_PARSE_MODE:-$DEFAULT_PARSE_MODE}
MESSAGE=""
MESSAGE_FILE=""
# Print CLI usage (flags and environment fallbacks) to stdout.
usage() {
  cat <<'EOF'
Usage:
  notify-telegram.sh --message <text> [--bot-token <token>] [--chat-id <id>] [--api-base-url <url>]
  notify-telegram.sh --message-file <path> [--bot-token <token>] [--chat-id <id>] [--api-base-url <url>]

Environment fallbacks:
  TELEGRAM_BOT_TOKEN
  TELEGRAM_CHAT_ID
  TELEGRAM_API_BASE_URL
  TELEGRAM_PARSE_MODE
EOF
}
# Report an invocation error on stderr, show usage there too, and exit 2.
fail_usage() {
  printf 'Error: %s\n' "$*" >&2
  usage >&2
  exit 2
}
# Parse command-line flags into the global configuration, then validate it.
# Globals written: BOT_TOKEN, CHAT_ID, API_BASE_URL, MESSAGE, MESSAGE_FILE.
# Exits through fail_usage (status 2) on any invalid invocation.
parse_args() {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --bot-token|--chat-id|--api-base-url|--message|--message-file)
        # Each of these flags requires a value. Without this guard, a flag
        # supplied as the last argument made `shift 2` fail, aborting the
        # script under `set -e` with no diagnostic at all.
        [[ $# -ge 2 ]] || fail_usage "option $1 requires a value"
        case "$1" in
          --bot-token) BOT_TOKEN=$2 ;;
          --chat-id) CHAT_ID=$2 ;;
          --api-base-url) API_BASE_URL=$2 ;;
          --message) MESSAGE=$2 ;;
          --message-file) MESSAGE_FILE=$2 ;;
        esac
        shift 2
        ;;
      --help|-h)
        usage
        exit 0
        ;;
      *)
        fail_usage "unknown argument: $1"
        ;;
    esac
  done

  if [[ -n "$MESSAGE" && -n "$MESSAGE_FILE" ]]; then
    fail_usage "use either --message or --message-file, not both"
  fi
  if [[ -n "$MESSAGE_FILE" ]]; then
    [[ -r "$MESSAGE_FILE" ]] || fail_usage "message file is not readable: $MESSAGE_FILE"
    MESSAGE=$(<"$MESSAGE_FILE")
  fi

  [[ -n "$MESSAGE" ]] || fail_usage "message is required"
  [[ -n "$BOT_TOKEN" ]] || fail_usage "bot token is required (use --bot-token or TELEGRAM_BOT_TOKEN)"
  [[ -n "$CHAT_ID" ]] || fail_usage "chat id is required (use --chat-id or TELEGRAM_CHAT_ID)"
  command -v curl >/dev/null 2>&1 || fail_usage "curl is required"

  # Stay within the MAX_MESSAGE_LENGTH cap: oversize input is truncated, not
  # rejected. NOTE(review): with PARSE_MODE=HTML, truncation can split an
  # HTML tag and make the API reject the payload — confirm acceptable.
  if [[ ${#MESSAGE} -gt "$MAX_MESSAGE_LENGTH" ]]; then
    MESSAGE=${MESSAGE:0:$MAX_MESSAGE_LENGTH}
  fi
}
# Entry point: validate configuration, then deliver the message via the Bot API.
main() {
  parse_args "$@"

  local send_url="${API_BASE_URL%/}/bot${BOT_TOKEN}/sendMessage"
  local -a curl_args=(
    -fsS -X POST "$send_url"
    --data-urlencode "chat_id=${CHAT_ID}"
    --data-urlencode "text=${MESSAGE}"
    --data-urlencode "parse_mode=${PARSE_MODE}"
    --data-urlencode "disable_web_page_preview=true"
  )
  # Discard the API's JSON response; success/failure is conveyed by exit status.
  curl "${curl_args[@]}" >/dev/null
}

main "$@"

View File

@@ -2,6 +2,7 @@
set -euo pipefail set -euo pipefail
DEFAULT_POLL_SECONDS=10 DEFAULT_POLL_SECONDS=10
DEFAULT_HEARTBEAT_SECONDS=60
DEFAULT_SOFT_TIMEOUT_SECONDS=600 DEFAULT_SOFT_TIMEOUT_SECONDS=600
DEFAULT_STALL_WARNING_SECONDS=300 DEFAULT_STALL_WARNING_SECONDS=300
DEFAULT_HARD_TIMEOUT_SECONDS=1800 DEFAULT_HARD_TIMEOUT_SECONDS=1800
@@ -12,7 +13,9 @@ COMMAND_FILE=""
STDOUT_FILE="" STDOUT_FILE=""
STDERR_FILE="" STDERR_FILE=""
STATUS_FILE="" STATUS_FILE=""
SUCCESS_FILES=()
POLL_SECONDS=$DEFAULT_POLL_SECONDS POLL_SECONDS=$DEFAULT_POLL_SECONDS
HEARTBEAT_SECONDS=$DEFAULT_HEARTBEAT_SECONDS
SOFT_TIMEOUT_SECONDS=$DEFAULT_SOFT_TIMEOUT_SECONDS SOFT_TIMEOUT_SECONDS=$DEFAULT_SOFT_TIMEOUT_SECONDS
STALL_WARNING_SECONDS=$DEFAULT_STALL_WARNING_SECONDS STALL_WARNING_SECONDS=$DEFAULT_STALL_WARNING_SECONDS
HARD_TIMEOUT_SECONDS=$DEFAULT_HARD_TIMEOUT_SECONDS HARD_TIMEOUT_SECONDS=$DEFAULT_HARD_TIMEOUT_SECONDS
@@ -29,7 +32,9 @@ Usage:
--stdout-file <path> \ --stdout-file <path> \
--stderr-file <path> \ --stderr-file <path> \
--status-file <path> \ --status-file <path> \
[--success-file <path>] \
[--poll-seconds <int>] \ [--poll-seconds <int>] \
[--heartbeat-seconds <int>] \
[--soft-timeout-seconds <int>] \ [--soft-timeout-seconds <int>] \
[--stall-warning-seconds <int>] \ [--stall-warning-seconds <int>] \
[--hard-timeout-seconds <int>] [--hard-timeout-seconds <int>]
@@ -55,6 +60,25 @@ escape_note() {
printf '%s' "$note" printf '%s' "$note"
} }
# Render the SUCCESS_FILES array as a single ", "-separated string.
# Prints the empty string when the array has no entries.
join_success_files() {
  local rendered="" entry
  # Length guard keeps the empty-array expansion safe under `set -u`.
  if [[ ${#SUCCESS_FILES[@]} -gt 0 ]]; then
    for entry in "${SUCCESS_FILES[@]}"; do
      rendered+="${rendered:+, }${entry}"
    done
  fi
  printf '%s' "$rendered"
}
iso_timestamp() { iso_timestamp() {
date -u +"%Y-%m-%dT%H:%M:%SZ" date -u +"%Y-%m-%dT%H:%M:%SZ"
} }
@@ -147,10 +171,18 @@ parse_args() {
STATUS_FILE=${2:-} STATUS_FILE=${2:-}
shift 2 shift 2
;; ;;
--success-file)
SUCCESS_FILES+=("${2:-}")
shift 2
;;
--poll-seconds) --poll-seconds)
POLL_SECONDS=${2:-} POLL_SECONDS=${2:-}
shift 2 shift 2
;; ;;
--heartbeat-seconds)
HEARTBEAT_SECONDS=${2:-}
shift 2
;;
--soft-timeout-seconds) --soft-timeout-seconds)
SOFT_TIMEOUT_SECONDS=${2:-} SOFT_TIMEOUT_SECONDS=${2:-}
shift 2 shift 2
@@ -179,11 +211,13 @@ parse_args() {
[[ -n "$STATUS_FILE" ]] || fail_usage "--status-file is required" [[ -n "$STATUS_FILE" ]] || fail_usage "--status-file is required"
require_integer "poll-seconds" "$POLL_SECONDS" require_integer "poll-seconds" "$POLL_SECONDS"
require_integer "heartbeat-seconds" "$HEARTBEAT_SECONDS"
require_integer "soft-timeout-seconds" "$SOFT_TIMEOUT_SECONDS" require_integer "soft-timeout-seconds" "$SOFT_TIMEOUT_SECONDS"
require_integer "stall-warning-seconds" "$STALL_WARNING_SECONDS" require_integer "stall-warning-seconds" "$STALL_WARNING_SECONDS"
require_integer "hard-timeout-seconds" "$HARD_TIMEOUT_SECONDS" require_integer "hard-timeout-seconds" "$HARD_TIMEOUT_SECONDS"
[[ "$POLL_SECONDS" -gt 0 ]] || fail_usage "poll-seconds must be > 0" [[ "$POLL_SECONDS" -gt 0 ]] || fail_usage "poll-seconds must be > 0"
[[ "$HEARTBEAT_SECONDS" -gt 0 ]] || fail_usage "heartbeat-seconds must be > 0"
[[ "$SOFT_TIMEOUT_SECONDS" -gt 0 ]] || fail_usage "soft-timeout-seconds must be > 0" [[ "$SOFT_TIMEOUT_SECONDS" -gt 0 ]] || fail_usage "soft-timeout-seconds must be > 0"
[[ "$STALL_WARNING_SECONDS" -gt 0 ]] || fail_usage "stall-warning-seconds must be > 0" [[ "$STALL_WARNING_SECONDS" -gt 0 ]] || fail_usage "stall-warning-seconds must be > 0"
[[ "$HARD_TIMEOUT_SECONDS" -gt 0 ]] || fail_usage "hard-timeout-seconds must be > 0" [[ "$HARD_TIMEOUT_SECONDS" -gt 0 ]] || fail_usage "hard-timeout-seconds must be > 0"
@@ -227,8 +261,10 @@ main() {
local last_stdout_bytes=0 local last_stdout_bytes=0
local last_stderr_bytes=0 local last_stderr_bytes=0
local last_output_change_time=$START_TIME local last_output_change_time=$START_TIME
local last_heartbeat_time=$START_TIME
local soft_timeout_logged=0 local soft_timeout_logged=0
local stall_warning_logged=0 local stall_warning_logged=0
local heartbeat_count=0
while kill -0 "$CHILD_PID" 2>/dev/null; do while kill -0 "$CHILD_PID" 2>/dev/null; do
sleep "$POLL_SECONDS" sleep "$POLL_SECONDS"
@@ -239,6 +275,12 @@ main() {
stdout_bytes=$(file_bytes "$STDOUT_FILE") stdout_bytes=$(file_bytes "$STDOUT_FILE")
stderr_bytes=$(file_bytes "$STDERR_FILE") stderr_bytes=$(file_bytes "$STDERR_FILE")
if [[ $((now - last_heartbeat_time)) -ge "$HEARTBEAT_SECONDS" ]]; then
heartbeat_count=$((heartbeat_count + 1))
append_status info in-progress "In progress ${heartbeat_count}"
last_heartbeat_time=$now
fi
if [[ "$stdout_bytes" -ne "$last_stdout_bytes" || "$stderr_bytes" -ne "$last_stderr_bytes" ]]; then if [[ "$stdout_bytes" -ne "$last_stdout_bytes" || "$stderr_bytes" -ne "$last_stderr_bytes" ]]; then
last_output_change_time=$now last_output_change_time=$now
stall_warning_logged=0 stall_warning_logged=0
@@ -285,6 +327,7 @@ main() {
trap - EXIT trap - EXIT
local final_stdout_bytes final_stderr_bytes local final_stdout_bytes final_stderr_bytes
local success_file success_bytes
final_stdout_bytes=$(file_bytes "$STDOUT_FILE") final_stdout_bytes=$(file_bytes "$STDOUT_FILE")
final_stderr_bytes=$(file_bytes "$STDERR_FILE") final_stderr_bytes=$(file_bytes "$STDERR_FILE")
@@ -294,6 +337,16 @@ main() {
exit 0 exit 0
fi fi
if [[ ${#SUCCESS_FILES[@]} -gt 0 ]]; then
for success_file in "${SUCCESS_FILES[@]}"; do
success_bytes=$(file_bytes "$success_file")
if [[ "$success_bytes" -gt 0 ]]; then
append_status info completed "reviewer completed successfully via success file $(join_success_files)"
exit 0
fi
done
fi
append_status error completed-empty-output "reviewer exited successfully with empty stdout" append_status error completed-empty-output "reviewer exited successfully with empty stdout"
exit "$EXIT_COMPLETED_EMPTY_OUTPUT" exit "$EXIT_COMPLETED_EMPTY_OUTPUT"
fi fi

Some files were not shown because too many files have changed in this diff Show More