Compare commits

..

8 Commits

Author SHA1 Message Date
Stefano Fiorini
c344e96984 Remove external web automation provenance refs 2026-04-09 11:11:40 -05:00
Stefano Fiorini
879cccf383 Refine web automation provenance exclusions 2026-04-09 10:31:41 -05:00
Stefano Fiorini
c97b7d44e5 feat(web-automation): implement milestone M2 mirror and docs 2026-04-09 10:21:21 -05:00
Stefano Fiorini
99fe6eab4e feat(web-automation): implement milestone M1 canonical codex migration 2026-04-09 10:13:25 -05:00
Stefano Fiorini
e917387d4f Force sending Telegram notification upon stopping for user input 2026-03-25 11:59:10 -05:00
Stefano Fiorini
63a048a26c Align reviewer runtime and Telegram notifications 2026-03-24 11:45:58 -05:00
Stefano Fiorini
4d37674626 fix: isolate claude reviewer templates 2026-03-08 18:04:07 -05:00
Stefano Fiorini
d44a2288b4 Update Atlassian skill prereq checks 2026-03-06 08:23:49 -06:00
63 changed files with 4805 additions and 2264 deletions

1
.gitignore vendored
View File

@@ -4,3 +4,4 @@
/skills/atlassian/shared/scripts/node_modules/ /skills/atlassian/shared/scripts/node_modules/
/skills/atlassian/*/scripts/.env /skills/atlassian/*/scripts/.env
/skills/atlassian/*/scripts/node_modules/ /skills/atlassian/*/scripts/node_modules/
/skills/web-automation/*/scripts/node_modules/

View File

@@ -68,9 +68,9 @@ ai-coding-skills/
| implement-plan | claude-code | Worktree-isolated plan execution with iterative cross-model milestone review | Ready | [IMPLEMENT-PLAN](docs/IMPLEMENT-PLAN.md) | | implement-plan | claude-code | Worktree-isolated plan execution with iterative cross-model milestone review | Ready | [IMPLEMENT-PLAN](docs/IMPLEMENT-PLAN.md) |
| implement-plan | opencode | Worktree-isolated plan execution with iterative cross-model milestone review | Ready | [IMPLEMENT-PLAN](docs/IMPLEMENT-PLAN.md) | | implement-plan | opencode | Worktree-isolated plan execution with iterative cross-model milestone review | Ready | [IMPLEMENT-PLAN](docs/IMPLEMENT-PLAN.md) |
| implement-plan | cursor | Worktree-isolated plan execution with iterative cross-model milestone review | Ready | [IMPLEMENT-PLAN](docs/IMPLEMENT-PLAN.md) | | implement-plan | cursor | Worktree-isolated plan execution with iterative cross-model milestone review | Ready | [IMPLEMENT-PLAN](docs/IMPLEMENT-PLAN.md) |
| web-automation | codex | Playwright + Camoufox browsing/scraping/auth automation | Ready | [WEB-AUTOMATION](docs/WEB-AUTOMATION.md) | | web-automation | codex | CloakBrowser-backed browsing, scraping, auth, flow automation, and install validation | Ready | [WEB-AUTOMATION](docs/WEB-AUTOMATION.md) |
| web-automation | claude-code | Playwright + Camoufox browsing/scraping/auth automation | Ready | [WEB-AUTOMATION](docs/WEB-AUTOMATION.md) | | web-automation | claude-code | CloakBrowser-backed browsing, scraping, auth, flow automation, and install validation | Ready | [WEB-AUTOMATION](docs/WEB-AUTOMATION.md) |
| web-automation | opencode | Playwright + Camoufox browsing/scraping/auth automation | Ready | [WEB-AUTOMATION](docs/WEB-AUTOMATION.md) | | web-automation | opencode | CloakBrowser-backed browsing, scraping, auth, flow automation, and install validation | Ready | [WEB-AUTOMATION](docs/WEB-AUTOMATION.md) |
- Docs index: `docs/README.md` - Docs index: `docs/README.md`
- Atlassian guide: `docs/ATLASSIAN.md` - Atlassian guide: `docs/ATLASSIAN.md`

View File

@@ -23,6 +23,8 @@ pnpm --dir skills/atlassian/shared/scripts sync:agents
- `ATLASSIAN_EMAIL` - `ATLASSIAN_EMAIL`
- `ATLASSIAN_API_TOKEN` - `ATLASSIAN_API_TOKEN`
The `ATLASSIAN_*` values may come from the shell environment or a `.env` file in the installed agent-specific `scripts/` folder.
Optional: Optional:
- `ATLASSIAN_JIRA_BASE_URL` - `ATLASSIAN_JIRA_BASE_URL`
@@ -121,9 +123,7 @@ Run in the installed `scripts/` folder:
```bash ```bash
node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')" node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')"
test -n \"$ATLASSIAN_BASE_URL\" node -e 'require("dotenv").config({ path: ".env" }); const required = ["ATLASSIAN_BASE_URL", "ATLASSIAN_EMAIL", "ATLASSIAN_API_TOKEN"]; const missing = required.filter((key) => !(process.env[key] || "").trim()); if (missing.length) { console.error("Missing required Atlassian config: " + missing.join(", ")); process.exit(1); } console.log("OK: Atlassian config present")'
test -n \"$ATLASSIAN_EMAIL\"
test -n \"$ATLASSIAN_API_TOKEN\"
pnpm atlassian health pnpm atlassian health
``` ```

View File

@@ -18,6 +18,7 @@ Create structured implementation plans with milestone and story tracking, and op
- Claude Code: `~/.claude/skills/reviewer-runtime/run-review.sh` - Claude Code: `~/.claude/skills/reviewer-runtime/run-review.sh`
- OpenCode: `~/.config/opencode/skills/reviewer-runtime/run-review.sh` - OpenCode: `~/.config/opencode/skills/reviewer-runtime/run-review.sh`
- Cursor: `.cursor/skills/reviewer-runtime/run-review.sh` or `~/.cursor/skills/reviewer-runtime/run-review.sh` - Cursor: `.cursor/skills/reviewer-runtime/run-review.sh` or `~/.cursor/skills/reviewer-runtime/run-review.sh`
- Telegram notification setup is documented in [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md)
If dependencies are missing, stop and return: If dependencies are missing, stop and return:
@@ -115,10 +116,13 @@ Verify Superpowers dependencies exist in your agent skills root:
- Creates plans under `ai_plan/YYYY-MM-DD-<short-title>/`. - Creates plans under `ai_plan/YYYY-MM-DD-<short-title>/`.
- Ensures `/ai_plan/` is in `.gitignore`. - Ensures `/ai_plan/` is in `.gitignore`.
- Commits `.gitignore` update locally when added. - Commits `.gitignore` update locally when added.
- Asks which reviewer CLI and model to use (or accepts `skip` for no review). - Asks which reviewer CLI, model, and max rounds to use (or accepts `skip` for no review).
- Iteratively reviews the plan with the chosen reviewer (max 5 rounds) before generating files. - Iteratively reviews the plan with the chosen reviewer (default max 10 rounds) before generating files.
- Runs reviewer commands through `reviewer-runtime/run-review.sh` when available, with fallback to direct synchronous execution only if the helper is missing. - Runs reviewer commands through `reviewer-runtime/run-review.sh` when available, with fallback to direct synchronous execution only if the helper is missing.
- Waits as long as the reviewer runtime keeps emitting per-minute `In progress N` heartbeats.
- Requires reviewer findings to be ordered `P0` through `P3`, with `P3` explicitly non-blocking.
- Captures reviewer stderr and helper status logs for diagnostics and retains them on failed, empty-output, or operator-decision review rounds. - Captures reviewer stderr and helper status logs for diagnostics and retains them on failed, empty-output, or operator-decision review rounds.
- Sends completion notifications through Telegram only when the shared setup in [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md) is installed and configured.
- Produces: - Produces:
- `original-plan.md` - `original-plan.md`
- `final-transcript.md` - `final-transcript.md`
@@ -130,13 +134,24 @@ Verify Superpowers dependencies exist in your agent skills root:
After the plan is created (design + milestones + stories), the skill sends it to a second model for review: After the plan is created (design + milestones + stories), the skill sends it to a second model for review:
1. **Configure** — user picks a reviewer CLI (`codex`, `claude`, `cursor`) and model, or skips 1. **Configure** — user picks a reviewer CLI (`codex`, `claude`, `cursor`), a model, and optional max rounds (default 10), or skips
2. **Prepare** — plan payload and a bash reviewer command script are written to temp files 2. **Prepare** — plan payload and a bash reviewer command script are written to temp files
3. **Run** — the command script is executed through `reviewer-runtime/run-review.sh` when installed 3. **Run** — the command script is executed through `reviewer-runtime/run-review.sh` when installed
4. **Feedback** — reviewer evaluates correctness, risks, missing steps, alternatives, security 4. **Feedback** — reviewer evaluates correctness, risks, missing steps, alternatives, security, and returns `## Summary`, `## Findings`, and `## Verdict`
5. **Revise** — the planning agent addresses each issue and re-submits 5. **Prioritize** — findings are ordered `P0`, `P1`, `P2`, `P3`
6. **Repeat**up to 5 rounds until the reviewer returns `VERDICT: APPROVED` 6. **Revise**the planning agent addresses findings in priority order and re-submits
7. **Finalize** — approved plan is used to generate the plan file package 7. **Repeat** — up to max rounds until the reviewer returns `VERDICT: APPROVED`
8. **Finalize** — approved plan is used to generate the plan file package
### Reviewer Output Contract
- `P0` = total blocker
- `P1` = major risk
- `P2` = must-fix before approval
- `P3` = cosmetic / nice to have
- Each severity section should use `- None.` when empty
- `VERDICT: APPROVED` is valid only when no `P0`, `P1`, or `P2` findings remain
- `P3` findings are non-blocking, but the caller should still try to fix them when cheap and safe
### Runtime Artifacts ### Runtime Artifacts
@@ -153,23 +168,25 @@ The review flow may create these temp artifacts:
Status log lines use this format: Status log lines use this format:
```text ```text
ts=<ISO-8601> level=<info|warn|error> state=<running-silent|running-active|stall-warning|completed|completed-empty-output|failed|needs-operator-decision> elapsed_s=<int> pid=<int> stdout_bytes=<int> stderr_bytes=<int> note="<short message>" ts=<ISO-8601> level=<info|warn|error> state=<running-silent|running-active|in-progress|stall-warning|completed|completed-empty-output|failed|needs-operator-decision> elapsed_s=<int> pid=<int> stdout_bytes=<int> stderr_bytes=<int> note="<short message>"
``` ```
`stall-warning` is a heartbeat/status-log state only. It is not a terminal review result. `in-progress` is the liveness heartbeat emitted roughly once per minute with `note="In progress N"`.
`stall-warning` is a non-terminal status-log state only. It does not mean the caller should stop waiting if `in-progress` heartbeats continue.
### Failure Handling ### Failure Handling
- `completed-empty-output` means the reviewer exited without producing review text; surface `.stderr` and `.status`, then retry only after diagnosing the cause. - `completed-empty-output` means the reviewer exited without producing review text; surface `.stderr` and `.status`, then retry only after diagnosing the cause.
- `needs-operator-decision` means the helper reached hard-timeout escalation; surface `.status` and decide whether to keep waiting, abort, or retry with different parameters. - `needs-operator-decision` means the helper reached hard-timeout escalation; surface `.status` and decide whether to extend the timeout, abort, or retry with different parameters.
- Successful rounds clean up temp artifacts. Failed, empty-output, and operator-decision rounds should retain `.stderr`, `.status`, and `.runner.out` until diagnosed. - Successful rounds clean up temp artifacts. Failed, empty-output, and operator-decision rounds should retain `.stderr`, `.status`, and `.runner.out` until diagnosed.
- As long as fresh `in-progress` heartbeats continue to arrive roughly once per minute, the caller should keep waiting.
### Supported Reviewer CLIs ### Supported Reviewer CLIs
| CLI | Command | Session Resume | Read-Only Mode | | CLI | Command | Session Resume | Read-Only Mode |
|---|---|---|---| |---|---|---|---|
| `codex` | `codex exec -m <model> -s read-only` | Yes (`codex exec resume <id>`) | `-s read-only` | | `codex` | `codex exec -m <model> -s read-only` | Yes (`codex exec resume <id>`) | `-s read-only` |
| `claude` | `claude -p --model <model> --allowedTools Read` | No (fresh call each round) | `--allowedTools Read` | | `claude` | `claude -p --model <model> --strict-mcp-config --setting-sources user` | No (fresh call each round) | `--strict-mcp-config --setting-sources user` |
| `cursor` | `cursor-agent -p --mode=ask --model <model> --trust --output-format json` | Yes (`--resume <id>`) | `--mode=ask` | | `cursor` | `cursor-agent -p --mode=ask --model <model> --trust --output-format json` | Yes (`--resume <id>`) | `--mode=ask` |
For all three CLIs, the preferred execution path is: For all three CLIs, the preferred execution path is:
@@ -178,6 +195,13 @@ For all three CLIs, the preferred execution path is:
2. run that script through `reviewer-runtime/run-review.sh` 2. run that script through `reviewer-runtime/run-review.sh`
3. fall back to direct synchronous execution only if the helper is missing or not executable 3. fall back to direct synchronous execution only if the helper is missing or not executable
## Notifications
- Telegram is the only supported notification path.
- Shared setup: [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md)
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
## Template Guardrails ## Template Guardrails
All plan templates now include guardrail sections that enforce: All plan templates now include guardrail sections that enforce:

View File

@@ -25,6 +25,7 @@ Execute an existing plan (created by `create-plan`) in an isolated git worktree,
- Claude Code: `~/.claude/skills/reviewer-runtime/run-review.sh` - Claude Code: `~/.claude/skills/reviewer-runtime/run-review.sh`
- OpenCode: `~/.config/opencode/skills/reviewer-runtime/run-review.sh` - OpenCode: `~/.config/opencode/skills/reviewer-runtime/run-review.sh`
- Cursor: `.cursor/skills/reviewer-runtime/run-review.sh` or `~/.cursor/skills/reviewer-runtime/run-review.sh` - Cursor: `.cursor/skills/reviewer-runtime/run-review.sh` or `~/.cursor/skills/reviewer-runtime/run-review.sh`
- Telegram notification setup is documented in [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md)
If dependencies are missing, stop and return: If dependencies are missing, stop and return:
@@ -133,10 +134,13 @@ Verify Superpowers execution dependencies exist in your agent skills root:
- Runs lint/typecheck/tests as a gate before each milestone review. - Runs lint/typecheck/tests as a gate before each milestone review.
- Sends each milestone to a reviewer CLI for approval (max rounds configurable, default 10). - Sends each milestone to a reviewer CLI for approval (max rounds configurable, default 10).
- Runs reviewer commands through `reviewer-runtime/run-review.sh` when available, with fallback to direct synchronous execution only if the helper is missing. - Runs reviewer commands through `reviewer-runtime/run-review.sh` when available, with fallback to direct synchronous execution only if the helper is missing.
- Waits as long as the reviewer runtime keeps emitting per-minute `In progress N` heartbeats.
- Requires reviewer findings to be ordered `P0` through `P3`, with `P3` explicitly non-blocking.
- Captures reviewer stderr and helper status logs for diagnostics and retains them on failed, empty-output, or operator-decision review rounds. - Captures reviewer stderr and helper status logs for diagnostics and retains them on failed, empty-output, or operator-decision review rounds.
- Commits each milestone locally only after reviewer approval (does not push). - Commits each milestone locally only after reviewer approval (does not push).
- After all milestones approved, merges worktree branch to parent and deletes worktree. - After all milestones approved, merges worktree branch to parent and deletes worktree.
- Supports resume: detects existing worktree and `in-dev`/`completed` stories. - Supports resume: detects existing worktree and `in-dev`/`completed` stories.
- Sends completion notifications through Telegram only when the shared setup in [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md) is installed and configured.
## Milestone Review Loop ## Milestone Review Loop
@@ -145,11 +149,22 @@ After each milestone is implemented and verified, the skill sends it to a second
1. **Configure** — user picks a reviewer CLI (`codex`, `claude`, `cursor`) and model, or skips 1. **Configure** — user picks a reviewer CLI (`codex`, `claude`, `cursor`) and model, or skips
2. **Prepare** — milestone payload and a bash reviewer command script are written to temp files 2. **Prepare** — milestone payload and a bash reviewer command script are written to temp files
3. **Run** — the command script is executed through `reviewer-runtime/run-review.sh` when installed 3. **Run** — the command script is executed through `reviewer-runtime/run-review.sh` when installed
4. **Feedback** — reviewer evaluates correctness, acceptance criteria, code quality, test coverage, security 4. **Feedback** — reviewer evaluates correctness, acceptance criteria, code quality, test coverage, security, and returns `## Summary`, `## Findings`, and `## Verdict`
5. **Revise** — the implementing agent addresses each issue, re-verifies, and re-submits 5. **Prioritize** — findings are ordered `P0`, `P1`, `P2`, `P3`
6. **Repeat**up to max rounds (default 10) until the reviewer returns `VERDICT: APPROVED` 6. **Revise**the implementing agent addresses findings in priority order, re-verifies, and re-submits
7. **Repeat** — up to max rounds (default 10) until the reviewer returns `VERDICT: APPROVED`
7. **Approve** — milestone is marked approved in `story-tracker.md` 8. **Approve** — milestone is marked approved in `story-tracker.md`
### Reviewer Output Contract
- `P0` = total blocker
- `P1` = major risk
- `P2` = must-fix before approval
- `P3` = cosmetic / nice to have
- Each severity section should use `- None.` when empty
- `VERDICT: APPROVED` is valid only when no `P0`, `P1`, or `P2` findings remain
- `P3` findings are non-blocking, but the caller should still try to fix them when cheap and safe
### Runtime Artifacts ### Runtime Artifacts
The milestone review flow may create these temp artifacts: The milestone review flow may create these temp artifacts:
@@ -165,23 +180,25 @@ The milestone review flow may create these temp artifacts:
Status log lines use this format: Status log lines use this format:
```text ```text
ts=<ISO-8601> level=<info|warn|error> state=<running-silent|running-active|stall-warning|completed|completed-empty-output|failed|needs-operator-decision> elapsed_s=<int> pid=<int> stdout_bytes=<int> stderr_bytes=<int> note="<short message>" ts=<ISO-8601> level=<info|warn|error> state=<running-silent|running-active|in-progress|stall-warning|completed|completed-empty-output|failed|needs-operator-decision> elapsed_s=<int> pid=<int> stdout_bytes=<int> stderr_bytes=<int> note="<short message>"
``` ```
`stall-warning` is a heartbeat/status-log state only. It is not a terminal review result. `in-progress` is the liveness heartbeat emitted roughly once per minute with `note="In progress N"`.
`stall-warning` is a non-terminal status-log state only. It does not mean the caller should stop waiting if `in-progress` heartbeats continue.
### Failure Handling ### Failure Handling
- `completed-empty-output` means the reviewer exited without producing review text; surface `.stderr` and `.status`, then retry only after diagnosing the cause. - `completed-empty-output` means the reviewer exited without producing review text; surface `.stderr` and `.status`, then retry only after diagnosing the cause.
- `needs-operator-decision` means the helper reached hard-timeout escalation; surface `.status` and decide whether to keep waiting, abort, or retry with different parameters. - `needs-operator-decision` means the helper reached hard-timeout escalation; surface `.status` and decide whether to extend the timeout, abort, or retry with different parameters.
- Successful rounds clean up temp artifacts. Failed, empty-output, and operator-decision rounds should retain `.stderr`, `.status`, and `.runner.out` until diagnosed. - Successful rounds clean up temp artifacts. Failed, empty-output, and operator-decision rounds should retain `.stderr`, `.status`, and `.runner.out` until diagnosed.
- As long as fresh `in-progress` heartbeats continue to arrive roughly once per minute, the caller should keep waiting.
### Supported Reviewer CLIs ### Supported Reviewer CLIs
| CLI | Command | Session Resume | Read-Only Mode | | CLI | Command | Session Resume | Read-Only Mode |
|---|---|---|---| |---|---|---|---|
| `codex` | `codex exec -m <model> -s read-only` | Yes (`codex exec resume <id>`) | `-s read-only` | | `codex` | `codex exec -m <model> -s read-only` | Yes (`codex exec resume <id>`) | `-s read-only` |
| `claude` | `claude -p --model <model> --allowedTools Read` | No (fresh call each round) | `--allowedTools Read` | | `claude` | `claude -p --model <model> --strict-mcp-config --setting-sources user` | No (fresh call each round) | `--strict-mcp-config --setting-sources user` |
| `cursor` | `cursor-agent -p --mode=ask --model <model> --trust --output-format json` | Yes (`--resume <id>`) | `--mode=ask` | | `cursor` | `cursor-agent -p --mode=ask --model <model> --trust --output-format json` | Yes (`--resume <id>`) | `--mode=ask` |
For all three CLIs, the preferred execution path is: For all three CLIs, the preferred execution path is:
@@ -190,6 +207,13 @@ For all three CLIs, the preferred execution path is:
2. run that script through `reviewer-runtime/run-review.sh` 2. run that script through `reviewer-runtime/run-review.sh`
3. fall back to direct synchronous execution only if the helper is missing or not executable 3. fall back to direct synchronous execution only if the helper is missing or not executable
## Notifications
- Telegram is the only supported notification path.
- Shared setup: [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md)
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
The helper also supports manual override flags for diagnostics: The helper also supports manual override flags for diagnostics:
```bash ```bash

View File

@@ -7,6 +7,7 @@ This directory contains user-facing docs for each skill.
- [ATLASSIAN.md](./ATLASSIAN.md) — Includes requirements, generated bundle sync, install, auth, safety rules, and usage examples for the Atlassian skill. - [ATLASSIAN.md](./ATLASSIAN.md) — Includes requirements, generated bundle sync, install, auth, safety rules, and usage examples for the Atlassian skill.
- [CREATE-PLAN.md](./CREATE-PLAN.md) — Includes requirements, install, verification, and execution workflow for create-plan. - [CREATE-PLAN.md](./CREATE-PLAN.md) — Includes requirements, install, verification, and execution workflow for create-plan.
- [IMPLEMENT-PLAN.md](./IMPLEMENT-PLAN.md) — Includes requirements, install, verification, and milestone review workflow for implement-plan. - [IMPLEMENT-PLAN.md](./IMPLEMENT-PLAN.md) — Includes requirements, install, verification, and milestone review workflow for implement-plan.
- [TELEGRAM-NOTIFICATIONS.md](./TELEGRAM-NOTIFICATIONS.md) — Shared Telegram notification setup used by reviewer-driven skills.
- [WEB-AUTOMATION.md](./WEB-AUTOMATION.md) — Includes requirements, install, dependency verification, and usage examples for web-automation. - [WEB-AUTOMATION.md](./WEB-AUTOMATION.md) — Includes requirements, install, dependency verification, and usage examples for web-automation.
## Repo Setup ## Repo Setup

View File

@@ -0,0 +1,97 @@
# TELEGRAM-NOTIFICATIONS
## Purpose
Shared setup for Telegram notifications used by reviewer-driven skills such as `create-plan` and `implement-plan`, both for completion and for pauses that need user attention.
## Requirements
- Telegram bot token in `TELEGRAM_BOT_TOKEN`
- Telegram chat id in `TELEGRAM_CHAT_ID`
- Notification helper installed beside the shared reviewer runtime:
- Codex: `~/.codex/skills/reviewer-runtime/notify-telegram.sh`
- Claude Code: `~/.claude/skills/reviewer-runtime/notify-telegram.sh`
- OpenCode: `~/.config/opencode/skills/reviewer-runtime/notify-telegram.sh`
- Cursor: `.cursor/skills/reviewer-runtime/notify-telegram.sh` or `~/.cursor/skills/reviewer-runtime/notify-telegram.sh`
## Install
The helper ships from `skills/reviewer-runtime/` together with `run-review.sh`.
### Codex
```bash
mkdir -p ~/.codex/skills/reviewer-runtime
cp -R skills/reviewer-runtime/* ~/.codex/skills/reviewer-runtime/
```
### Claude Code
```bash
mkdir -p ~/.claude/skills/reviewer-runtime
cp -R skills/reviewer-runtime/* ~/.claude/skills/reviewer-runtime/
```
### OpenCode
```bash
mkdir -p ~/.config/opencode/skills/reviewer-runtime
cp -R skills/reviewer-runtime/* ~/.config/opencode/skills/reviewer-runtime/
```
### Cursor
Repo-local install:
```bash
mkdir -p .cursor/skills/reviewer-runtime
cp -R skills/reviewer-runtime/* .cursor/skills/reviewer-runtime/
```
Global install:
```bash
mkdir -p ~/.cursor/skills/reviewer-runtime
cp -R skills/reviewer-runtime/* ~/.cursor/skills/reviewer-runtime/
```
## Verify Installation
```bash
test -x ~/.codex/skills/reviewer-runtime/notify-telegram.sh || true
test -x ~/.claude/skills/reviewer-runtime/notify-telegram.sh || true
test -x ~/.config/opencode/skills/reviewer-runtime/notify-telegram.sh || true
test -x .cursor/skills/reviewer-runtime/notify-telegram.sh || test -x ~/.cursor/skills/reviewer-runtime/notify-telegram.sh || true
```
## Configure Telegram
Export the required variables before running a skill that sends Telegram notifications:
```bash
export TELEGRAM_BOT_TOKEN="<bot-token>"
export TELEGRAM_CHAT_ID="<chat-id>"
```
Optional:
```bash
export TELEGRAM_API_BASE_URL="https://api.telegram.org"
```
## Test the Helper
Example:
```bash
TELEGRAM_BOT_TOKEN="<bot-token>" \
TELEGRAM_CHAT_ID="<chat-id>" \
skills/reviewer-runtime/notify-telegram.sh --message "Telegram notification test"
```
## Rules
- Telegram is the only supported notification path for these skills.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- Skills should report when Telegram is not configured instead of silently pretending a notification was sent.

View File

@@ -2,15 +2,25 @@
## Purpose ## Purpose
Automate browsing and scraping with Playwright + Camoufox. Automate rendered browsing, scraping, authentication, and multi-step browser flows with Playwright-compatible CloakBrowser.
## What Ships In Every Variant
- `browse.ts` for direct navigation and screenshots
- `auth.ts` for form and Microsoft SSO login flows
- `scrape.ts` for markdown extraction
- `flow.ts` for natural-language or JSON browser steps
- `extract.js` for one-shot rendered JSON extraction
- `check-install.js` for install and wiring validation
- `scan-local-app.ts` for configurable local-app smoke scans
## Requirements ## Requirements
- Node.js 20+ - Node.js 20+
- pnpm - pnpm
- `cloakbrowser`
- `playwright-core` - `playwright-core`
- `camoufox-js` - Network access to download the CloakBrowser binary on first use
- Network access to download Camoufox browser artifacts
## Install ## Install
@@ -21,8 +31,9 @@ mkdir -p ~/.codex/skills/web-automation
cp -R skills/web-automation/codex/* ~/.codex/skills/web-automation/ cp -R skills/web-automation/codex/* ~/.codex/skills/web-automation/
cd ~/.codex/skills/web-automation/scripts cd ~/.codex/skills/web-automation/scripts
pnpm install pnpm install
pnpm add playwright-core camoufox-js npx cloakbrowser install
npx camoufox-js fetch pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
``` ```
### Claude Code ### Claude Code
@@ -32,8 +43,9 @@ mkdir -p ~/.claude/skills/web-automation
cp -R skills/web-automation/claude-code/* ~/.claude/skills/web-automation/ cp -R skills/web-automation/claude-code/* ~/.claude/skills/web-automation/
cd ~/.claude/skills/web-automation/scripts cd ~/.claude/skills/web-automation/scripts
pnpm install pnpm install
pnpm add playwright-core camoufox-js npx cloakbrowser install
npx camoufox-js fetch pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
``` ```
### OpenCode ### OpenCode
@@ -43,25 +55,79 @@ mkdir -p ~/.config/opencode/skills/web-automation
cp -R skills/web-automation/opencode/* ~/.config/opencode/skills/web-automation/ cp -R skills/web-automation/opencode/* ~/.config/opencode/skills/web-automation/
cd ~/.config/opencode/skills/web-automation/scripts cd ~/.config/opencode/skills/web-automation/scripts
pnpm install pnpm install
pnpm add playwright-core camoufox-js npx cloakbrowser install
npx camoufox-js fetch pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
``` ```
## Verify Installation & Dependencies ## Update To The Latest CloakBrowser
Run inside the installed `scripts/` directory for the variant you are using:
```bash
pnpm up cloakbrowser playwright-core
npx cloakbrowser install
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
```
This repo intentionally treats `cloakbrowser` as a refreshable dependency: update to the latest available compatible release, then regenerate the lockfile from that resolved set.
## Verify Installation & Wiring
Run in the installed `scripts/` folder: Run in the installed `scripts/` folder:
```bash ```bash
node -e "require.resolve('playwright-core/package.json');require.resolve('camoufox-js/package.json');console.log('OK: playwright-core + camoufox-js installed')" node check-install.js
node -e "const fs=require('fs');const t=fs.readFileSync('browse.ts','utf8');if(!/camoufox-js/.test(t)){throw new Error('browse.ts is not configured for Camoufox')}console.log('OK: Camoufox integration detected in browse.ts')"
``` ```
If checks fail, stop and return: Expected checks:
"Missing dependency/config: web-automation requires `playwright-core` + `camoufox-js` and Camoufox-based scripts. Run setup in this skill, then retry." - `cloakbrowser` and `playwright-core` resolve correctly
- `browse.ts` is wired to CloakBrowser
If the check fails, stop and return:
"Missing dependency/config: web-automation requires `cloakbrowser` and `playwright-core` with CloakBrowser-based scripts. Run setup in this skill, then retry."
If runtime later fails with native-binding issues, run:
```bash
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
```
## Environment Variables
- `CLOAKBROWSER_PROFILE_PATH`
- `CLOAKBROWSER_HEADLESS`
- `CLOAKBROWSER_USERNAME`
- `CLOAKBROWSER_PASSWORD`
There are no `CAMOUFOX_*` compatibility aliases in this migration.
## Usage Examples ## Usage Examples
- Browse: `npx tsx browse.ts --url "https://example.com"` - Browse: `npx tsx browse.ts --url "https://example.com"`
- Scrape: `npx tsx scrape.ts --url "https://example.com" --mode main --output page.md` - Scrape markdown: `npx tsx scrape.ts --url "https://example.com" --mode main --output page.md`
- Auth: `npx tsx auth.ts --url "https://example.com/login"` - Authenticate: `npx tsx auth.ts --url "https://example.com/login"`
- Natural-language flow: `npx tsx flow.ts --instruction 'go to https://example.com then click on "Login"'`
- JSON extract: `node extract.js "https://example.com"`
- Local smoke scan: `SCAN_BASE_URL=http://localhost:3000 SCAN_ROUTES=/,/dashboard npx tsx scan-local-app.ts`
## Local App Smoke Scan
`scan-local-app.ts` is generic. Configure it with:
- `SCAN_BASE_URL`
- `SCAN_LOGIN_PATH`
- `SCAN_USERNAME`
- `SCAN_PASSWORD`
- `SCAN_USERNAME_SELECTOR`
- `SCAN_PASSWORD_SELECTOR`
- `SCAN_SUBMIT_SELECTOR`
- `SCAN_ROUTES`
- `SCAN_REPORT_PATH`
- `SCAN_HEADLESS`
If `SCAN_USERNAME` or `SCAN_PASSWORD` is omitted, the script falls back to `CLOAKBROWSER_USERNAME` and `CLOAKBROWSER_PASSWORD`.

View File

@@ -16,6 +16,8 @@ Portable Atlassian workflows for Claude Code using a shared TypeScript CLI.
- `ATLASSIAN_EMAIL` - `ATLASSIAN_EMAIL`
- `ATLASSIAN_API_TOKEN` - `ATLASSIAN_API_TOKEN`
The `ATLASSIAN_*` values may come from the shell environment or a `.env` file in `~/.claude/skills/atlassian/scripts`.
## First-Time Setup ## First-Time Setup
```bash ```bash
@@ -30,15 +32,13 @@ pnpm install
```bash ```bash
cd ~/.claude/skills/atlassian/scripts cd ~/.claude/skills/atlassian/scripts
node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')" node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')"
test -n \"$ATLASSIAN_BASE_URL\" node -e 'require("dotenv").config({ path: ".env" }); const required = ["ATLASSIAN_BASE_URL", "ATLASSIAN_EMAIL", "ATLASSIAN_API_TOKEN"]; const missing = required.filter((key) => !(process.env[key] || "").trim()); if (missing.length) { console.error("Missing required Atlassian config: " + missing.join(", ")); process.exit(1); } console.log("OK: Atlassian config present")'
test -n \"$ATLASSIAN_EMAIL\"
test -n \"$ATLASSIAN_API_TOKEN\"
pnpm atlassian health pnpm atlassian health
``` ```
If any check fails, stop and return: If any check fails, stop and return:
`Missing dependency/config: atlassian requires installed CLI dependencies and valid Atlassian Cloud credentials. Run setup and configure ATLASSIAN_* env vars, then retry.` `Missing dependency/config: atlassian requires installed CLI dependencies and valid Atlassian Cloud credentials. Configure ATLASSIAN_* in the shell environment or scripts/.env, then retry.`
## Supported Commands ## Supported Commands

View File

@@ -16,6 +16,8 @@ Portable Atlassian workflows for Codex using a shared TypeScript CLI.
- `ATLASSIAN_EMAIL` - `ATLASSIAN_EMAIL`
- `ATLASSIAN_API_TOKEN` - `ATLASSIAN_API_TOKEN`
The `ATLASSIAN_*` values may come from the shell environment or a `.env` file in `~/.codex/skills/atlassian/scripts`.
## First-Time Setup ## First-Time Setup
```bash ```bash
@@ -32,15 +34,13 @@ Run before using the skill:
```bash ```bash
cd ~/.codex/skills/atlassian/scripts cd ~/.codex/skills/atlassian/scripts
node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')" node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')"
test -n \"$ATLASSIAN_BASE_URL\" node -e 'require("dotenv").config({ path: ".env" }); const required = ["ATLASSIAN_BASE_URL", "ATLASSIAN_EMAIL", "ATLASSIAN_API_TOKEN"]; const missing = required.filter((key) => !(process.env[key] || "").trim()); if (missing.length) { console.error("Missing required Atlassian config: " + missing.join(", ")); process.exit(1); } console.log("OK: Atlassian config present")'
test -n \"$ATLASSIAN_EMAIL\"
test -n \"$ATLASSIAN_API_TOKEN\"
pnpm atlassian health pnpm atlassian health
``` ```
If any check fails, stop and return: If any check fails, stop and return:
`Missing dependency/config: atlassian requires installed CLI dependencies and valid Atlassian Cloud credentials. Run setup and configure ATLASSIAN_* env vars, then retry.` `Missing dependency/config: atlassian requires installed CLI dependencies and valid Atlassian Cloud credentials. Configure ATLASSIAN_* in the shell environment or scripts/.env, then retry.`
## Supported Commands ## Supported Commands

View File

@@ -17,6 +17,8 @@ Portable Atlassian workflows for Cursor Agent CLI using a shared TypeScript CLI.
- `ATLASSIAN_EMAIL` - `ATLASSIAN_EMAIL`
- `ATLASSIAN_API_TOKEN` - `ATLASSIAN_API_TOKEN`
The `ATLASSIAN_*` values may come from the shell environment or a `.env` file in the installed `scripts/` folder.
## First-Time Setup ## First-Time Setup
Repo-local install: Repo-local install:
@@ -45,15 +47,13 @@ Repo-local form:
cursor-agent --version cursor-agent --version
cd .cursor/skills/atlassian/scripts cd .cursor/skills/atlassian/scripts
node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')" node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')"
test -n \"$ATLASSIAN_BASE_URL\" node -e 'require("dotenv").config({ path: ".env" }); const required = ["ATLASSIAN_BASE_URL", "ATLASSIAN_EMAIL", "ATLASSIAN_API_TOKEN"]; const missing = required.filter((key) => !(process.env[key] || "").trim()); if (missing.length) { console.error("Missing required Atlassian config: " + missing.join(", ")); process.exit(1); } console.log("OK: Atlassian config present")'
test -n \"$ATLASSIAN_EMAIL\"
test -n \"$ATLASSIAN_API_TOKEN\"
pnpm atlassian health pnpm atlassian health
``` ```
If any check fails, stop and return: If any check fails, stop and return:
`Missing dependency/config: atlassian requires installed CLI dependencies and valid Atlassian Cloud credentials. Run setup and configure ATLASSIAN_* env vars, then retry.` `Missing dependency/config: atlassian requires installed CLI dependencies and valid Atlassian Cloud credentials. Configure ATLASSIAN_* in the shell environment or scripts/.env, then retry.`
## Supported Commands ## Supported Commands

View File

@@ -16,6 +16,8 @@ Portable Atlassian workflows for OpenCode using a shared TypeScript CLI.
- `ATLASSIAN_EMAIL` - `ATLASSIAN_EMAIL`
- `ATLASSIAN_API_TOKEN` - `ATLASSIAN_API_TOKEN`
The `ATLASSIAN_*` values may come from the shell environment or a `.env` file in `~/.config/opencode/skills/atlassian/scripts`.
## First-Time Setup ## First-Time Setup
```bash ```bash
@@ -30,15 +32,13 @@ pnpm install
```bash ```bash
cd ~/.config/opencode/skills/atlassian/scripts cd ~/.config/opencode/skills/atlassian/scripts
node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')" node -e "require.resolve('commander');require.resolve('dotenv');console.log('OK: runtime dependencies installed')"
test -n \"$ATLASSIAN_BASE_URL\" node -e 'require("dotenv").config({ path: ".env" }); const required = ["ATLASSIAN_BASE_URL", "ATLASSIAN_EMAIL", "ATLASSIAN_API_TOKEN"]; const missing = required.filter((key) => !(process.env[key] || "").trim()); if (missing.length) { console.error("Missing required Atlassian config: " + missing.join(", ")); process.exit(1); } console.log("OK: Atlassian config present")'
test -n \"$ATLASSIAN_EMAIL\"
test -n \"$ATLASSIAN_API_TOKEN\"
pnpm atlassian health pnpm atlassian health
``` ```
If any check fails, stop and return: If any check fails, stop and return:
`Missing dependency/config: atlassian requires installed CLI dependencies and valid Atlassian Cloud credentials. Run setup and configure ATLASSIAN_* env vars, then retry.` `Missing dependency/config: atlassian requires installed CLI dependencies and valid Atlassian Cloud credentials. Configure ATLASSIAN_* in the shell environment or scripts/.env, then retry.`
## Supported Commands ## Supported Commands

View File

@@ -47,7 +47,10 @@ If the user has already specified a reviewer CLI and model (e.g., "create a plan
- For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription) - For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription)
- Accept any model string the user provides - Accept any model string the user provides
Store the chosen `REVIEWER_CLI` and `REVIEWER_MODEL` for Phase 6 (Iterative Plan Review). 3. **Max review rounds for the plan?** (default: 10)
- If the user does not provide a value, set `MAX_ROUNDS=10`.
Store the chosen `REVIEWER_CLI`, `REVIEWER_MODEL`, and `MAX_ROUNDS` for Phase 6 (Iterative Plan Review).
### Phase 4: Design (REQUIRED SUB-SKILL) ### Phase 4: Design (REQUIRED SUB-SKILL)
- Invoke `superpowers:brainstorming` explicitly. - Invoke `superpowers:brainstorming` explicitly.
@@ -61,7 +64,7 @@ Store the chosen `REVIEWER_CLI` and `REVIEWER_MODEL` for Phase 6 (Iterative Plan
### Phase 6: Iterative Plan Review ### Phase 6: Iterative Plan Review
Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (max 5 rounds). Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (default max 10 rounds).
**Skip this phase entirely if reviewer was set to `skip`.** **Skip this phase entirely if reviewer was set to `skip`.**
@@ -86,10 +89,60 @@ Resolve the shared reviewer helper from the installed Claude Code skills directo
REVIEWER_RUNTIME=~/.claude/skills/reviewer-runtime/run-review.sh REVIEWER_RUNTIME=~/.claude/skills/reviewer-runtime/run-review.sh
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Plan to Temp File #### Step 2: Write Plan to Temp File
Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`. Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`.
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script:
@@ -113,8 +166,21 @@ codex exec \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -123,17 +189,34 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/plan-${REVIEW_ID}.md and review the implementation plan. Focus on: "Review the implementation plan below. Focus on:
$(cat /tmp/plan-${REVIEW_ID}.md)
1. Correctness — Will this plan achieve the stated goals? 1. Correctness — Will this plan achieve the stated goals?
2. Risks — What could go wrong? Edge cases? Data loss? 2. Risks — What could go wrong? Edge cases? Data loss?
3. Missing steps — Is anything forgotten? 3. Missing steps — Is anything forgotten?
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -151,8 +234,21 @@ cursor-agent -p \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
``` ```
@@ -166,13 +262,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/plan-review-${REVIEW_ID}.sh \ --command-file /tmp/plan-review-${REVIEW_ID}.sh \
--stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \
--status-file /tmp/plan-review-${REVIEW_ID}.status --status-file /tmp/plan-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/plan-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -182,6 +281,13 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/plan-review-${REVIEW_ID}.runner.out /tmp/plan-review-${REVIEW_ID}.md
```
Fallback is allowed only when the helper is missing or not executable.
#### Step 4: Read Review & Check Verdict #### Step 4: Read Review & Check Verdict
@@ -198,17 +304,19 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
[Reviewer feedback] [Reviewer feedback]
``` ```
3. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** → proceed to Phase 7 (Initialize workspace) 5. Check verdict:
- **VERDICT: REVISE** → go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings → proceed to Phase 7 (Initialize workspace)
- No clear verdict but positive / no actionable items → treat as approved - **VERDICT: APPROVED** with only `P3` findings → optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding → go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` → treat as approved
- Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` → surface status log and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` → surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (5) reached → proceed with warning - Max rounds (`MAX_ROUNDS`) reached → present the outcome to the user for a manual decision (proceed or stop)
#### Step 5: Revise the Plan #### Step 5: Revise the Plan
Address each issue the reviewer raised. Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`. Address the reviewer findings in priority order (`P0``P1``P2`, then `P3` when practical). Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`.
Summarize revisions for the user: Summarize revisions for the user:
@@ -219,7 +327,9 @@ Summarize revisions for the user:
If a revision contradicts the user's explicit requirements, skip it and note it for the user. If a revision contradicts the user's explicit requirements, skip it and note it for the user.
#### Step 6: Re-submit to Reviewer (Rounds 2-5) #### Step 6: Re-submit to Reviewer (Rounds 2-N)
Rewrite `/tmp/plan-review-${REVIEW_ID}.sh` for the next round. The script should contain the reviewer invocation only; do not run it directly.
**If `REVIEWER_CLI` is `codex`:** **If `REVIEWER_CLI` is `codex`:**
@@ -233,8 +343,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -249,15 +359,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've revised the plan. Updated version is in /tmp/plan-${REVIEW_ID}.md. I've revised the plan. Updated version is below.
$(cat /tmp/plan-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review the full plan. If solid, end with: VERDICT: APPROVED Re-review the full plan using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -275,8 +388,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md
@@ -295,7 +408,7 @@ Return to Step 4.
**Status:** Approved after N round(s) **Status:** Approved after N round(s)
[or] [or]
**Status:** Max rounds (5) reached — not fully approved **Status:** Max rounds (`MAX_ROUNDS`) reached — not fully approved
[Final feedback / remaining concerns] [Final feedback / remaining concerns]
``` ```
@@ -347,6 +460,28 @@ When handing off to execution, instruct:
Private plan files under `~/.claude/plans/` are planning artifacts and must not be used as execution source of truth. Private plan files under `~/.claude/plans/` are planning artifacts and must not be used as execution source of truth.
### Phase 10: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from the installed Claude Code skills directory:
```bash
TELEGRAM_NOTIFY_RUNTIME=~/.claude/skills/reviewer-runtime/notify-telegram.sh
```
On every terminal outcome for the create-plan run (approved, max rounds reached, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "create-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
**ALWAYS update `story-tracker.md` before/after each story. NEVER proceed with stale tracker state.** **ALWAYS update `story-tracker.md` before/after each story. NEVER proceed with stale tracker state.**
@@ -385,6 +520,7 @@ After completing any story:
- [ ] `.gitignore` ignore-rule commit was created if needed - [ ] `.gitignore` ignore-rule commit was created if needed
- [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/` - [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/`
- [ ] Reviewer configured or explicitly skipped - [ ] Reviewer configured or explicitly skipped
- [ ] Max review rounds confirmed (default: 10)
- [ ] Plan review completed (approved or max rounds) — or skipped - [ ] Plan review completed (approved or max rounds) — or skipped
- [ ] `original-plan.md` copied from `~/.claude/plans/` plan file - [ ] `original-plan.md` copied from `~/.claude/plans/` plan file
- [ ] `final-transcript.md` present - [ ] `final-transcript.md` present
@@ -392,6 +528,7 @@ After completing any story:
- [ ] `story-tracker.md` created with all stories as `pending` - [ ] `story-tracker.md` created with all stories as `pending`
- [ ] `continuation-runbook.md` present - [ ] `continuation-runbook.md` present
- [ ] Handoff explicitly says to read runbook first and execute from plan folder - [ ] Handoff explicitly says to read runbook first and execute from plan folder
- [ ] Telegram notification attempted if configured
## Exit Triggers for Question Phase ## Exit Triggers for Question Phase
User says: "ready", "done", "let's plan", "proceed", "enough questions" User says: "ready", "done", "let's plan", "proceed", "enough questions"

View File

@@ -72,7 +72,10 @@ If the user has already specified a reviewer CLI and model (e.g., "create a plan
- For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription) - For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription)
- Accept any model string the user provides - Accept any model string the user provides
Store the chosen `REVIEWER_CLI` and `REVIEWER_MODEL` for Phase 6 (Iterative Plan Review). 3. **Max review rounds for the plan?** (default: 10)
- If the user does not provide a value, set `MAX_ROUNDS=10`.
Store the chosen `REVIEWER_CLI`, `REVIEWER_MODEL`, and `MAX_ROUNDS` for Phase 6 (Iterative Plan Review).
### Phase 4: Design (REQUIRED SUB-SKILL) ### Phase 4: Design (REQUIRED SUB-SKILL)
@@ -84,7 +87,7 @@ Invoke `superpowers:writing-plans`, then break work into milestones and bite-siz
### Phase 6: Iterative Plan Review ### Phase 6: Iterative Plan Review
Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (max 5 rounds). Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (default max 10 rounds).
**Skip this phase entirely if reviewer was set to `skip`.** **Skip this phase entirely if reviewer was set to `skip`.**
@@ -109,10 +112,60 @@ Resolve the shared reviewer helper from the installed Codex skills directory:
REVIEWER_RUNTIME=~/.codex/skills/reviewer-runtime/run-review.sh REVIEWER_RUNTIME=~/.codex/skills/reviewer-runtime/run-review.sh
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Plan to Temp File #### Step 2: Write Plan to Temp File
Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`. Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`.
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script:
@@ -136,8 +189,21 @@ codex exec \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -146,17 +212,34 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/plan-${REVIEW_ID}.md and review the implementation plan. Focus on: "Review the implementation plan below. Focus on:
$(cat /tmp/plan-${REVIEW_ID}.md)
1. Correctness — Will this plan achieve the stated goals? 1. Correctness — Will this plan achieve the stated goals?
2. Risks — What could go wrong? Edge cases? Data loss? 2. Risks — What could go wrong? Edge cases? Data loss?
3. Missing steps — Is anything forgotten? 3. Missing steps — Is anything forgotten?
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -174,8 +257,21 @@ cursor-agent -p \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
``` ```
@@ -189,13 +285,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/plan-review-${REVIEW_ID}.sh \ --command-file /tmp/plan-review-${REVIEW_ID}.sh \
--stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \
--status-file /tmp/plan-review-${REVIEW_ID}.status --status-file /tmp/plan-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/plan-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -205,6 +304,11 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/plan-review-${REVIEW_ID}.runner.out /tmp/plan-review-${REVIEW_ID}.md
```
Fallback is allowed only when the helper is missing or not executable. Fallback is allowed only when the helper is missing or not executable.
@@ -223,17 +327,19 @@ Fallback is allowed only when the helper is missing or not executable.
[Reviewer feedback] [Reviewer feedback]
``` ```
3. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** → proceed to Phase 7 (Initialize workspace) 5. Check verdict:
- **VERDICT: REVISE** → go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings → proceed to Phase 7 (Initialize workspace)
- No clear verdict but positive / no actionable items → treat as approved - **VERDICT: APPROVED** with only `P3` findings → optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding → go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` → treat as approved
- Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` → surface status log and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` → surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (5) reached → proceed with warning - Max rounds (`MAX_ROUNDS`) reached → present the outcome to the user for a manual decision (proceed or stop)
#### Step 5: Revise the Plan #### Step 5: Revise the Plan
Address each issue the reviewer raised. Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`. Address the reviewer findings in priority order (`P0``P1``P2`, then `P3` when practical). Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`.
Summarize revisions for the user: Summarize revisions for the user:
@@ -244,7 +350,9 @@ Summarize revisions for the user:
If a revision contradicts the user's explicit requirements, skip it and note it for the user. If a revision contradicts the user's explicit requirements, skip it and note it for the user.
#### Step 6: Re-submit to Reviewer (Rounds 2-5) #### Step 6: Re-submit to Reviewer (Rounds 2-N)
Rewrite `/tmp/plan-review-${REVIEW_ID}.sh` for the next round. The script should contain the reviewer invocation only; do not run it directly.
**If `REVIEWER_CLI` is `codex`:** **If `REVIEWER_CLI` is `codex`:**
@@ -258,8 +366,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -274,15 +382,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've revised the plan. Updated version is in /tmp/plan-${REVIEW_ID}.md. I've revised the plan. Updated version is below.
$(cat /tmp/plan-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review the full plan. If solid, end with: VERDICT: APPROVED Re-review the full plan using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -300,8 +411,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md
@@ -320,7 +431,7 @@ Return to Step 4.
**Status:** Approved after N round(s) **Status:** Approved after N round(s)
[or] [or]
**Status:** Max rounds (5) reached — not fully approved **Status:** Max rounds (`MAX_ROUNDS`) reached — not fully approved
[Final feedback / remaining concerns] [Final feedback / remaining concerns]
``` ```
@@ -367,19 +478,42 @@ Always instruct the executing agent:
Do not rely on planner-private files during implementation. Do not rely on planner-private files during implementation.
### Phase 10: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from the installed Codex skills directory:
```bash
TELEGRAM_NOTIFY_RUNTIME=~/.codex/skills/reviewer-runtime/notify-telegram.sh
```
On every terminal outcome for the create-plan run (approved, max rounds reached, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "create-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Quick Reference ## Quick Reference
| Phase | Action | Required Output | | Phase | Action | Required Output |
|---|---|---| |---|---|---|
| 1 | Analyze codebase/context | Constraints and known patterns | | 1 | Analyze codebase/context | Constraints and known patterns |
| 2 | Gather requirements (one question at a time) | Confirmed scope and success criteria | | 2 | Gather requirements (one question at a time) | Confirmed scope and success criteria |
| 3 | Configure reviewer CLI and model | `REVIEWER_CLI` and `REVIEWER_MODEL` (or `skip`) | | 3 | Configure reviewer CLI and model | `REVIEWER_CLI`, `REVIEWER_MODEL`, `MAX_ROUNDS` (or `skip`) |
| 4 | Invoke `superpowers:brainstorming` | Chosen design approach | | 4 | Invoke `superpowers:brainstorming` | Chosen design approach |
| 5 | Invoke `superpowers:writing-plans` | Milestones and bite-sized stories | | 5 | Invoke `superpowers:writing-plans` | Milestones and bite-sized stories |
| 6 | Iterative plan review (max 5 rounds) | Reviewer approval or max-rounds warning | | 6 | Iterative plan review (max `MAX_ROUNDS` rounds) | Reviewer approval or max-rounds warning |
| 7 | Initialize `ai_plan/` + `.gitignore` | Local planning workspace ready | | 7 | Initialize `ai_plan/` + `.gitignore` | Local planning workspace ready |
| 8 | Build plan package from templates | Full plan folder with required files | | 8 | Build plan package from templates | Full plan folder with required files |
| 9 | Handoff with runbook-first instruction | Resumable execution context | | 9 | Handoff with runbook-first instruction | Resumable execution context |
| 10 | Send Telegram notification | User notified or notification status reported |
## Execution Rules to Include in Plan (MANDATORY) ## Execution Rules to Include in Plan (MANDATORY)
@@ -406,6 +540,7 @@ Do not rely on planner-private files during implementation.
- Handoff without explicit "read runbook first" direction. - Handoff without explicit "read runbook first" direction.
- Skipping the reviewer phase without explicit user opt-out. - Skipping the reviewer phase without explicit user opt-out.
- Not capturing the Codex session ID for resume in subsequent review rounds. - Not capturing the Codex session ID for resume in subsequent review rounds.
- Using any notification path other than Telegram.
## Rationalizations and Counters ## Rationalizations and Counters
@@ -433,6 +568,7 @@ Do not rely on planner-private files during implementation.
- [ ] `.gitignore` ignore-rule commit was created if needed - [ ] `.gitignore` ignore-rule commit was created if needed
- [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/` - [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/`
- [ ] Reviewer configured or explicitly skipped - [ ] Reviewer configured or explicitly skipped
- [ ] Max review rounds confirmed (default: 10)
- [ ] Plan review completed (approved or max rounds) — or skipped - [ ] Plan review completed (approved or max rounds) — or skipped
- [ ] `original-plan.md` present - [ ] `original-plan.md` present
- [ ] `final-transcript.md` present - [ ] `final-transcript.md` present
@@ -440,3 +576,4 @@ Do not rely on planner-private files during implementation.
- [ ] `story-tracker.md` present - [ ] `story-tracker.md` present
- [ ] `continuation-runbook.md` present - [ ] `continuation-runbook.md` present
- [ ] Handoff explicitly says to read runbook first and execute from plan folder - [ ] Handoff explicitly says to read runbook first and execute from plan folder
- [ ] Telegram notification attempted if configured

View File

@@ -73,7 +73,10 @@ If the user has already specified a reviewer CLI and model (e.g., "create a plan
- For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription) - For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription)
- Accept any model string the user provides - Accept any model string the user provides
Store the chosen `REVIEWER_CLI` and `REVIEWER_MODEL` for Phase 6 (Iterative Plan Review). 3. **Max review rounds for the plan?** (default: 10)
- If the user does not provide a value, set `MAX_ROUNDS=10`.
Store the chosen `REVIEWER_CLI`, `REVIEWER_MODEL`, and `MAX_ROUNDS` for Phase 6 (Iterative Plan Review).
### Phase 4: Design (REQUIRED SUB-SKILL) ### Phase 4: Design (REQUIRED SUB-SKILL)
@@ -86,7 +89,7 @@ Story IDs: `S-{milestone}{sequence}`.
### Phase 6: Iterative Plan Review ### Phase 6: Iterative Plan Review
Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (max 5 rounds). Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (default max 10 rounds).
**Skip this phase entirely if reviewer was set to `skip`.** **Skip this phase entirely if reviewer was set to `skip`.**
@@ -115,10 +118,60 @@ else
fi fi
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Plan to Temp File #### Step 2: Write Plan to Temp File
Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`. Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`.
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script:
@@ -142,8 +195,21 @@ codex exec \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -152,17 +218,34 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/plan-${REVIEW_ID}.md and review the implementation plan. Focus on: "Review the implementation plan below. Focus on:
$(cat /tmp/plan-${REVIEW_ID}.md)
1. Correctness — Will this plan achieve the stated goals? 1. Correctness — Will this plan achieve the stated goals?
2. Risks — What could go wrong? Edge cases? Data loss? 2. Risks — What could go wrong? Edge cases? Data loss?
3. Missing steps — Is anything forgotten? 3. Missing steps — Is anything forgotten?
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -180,8 +263,21 @@ cursor-agent -p \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
``` ```
@@ -201,13 +297,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/plan-review-${REVIEW_ID}.sh \ --command-file /tmp/plan-review-${REVIEW_ID}.sh \
--stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \
--status-file /tmp/plan-review-${REVIEW_ID}.status --status-file /tmp/plan-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/plan-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -217,6 +316,11 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/plan-review-${REVIEW_ID}.runner.out /tmp/plan-review-${REVIEW_ID}.md
```
#### Step 4: Read Review & Check Verdict #### Step 4: Read Review & Check Verdict
@@ -233,17 +337,19 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
[Reviewer feedback] [Reviewer feedback]
``` ```
3. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** → proceed to Phase 7 (Initialize workspace) 5. Check verdict:
- **VERDICT: REVISE** → go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings → proceed to Phase 7 (Initialize workspace)
- No clear verdict but positive / no actionable items → treat as approved - **VERDICT: APPROVED** with only `P3` findings → optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding → go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` → treat as approved
- Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` → surface status log and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` → surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (5) reached → proceed with warning - Max rounds (`MAX_ROUNDS`) reached → present the outcome to the user for a manual decision (proceed or stop)
#### Step 5: Revise the Plan #### Step 5: Revise the Plan
Address each issue the reviewer raised. Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`. Address the reviewer findings in priority order (`P0``P1``P2`, then `P3` when practical). Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`.
Summarize revisions for the user: Summarize revisions for the user:
@@ -254,7 +360,9 @@ Summarize revisions for the user:
If a revision contradicts the user's explicit requirements, skip it and note it for the user. If a revision contradicts the user's explicit requirements, skip it and note it for the user.
#### Step 6: Re-submit to Reviewer (Rounds 2-5) #### Step 6: Re-submit to Reviewer (Rounds 2-N)
Rewrite `/tmp/plan-review-${REVIEW_ID}.sh` for the next round. The script should contain the reviewer invocation only; do not run it directly.
**If `REVIEWER_CLI` is `codex`:** **If `REVIEWER_CLI` is `codex`:**
@@ -268,8 +376,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -284,15 +392,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've revised the plan. Updated version is in /tmp/plan-${REVIEW_ID}.md. I've revised the plan. Updated version is below.
$(cat /tmp/plan-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review the full plan. If solid, end with: VERDICT: APPROVED Re-review the full plan using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -310,8 +421,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md
@@ -330,7 +441,7 @@ Return to Step 4.
**Status:** Approved after N round(s) **Status:** Approved after N round(s)
[or] [or]
**Status:** Max rounds (5) reached — not fully approved **Status:** Max rounds (`MAX_ROUNDS`) reached — not fully approved
[Final feedback / remaining concerns] [Final feedback / remaining concerns]
``` ```
@@ -377,19 +488,46 @@ Always instruct the executing agent:
Do not rely on planner-private files during implementation. Do not rely on planner-private files during implementation.
### Phase 10: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from Cursor's installed skills directory:
```bash
if [ -x .cursor/skills/reviewer-runtime/notify-telegram.sh ]; then
TELEGRAM_NOTIFY_RUNTIME=.cursor/skills/reviewer-runtime/notify-telegram.sh
else
TELEGRAM_NOTIFY_RUNTIME=~/.cursor/skills/reviewer-runtime/notify-telegram.sh
fi
```
On every terminal outcome for the create-plan run (approved, max rounds reached, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "create-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Quick Reference ## Quick Reference
| Phase | Action | Required Output | | Phase | Action | Required Output |
|---|---|---| |---|---|---|
| 1 | Analyze codebase/context | Constraints and known patterns | | 1 | Analyze codebase/context | Constraints and known patterns |
| 2 | Gather requirements (one question at a time) | Confirmed scope and success criteria | | 2 | Gather requirements (one question at a time) | Confirmed scope and success criteria |
| 3 | Configure reviewer CLI and model | `REVIEWER_CLI` and `REVIEWER_MODEL` (or `skip`) | | 3 | Configure reviewer CLI and model | `REVIEWER_CLI`, `REVIEWER_MODEL`, `MAX_ROUNDS` (or `skip`) |
| 4 | Invoke `superpowers:brainstorming` | Chosen design approach | | 4 | Invoke `superpowers:brainstorming` | Chosen design approach |
| 5 | Invoke `superpowers:writing-plans` | Milestones and bite-sized stories | | 5 | Invoke `superpowers:writing-plans` | Milestones and bite-sized stories |
| 6 | Iterative plan review (max 5 rounds) | Reviewer approval or max-rounds warning | | 6 | Iterative plan review (max `MAX_ROUNDS` rounds) | Reviewer approval or max-rounds warning |
| 7 | Initialize `ai_plan/` + `.gitignore` | Local planning workspace ready | | 7 | Initialize `ai_plan/` + `.gitignore` | Local planning workspace ready |
| 8 | Build plan package from templates | Full plan folder with required files | | 8 | Build plan package from templates | Full plan folder with required files |
| 9 | Handoff with runbook-first instruction | Resumable execution context | | 9 | Handoff with runbook-first instruction | Resumable execution context |
| 10 | Send Telegram notification | User notified or notification status reported |
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
@@ -430,6 +568,7 @@ After completing any story:
- Omitting one or more required files in the plan package. - Omitting one or more required files in the plan package.
- Handoff without explicit "read runbook first" direction. - Handoff without explicit "read runbook first" direction.
- Skipping the reviewer phase without explicit user opt-out. - Skipping the reviewer phase without explicit user opt-out.
- Using any notification path other than Telegram.
## Red Flags - Stop and Correct ## Red Flags - Stop and Correct
@@ -447,6 +586,7 @@ After completing any story:
- [ ] `.gitignore` ignore-rule commit was created if needed - [ ] `.gitignore` ignore-rule commit was created if needed
- [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/` - [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/`
- [ ] Reviewer configured or explicitly skipped - [ ] Reviewer configured or explicitly skipped
- [ ] Max review rounds confirmed (default: 10)
- [ ] Plan review completed (approved or max rounds) — or skipped - [ ] Plan review completed (approved or max rounds) — or skipped
- [ ] `original-plan.md` present - [ ] `original-plan.md` present
- [ ] `final-transcript.md` present - [ ] `final-transcript.md` present
@@ -454,6 +594,7 @@ After completing any story:
- [ ] `story-tracker.md` present - [ ] `story-tracker.md` present
- [ ] `continuation-runbook.md` present - [ ] `continuation-runbook.md` present
- [ ] Handoff explicitly says to read runbook first and execute from plan folder - [ ] Handoff explicitly says to read runbook first and execute from plan folder
- [ ] Telegram notification attempted if configured
## Exit Triggers for Question Phase ## Exit Triggers for Question Phase
User says: "ready", "done", "let's plan", "proceed", "enough questions" User says: "ready", "done", "let's plan", "proceed", "enough questions"

View File

@@ -59,7 +59,10 @@ If the user has already specified a reviewer CLI and model (e.g., "create a plan
- For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription) - For `cursor`: **run `cursor-agent models` first** to see your account's available models (availability varies by subscription)
- Accept any model string the user provides - Accept any model string the user provides
Store the chosen `REVIEWER_CLI` and `REVIEWER_MODEL` for Phase 7 (Iterative Plan Review). 3. **Max review rounds for the plan?** (default: 10)
- If the user does not provide a value, set `MAX_ROUNDS=10`.
Store the chosen `REVIEWER_CLI`, `REVIEWER_MODEL`, and `MAX_ROUNDS` for Phase 7 (Iterative Plan Review).
### Phase 5: Design (REQUIRED SUB-SKILL) ### Phase 5: Design (REQUIRED SUB-SKILL)
@@ -78,7 +81,7 @@ Story IDs: `S-{milestone}{sequence}`.
### Phase 7: Iterative Plan Review ### Phase 7: Iterative Plan Review
Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (max 5 rounds). Send the plan to the configured reviewer CLI for feedback. Revise and re-submit until approved (default max 10 rounds).
**Skip this phase entirely if reviewer was set to `skip`.** **Skip this phase entirely if reviewer was set to `skip`.**
@@ -103,10 +106,60 @@ Resolve the shared reviewer helper from the installed OpenCode skills directory:
REVIEWER_RUNTIME=~/.config/opencode/skills/reviewer-runtime/run-review.sh REVIEWER_RUNTIME=~/.config/opencode/skills/reviewer-runtime/run-review.sh
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/plan-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Plan to Temp File #### Step 2: Write Plan to Temp File
Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`. Write the complete plan (milestones, stories, design decisions, specs) to `/tmp/plan-${REVIEW_ID}.md`.
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/plan-review-${REVIEW_ID}.sh` as a bash script:
@@ -130,8 +183,21 @@ codex exec \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -140,17 +206,34 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/plan-${REVIEW_ID}.md and review the implementation plan. Focus on: "Review the implementation plan below. Focus on:
$(cat /tmp/plan-${REVIEW_ID}.md)
1. Correctness — Will this plan achieve the stated goals? 1. Correctness — Will this plan achieve the stated goals?
2. Risks — What could go wrong? Edge cases? Data loss? 2. Risks — What could go wrong? Edge cases? Data loss?
3. Missing steps — Is anything forgotten? 3. Missing steps — Is anything forgotten?
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -168,8 +251,21 @@ cursor-agent -p \
4. Alternatives — Is there a simpler or better approach? 4. Alternatives — Is there a simpler or better approach?
5. Security — Any security concerns? 5. Security — Any security concerns?
Be specific and actionable. If the plan is solid, end with exactly: VERDICT: APPROVED Return exactly these sections in order:
If changes are needed, end with exactly: VERDICT: REVISE" \ ## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
``` ```
@@ -183,13 +279,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/plan-review-${REVIEW_ID}.sh \ --command-file /tmp/plan-review-${REVIEW_ID}.sh \
--stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/plan-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/plan-review-${REVIEW_ID}.stderr \
--status-file /tmp/plan-review-${REVIEW_ID}.status --status-file /tmp/plan-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr bash /tmp/plan-review-${REVIEW_ID}.sh >/tmp/plan-review-${REVIEW_ID}.runner.out 2>/tmp/plan-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/plan-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -199,6 +298,11 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/plan-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/plan-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/plan-review-${REVIEW_ID}.runner.out /tmp/plan-review-${REVIEW_ID}.md
```
#### Step 4: Read Review & Check Verdict #### Step 4: Read Review & Check Verdict
@@ -215,17 +319,19 @@ jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_I
[Reviewer feedback] [Reviewer feedback]
``` ```
3. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** → proceed to Phase 8 (Initialize workspace) 5. Check verdict:
- **VERDICT: REVISE** → go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings → proceed to Phase 8 (Initialize workspace)
- No clear verdict but positive / no actionable items → treat as approved - **VERDICT: APPROVED** with only `P3` findings → optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding → go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` → treat as approved
- Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` → treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` → surface status log and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` → surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (5) reached → proceed with warning - Max rounds (`MAX_ROUNDS`) reached → present the outcome to the user for a manual decision (proceed or stop)
#### Step 5: Revise the Plan #### Step 5: Revise the Plan
Address each issue the reviewer raised. Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`. Address the reviewer findings in priority order (`P0``P1``P2`, then `P3` when practical). Update the plan in conversation context and rewrite `/tmp/plan-${REVIEW_ID}.md`.
Summarize revisions for the user: Summarize revisions for the user:
@@ -236,7 +342,9 @@ Summarize revisions for the user:
If a revision contradicts the user's explicit requirements, skip it and note it for the user. If a revision contradicts the user's explicit requirements, skip it and note it for the user.
#### Step 6: Re-submit to Reviewer (Rounds 2-5) #### Step 6: Re-submit to Reviewer (Rounds 2-N)
Rewrite `/tmp/plan-review-${REVIEW_ID}.sh` for the next round. The script should contain the reviewer invocation only; do not run it directly.
**If `REVIEWER_CLI` is `codex`:** **If `REVIEWER_CLI` is `codex`:**
@@ -250,8 +358,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -266,15 +374,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've revised the plan. Updated version is in /tmp/plan-${REVIEW_ID}.md. I've revised the plan. Updated version is below.
$(cat /tmp/plan-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review the full plan. If solid, end with: VERDICT: APPROVED Re-review the full plan using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -292,8 +403,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.json
jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md jq -r '.result' /tmp/plan-review-${REVIEW_ID}.json > /tmp/plan-review-${REVIEW_ID}.md
@@ -312,7 +423,7 @@ Return to Step 4.
**Status:** Approved after N round(s) **Status:** Approved after N round(s)
[or] [or]
**Status:** Max rounds (5) reached — not fully approved **Status:** Max rounds (`MAX_ROUNDS`) reached — not fully approved
[Final feedback / remaining concerns] [Final feedback / remaining concerns]
``` ```
@@ -357,6 +468,28 @@ Use templates from this skill's `templates/` folder.
Always instruct the executing agent: Always instruct the executing agent:
> Read `ai_plan/YYYY-MM-DD-<short-title>/continuation-runbook.md` first, then execute from `ai_plan` files only. > Read `ai_plan/YYYY-MM-DD-<short-title>/continuation-runbook.md` first, then execute from `ai_plan` files only.
### Phase 11: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from the installed OpenCode skills directory:
```bash
TELEGRAM_NOTIFY_RUNTIME=~/.config/opencode/skills/reviewer-runtime/notify-telegram.sh
```
On every terminal outcome for the create-plan run (approved, max rounds reached, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "create-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
Before starting any story: Before starting any story:
@@ -393,6 +526,7 @@ After completing any story:
- [ ] `.gitignore` ignore-rule commit was created if needed - [ ] `.gitignore` ignore-rule commit was created if needed
- [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/` - [ ] Plan directory created under `ai_plan/YYYY-MM-DD-<short-title>/`
- [ ] Reviewer configured or explicitly skipped - [ ] Reviewer configured or explicitly skipped
- [ ] Max review rounds confirmed (default: 10)
- [ ] Plan review completed (approved or max rounds) — or skipped - [ ] Plan review completed (approved or max rounds) — or skipped
- [ ] `original-plan.md` present - [ ] `original-plan.md` present
- [ ] `final-transcript.md` present - [ ] `final-transcript.md` present
@@ -400,6 +534,7 @@ After completing any story:
- [ ] `story-tracker.md` created with all stories as `pending` - [ ] `story-tracker.md` created with all stories as `pending`
- [ ] `continuation-runbook.md` present - [ ] `continuation-runbook.md` present
- [ ] Handoff explicitly says to read runbook first and execute from plan folder - [ ] Handoff explicitly says to read runbook first and execute from plan folder
- [ ] Telegram notification attempted if configured
## Exit Triggers for Question Phase ## Exit Triggers for Question Phase
User says: "ready", "done", "let's plan", "proceed", "enough questions" User says: "ready", "done", "let's plan", "proceed", "enough questions"

View File

@@ -151,6 +151,20 @@ Resolve the shared runtime helper path before writing the command script:
REVIEWER_RUNTIME=~/.claude/skills/reviewer-runtime/run-review.sh REVIEWER_RUNTIME=~/.claude/skills/reviewer-runtime/run-review.sh
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Review Payload #### Step 2: Write Review Payload
Write to `/tmp/milestone-${REVIEW_ID}.md`: Write to `/tmp/milestone-${REVIEW_ID}.md`:
@@ -176,6 +190,42 @@ Write to `/tmp/milestone-${REVIEW_ID}.md`:
[test output with pass/fail counts] [test output with pass/fail counts]
``` ```
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script:
@@ -201,8 +251,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -211,7 +275,9 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/milestone-${REVIEW_ID}.md and review this milestone implementation. "Review this milestone implementation using the following spec, acceptance criteria, git diff, and verification output:
$(cat /tmp/milestone-${REVIEW_ID}.md)
Evaluate: Evaluate:
1. Correctness — Does the implementation match the milestone spec? 1. Correctness — Does the implementation match the milestone spec?
@@ -220,10 +286,25 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -243,8 +324,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -258,13 +353,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/milestone-review-${REVIEW_ID}.sh \ --command-file /tmp/milestone-review-${REVIEW_ID}.sh \
--stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \
--status-file /tmp/milestone-review-${REVIEW_ID}.status --status-file /tmp/milestone-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/milestone-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -274,6 +372,11 @@ jq -r '.result' /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/milestone-review-${REVIEW_ID}.runner.out /tmp/milestone-review-${REVIEW_ID}.md
```
Fallback is allowed only when the helper is missing or not executable. Fallback is allowed only when the helper is missing or not executable.
@@ -292,17 +395,19 @@ Fallback is allowed only when the helper is missing or not executable.
[Reviewer feedback] [Reviewer feedback]
``` ```
4. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** -> proceed to Phase 4 Step 6 (commit & approve) 5. Check verdict:
- **VERDICT: REVISE** -> go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings -> proceed to Phase 4 Step 6 (commit & approve)
- No clear verdict but positive / no actionable items -> treat as approved - **VERDICT: APPROVED** with only `P3` findings -> optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding -> go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` -> treat as approved
- Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` -> surface status log, note any `stall-warning` heartbeat lines as non-terminal operator hints, and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` -> surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop) - Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop)
#### Step 5: Address Feedback & Re-verify #### Step 5: Address Feedback & Re-verify
1. Address each issue the reviewer raised (do NOT commit yet). 1. Address the reviewer findings in priority order (`P0` -> `P1` -> `P2`, then `P3` when practical) (do NOT commit yet).
2. Re-run verification (lint/typecheck/tests) — all must pass. 2. Re-run verification (lint/typecheck/tests) — all must pass.
3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output. 3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output.
@@ -331,8 +436,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -347,16 +452,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've addressed your feedback. Updated diff and verification output are in /tmp/milestone-${REVIEW_ID}.md. I've addressed your feedback. Updated diff and verification output are below.
$(cat /tmp/milestone-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read \ --strict-mcp-config \
> /tmp/milestone-review-${REVIEW_ID}.md --setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -374,8 +481,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -437,6 +544,28 @@ Present summary:
**Branch:** implement/<plan-folder-name> (merged and deleted) **Branch:** implement/<plan-folder-name> (merged and deleted)
``` ```
### Phase 8: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from the installed Claude Code skills directory:
```bash
TELEGRAM_NOTIFY_RUNTIME=~/.claude/skills/reviewer-runtime/notify-telegram.sh
```
On every terminal outcome for the implement-plan run (fully completed, stopped after max rounds, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "implement-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
**ALWAYS update `story-tracker.md` before/after each story. NEVER proceed with stale tracker state.** **ALWAYS update `story-tracker.md` before/after each story. NEVER proceed with stale tracker state.**
@@ -471,3 +600,4 @@ Note: Commit hashes are backfilled into story Notes after the milestone commit (
- [ ] Final test suite passes - [ ] Final test suite passes
- [ ] Worktree branch merged to parent and worktree deleted - [ ] Worktree branch merged to parent and worktree deleted
- [ ] Story tracker updated with final status - [ ] Story tracker updated with final status
- [ ] Telegram notification attempted if configured

View File

@@ -184,6 +184,20 @@ Resolve the shared runtime helper path before writing the command script:
REVIEWER_RUNTIME=~/.codex/skills/reviewer-runtime/run-review.sh REVIEWER_RUNTIME=~/.codex/skills/reviewer-runtime/run-review.sh
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Review Payload #### Step 2: Write Review Payload
Write to `/tmp/milestone-${REVIEW_ID}.md`: Write to `/tmp/milestone-${REVIEW_ID}.md`:
@@ -209,6 +223,42 @@ Write to `/tmp/milestone-${REVIEW_ID}.md`:
[test output with pass/fail counts] [test output with pass/fail counts]
``` ```
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script:
@@ -234,8 +284,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -244,7 +308,9 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/milestone-${REVIEW_ID}.md and review this milestone implementation. "Review this milestone implementation using the following spec, acceptance criteria, git diff, and verification output:
$(cat /tmp/milestone-${REVIEW_ID}.md)
Evaluate: Evaluate:
1. Correctness — Does the implementation match the milestone spec? 1. Correctness — Does the implementation match the milestone spec?
@@ -253,10 +319,25 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -276,8 +357,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -291,13 +386,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/milestone-review-${REVIEW_ID}.sh \ --command-file /tmp/milestone-review-${REVIEW_ID}.sh \
--stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \
--status-file /tmp/milestone-review-${REVIEW_ID}.status --status-file /tmp/milestone-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/milestone-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -307,6 +405,11 @@ jq -r '.result' /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/milestone-review-${REVIEW_ID}.runner.out /tmp/milestone-review-${REVIEW_ID}.md
```
Fallback is allowed only when the helper is missing or not executable. Fallback is allowed only when the helper is missing or not executable.
@@ -325,17 +428,19 @@ Fallback is allowed only when the helper is missing or not executable.
[Reviewer feedback] [Reviewer feedback]
``` ```
4. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** -> proceed to Phase 4 Step 6 (commit & approve) 5. Check verdict:
- **VERDICT: REVISE** -> go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings -> proceed to Phase 4 Step 6 (commit & approve)
- No clear verdict but positive / no actionable items -> treat as approved - **VERDICT: APPROVED** with only `P3` findings -> optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding -> go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` -> treat as approved
- Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` -> surface status log, note any `stall-warning` heartbeat lines as non-terminal operator hints, and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` -> surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop) - Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop)
#### Step 5: Address Feedback & Re-verify #### Step 5: Address Feedback & Re-verify
1. Address each issue the reviewer raised (do NOT commit yet). 1. Address the reviewer findings in priority order (`P0` -> `P1` -> `P2`, then `P3` when practical) (do NOT commit yet).
2. Re-run verification (lint/typecheck/tests) — all must pass. 2. Re-run verification (lint/typecheck/tests) — all must pass.
3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output. 3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output.
@@ -364,8 +469,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -380,16 +485,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've addressed your feedback. Updated diff and verification output are in /tmp/milestone-${REVIEW_ID}.md. I've addressed your feedback. Updated diff and verification output are below.
$(cat /tmp/milestone-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read \ --strict-mcp-config \
> /tmp/milestone-review-${REVIEW_ID}.md --setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -407,8 +514,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -470,6 +577,28 @@ Present summary:
**Branch:** implement/<plan-folder-name> (merged and deleted) **Branch:** implement/<plan-folder-name> (merged and deleted)
``` ```
### Phase 8: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from the installed Codex skills directory:
```bash
TELEGRAM_NOTIFY_RUNTIME=~/.codex/skills/reviewer-runtime/notify-telegram.sh
```
On every terminal outcome for the implement-plan run (fully completed, stopped after max rounds, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "implement-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Quick Reference ## Quick Reference
| Phase | Action | Required Output | | Phase | Action | Required Output |
@@ -481,6 +610,7 @@ Present summary:
| 5 | Milestone review loop (per milestone) | Reviewer approval or max rounds + user override | | 5 | Milestone review loop (per milestone) | Reviewer approval or max rounds + user override |
| 6 | Invoke `superpowers:finishing-a-development-branch` | Branch merged to parent, worktree deleted | | 6 | Invoke `superpowers:finishing-a-development-branch` | Branch merged to parent, worktree deleted |
| 7 | Final report | Summary presented | | 7 | Final report | Summary presented |
| 8 | Send Telegram notification | User notified or notification status reported |
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
@@ -508,6 +638,7 @@ Note: Commit hashes are backfilled into story Notes after the milestone commit (
- Not capturing the Codex session ID for resume in subsequent review rounds. - Not capturing the Codex session ID for resume in subsequent review rounds.
- Forgetting to update `story-tracker.md` between stories. - Forgetting to update `story-tracker.md` between stories.
- Creating a new worktree when one already exists for a resumed plan. - Creating a new worktree when one already exists for a resumed plan.
- Using any notification path other than Telegram.
## Rationalizations and Counters ## Rationalizations and Counters
@@ -545,3 +676,4 @@ Note: Commit hashes are backfilled into story Notes after the milestone commit (
- [ ] Final test suite passes - [ ] Final test suite passes
- [ ] Worktree branch merged to parent and worktree deleted - [ ] Worktree branch merged to parent and worktree deleted
- [ ] Story tracker updated with final status - [ ] Story tracker updated with final status
- [ ] Telegram notification attempted if configured

View File

@@ -188,6 +188,20 @@ else
fi fi
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Review Payload #### Step 2: Write Review Payload
Write to `/tmp/milestone-${REVIEW_ID}.md`: Write to `/tmp/milestone-${REVIEW_ID}.md`:
@@ -213,6 +227,42 @@ Write to `/tmp/milestone-${REVIEW_ID}.md`:
[test output with pass/fail counts] [test output with pass/fail counts]
``` ```
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script:
@@ -238,8 +288,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -248,7 +312,9 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/milestone-${REVIEW_ID}.md and review this milestone implementation. "Review this milestone implementation using the following spec, acceptance criteria, git diff, and verification output:
$(cat /tmp/milestone-${REVIEW_ID}.md)
Evaluate: Evaluate:
1. Correctness — Does the implementation match the milestone spec? 1. Correctness — Does the implementation match the milestone spec?
@@ -257,10 +323,25 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -280,8 +361,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -301,13 +396,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/milestone-review-${REVIEW_ID}.sh \ --command-file /tmp/milestone-review-${REVIEW_ID}.sh \
--stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \
--status-file /tmp/milestone-review-${REVIEW_ID}.status --status-file /tmp/milestone-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/milestone-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -317,6 +415,11 @@ jq -r '.result' /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/milestone-review-${REVIEW_ID}.runner.out /tmp/milestone-review-${REVIEW_ID}.md
```
Fallback is allowed only when the helper is missing or not executable. Fallback is allowed only when the helper is missing or not executable.
@@ -335,17 +438,19 @@ Fallback is allowed only when the helper is missing or not executable.
[Reviewer feedback] [Reviewer feedback]
``` ```
4. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** -> proceed to Phase 4 Step 6 (commit & approve) 5. Check verdict:
- **VERDICT: REVISE** -> go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings -> proceed to Phase 4 Step 6 (commit & approve)
- No clear verdict but positive / no actionable items -> treat as approved - **VERDICT: APPROVED** with only `P3` findings -> optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding -> go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` -> treat as approved
- Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` -> surface status log, note any `stall-warning` heartbeat lines as non-terminal operator hints, and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` -> surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop) - Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop)
#### Step 5: Address Feedback & Re-verify #### Step 5: Address Feedback & Re-verify
1. Address each issue the reviewer raised (do NOT commit yet). 1. Address the reviewer findings in priority order (`P0` -> `P1` -> `P2`, then `P3` when practical) (do NOT commit yet).
2. Re-run verification (lint/typecheck/tests) — all must pass. 2. Re-run verification (lint/typecheck/tests) — all must pass.
3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output. 3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output.
@@ -374,8 +479,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -390,16 +495,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've addressed your feedback. Updated diff and verification output are in /tmp/milestone-${REVIEW_ID}.md. I've addressed your feedback. Updated diff and verification output are below.
$(cat /tmp/milestone-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read \ --strict-mcp-config \
> /tmp/milestone-review-${REVIEW_ID}.md --setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -417,8 +524,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -480,6 +587,32 @@ Present summary:
**Branch:** implement/<plan-folder-name> (merged and deleted) **Branch:** implement/<plan-folder-name> (merged and deleted)
``` ```
### Phase 8: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from Cursor's installed skills directory:
```bash
if [ -x .cursor/skills/reviewer-runtime/notify-telegram.sh ]; then
TELEGRAM_NOTIFY_RUNTIME=.cursor/skills/reviewer-runtime/notify-telegram.sh
else
TELEGRAM_NOTIFY_RUNTIME=~/.cursor/skills/reviewer-runtime/notify-telegram.sh
fi
```
On every terminal outcome for the implement-plan run (fully completed, stopped after max rounds, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "implement-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Quick Reference ## Quick Reference
| Phase | Action | Required Output | | Phase | Action | Required Output |
@@ -491,6 +624,7 @@ Present summary:
| 5 | Milestone review loop (per milestone) | Reviewer approval or max rounds + user override | | 5 | Milestone review loop (per milestone) | Reviewer approval or max rounds + user override |
| 6 | Invoke `superpowers:finishing-a-development-branch` | Branch merged to parent, worktree deleted | | 6 | Invoke `superpowers:finishing-a-development-branch` | Branch merged to parent, worktree deleted |
| 7 | Final report | Summary presented | | 7 | Final report | Summary presented |
| 8 | Send Telegram notification | User notified or notification status reported |
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
@@ -516,6 +650,7 @@ Note: Commit hashes are backfilled into story Notes after the milestone commit (
- Skipping worktree setup and working directly on the main branch. - Skipping worktree setup and working directly on the main branch.
- Forgetting to update `story-tracker.md` between stories. - Forgetting to update `story-tracker.md` between stories.
- Creating a new worktree when one already exists for a resumed plan. - Creating a new worktree when one already exists for a resumed plan.
- Using any notification path other than Telegram.
## Red Flags - Stop and Correct ## Red Flags - Stop and Correct
@@ -544,3 +679,4 @@ Note: Commit hashes are backfilled into story Notes after the milestone commit (
- [ ] Final test suite passes - [ ] Final test suite passes
- [ ] Worktree branch merged to parent and worktree deleted - [ ] Worktree branch merged to parent and worktree deleted
- [ ] Story tracker updated with final status - [ ] Story tracker updated with final status
- [ ] Telegram notification attempted if configured

View File

@@ -169,6 +169,20 @@ Resolve the shared runtime helper path before writing the command script:
REVIEWER_RUNTIME=~/.config/opencode/skills/reviewer-runtime/run-review.sh REVIEWER_RUNTIME=~/.config/opencode/skills/reviewer-runtime/run-review.sh
``` ```
Set helper success-artifact args before writing the command script:
```bash
HELPER_SUCCESS_FILE_ARGS=()
case "$REVIEWER_CLI" in
codex)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.md)
;;
cursor)
HELPER_SUCCESS_FILE_ARGS+=(--success-file /tmp/milestone-review-${REVIEW_ID}.json)
;;
esac
```
#### Step 2: Write Review Payload #### Step 2: Write Review Payload
Write to `/tmp/milestone-${REVIEW_ID}.md`: Write to `/tmp/milestone-${REVIEW_ID}.md`:
@@ -194,6 +208,42 @@ Write to `/tmp/milestone-${REVIEW_ID}.md`:
[test output with pass/fail counts] [test output with pass/fail counts]
``` ```
#### Review Contract (Applies to Every Round)
The reviewer response must use this structure:
```text
## Summary
...
## Findings
### P0
- ...
### P1
- ...
### P2
- ...
### P3
- ...
## Verdict
VERDICT: APPROVED
```
Rules:
- Order findings from `P0` to `P3`.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- Use `- None.` when a severity has no findings.
- `VERDICT: APPROVED` is allowed only when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking.
- The calling agent should still try to fix `P3` findings when they are cheap and safe.
#### Liveness Contract (Applies While Review Is Running)
- The shared reviewer runtime emits `state=in-progress note="In progress N"` heartbeats every 60 seconds while the reviewer child is alive.
- The calling agent must keep waiting as long as a fresh `In progress N` heartbeat keeps arriving roughly once per minute.
- Do not abort just because the review is slow, a soft timeout fired, or a `stall-warning` line appears, as long as the `In progress N` heartbeat continues.
- Treat missing heartbeats, `state=failed`, `state=completed-empty-output`, and `state=needs-operator-decision` as escalation signals.
#### Step 3: Submit to Reviewer (Round 1) #### Step 3: Submit to Reviewer (Round 1)
Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script: Write the reviewer invocation to `/tmp/milestone-review-${REVIEW_ID}.sh` as a bash script:
@@ -219,8 +269,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking."
``` ```
Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds. Do not try to capture the Codex session ID yet. When using the helper, extract it from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the command completes (look for `session id: <uuid>`), then store it as `CODEX_SESSION_ID` for resume in subsequent rounds.
@@ -229,7 +293,9 @@ Do not try to capture the Codex session ID yet. When using the helper, extract i
```bash ```bash
claude -p \ claude -p \
"Read the file /tmp/milestone-${REVIEW_ID}.md and review this milestone implementation. "Review this milestone implementation using the following spec, acceptance criteria, git diff, and verification output:
$(cat /tmp/milestone-${REVIEW_ID}.md)
Evaluate: Evaluate:
1. Correctness — Does the implementation match the milestone spec? 1. Correctness — Does the implementation match the milestone spec?
@@ -238,10 +304,25 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read --strict-mcp-config \
--setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -261,8 +342,22 @@ Evaluate:
4. Test coverage — Are changes adequately tested? 4. Test coverage — Are changes adequately tested?
5. Security — Any security concerns introduced? 5. Security — Any security concerns introduced?
Be specific and actionable. If solid, end with exactly: VERDICT: APPROVED
If changes are needed, end with exactly: VERDICT: REVISE" \ Return exactly these sections in order:
## Summary
## Findings
### P0
### P1
### P2
### P3
## Verdict
Rules:
- Order findings from highest severity to lowest.
- Use `- None.` when a severity has no findings.
- `P0` = total blocker, `P1` = major risk, `P2` = must-fix before approval, `P3` = cosmetic / nice to have.
- End with exactly one verdict line: `VERDICT: APPROVED` or `VERDICT: REVISE`
- `VERDICT: APPROVED` is allowed only when there are no `P0`, `P1`, or `P2` findings. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -276,13 +371,16 @@ if [ -x "$REVIEWER_RUNTIME" ]; then
--command-file /tmp/milestone-review-${REVIEW_ID}.sh \ --command-file /tmp/milestone-review-${REVIEW_ID}.sh \
--stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \ --stdout-file /tmp/milestone-review-${REVIEW_ID}.runner.out \
--stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \ --stderr-file /tmp/milestone-review-${REVIEW_ID}.stderr \
--status-file /tmp/milestone-review-${REVIEW_ID}.status --status-file /tmp/milestone-review-${REVIEW_ID}.status \
"${HELPER_SUCCESS_FILE_ARGS[@]}"
else else
echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2 echo "Warning: reviewer runtime helper not found at $REVIEWER_RUNTIME; falling back to direct synchronous review." >&2
bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr bash /tmp/milestone-review-${REVIEW_ID}.sh >/tmp/milestone-review-${REVIEW_ID}.runner.out 2>/tmp/milestone-review-${REVIEW_ID}.stderr
fi fi
``` ```
Run the helper in the foreground and watch its live stdout for `state=in-progress` heartbeats. If your agent environment buffers command output until exit, start the helper in the background and poll `/tmp/milestone-review-${REVIEW_ID}.status` separately instead of treating heartbeats as post-hoc-only data.
After the command completes: After the command completes:
- If `REVIEWER_CLI=cursor`, extract the final review text: - If `REVIEWER_CLI=cursor`, extract the final review text:
@@ -292,6 +390,11 @@ jq -r '.result' /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-
``` ```
- If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing. - If `REVIEWER_CLI=codex`, extract `CODEX_SESSION_ID` from `/tmp/milestone-review-${REVIEW_ID}.runner.out` after the helper or fallback run. If the review text is only in `.runner.out`, move or copy the actual review body into `/tmp/milestone-review-${REVIEW_ID}.md` before verdict parsing.
- If `REVIEWER_CLI=claude`, promote stdout captured by the helper or fallback runner into the markdown review file:
```bash
cp /tmp/milestone-review-${REVIEW_ID}.runner.out /tmp/milestone-review-${REVIEW_ID}.md
```
Fallback is allowed only when the helper is missing or not executable. Fallback is allowed only when the helper is missing or not executable.
@@ -310,17 +413,19 @@ Fallback is allowed only when the helper is missing or not executable.
[Reviewer feedback] [Reviewer feedback]
``` ```
4. Check verdict: 4. While the reviewer is still running, keep waiting as long as fresh `state=in-progress note="In progress N"` heartbeats continue to appear roughly once per minute.
- **VERDICT: APPROVED** -> proceed to Phase 5 Step 6 (commit & approve) 5. Check verdict:
- **VERDICT: REVISE** -> go to Step 5 - **VERDICT: APPROVED** with no `P0`, `P1`, or `P2` findings -> proceed to Phase 5 Step 6 (commit & approve)
- No clear verdict but positive / no actionable items -> treat as approved - **VERDICT: APPROVED** with only `P3` findings -> optionally fix the `P3` items if they are cheap and safe, then proceed
- **VERDICT: REVISE** or any `P0`, `P1`, or `P2` finding -> go to Step 5
- No clear verdict but `P0`, `P1`, and `P2` are all `- None.` -> treat as approved
- Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry - Helper state `completed-empty-output` -> treat as failed review attempt, surface stderr/status, fix invocation or prompt handling, then retry
- Helper state `needs-operator-decision` -> surface status log, note any `stall-warning` heartbeat lines as non-terminal operator hints, and decide whether to keep waiting, abort, or retry with different helper parameters - Helper state `needs-operator-decision` -> surface status log and decide whether to extend the timeout, abort, or retry with different helper parameters
- Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop) - Max rounds (`MAX_ROUNDS`) reached -> present to user for manual decision (proceed or stop)
#### Step 5: Address Feedback & Re-verify #### Step 5: Address Feedback & Re-verify
1. Address each issue the reviewer raised (do NOT commit yet). 1. Address the reviewer findings in priority order (`P0` -> `P1` -> `P2`, then `P3` when practical) (do NOT commit yet).
2. Re-run verification (lint/typecheck/tests) — all must pass. 2. Re-run verification (lint/typecheck/tests) — all must pass.
3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output. 3. Update `/tmp/milestone-${REVIEW_ID}.md` with new diff and verification output.
@@ -349,8 +454,8 @@ codex exec resume ${CODEX_SESSION_ID} \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking."
``` ```
If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds. If resume fails (session expired), fall back to fresh `codex exec` with context about prior rounds.
@@ -365,16 +470,18 @@ claude -p \
Previous feedback summary: [key points from last review] Previous feedback summary: [key points from last review]
I've addressed your feedback. Updated diff and verification output are in /tmp/milestone-${REVIEW_ID}.md. I've addressed your feedback. Updated diff and verification output are below.
$(cat /tmp/milestone-${REVIEW_ID}.md)
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
--model ${REVIEWER_MODEL} \ --model ${REVIEWER_MODEL} \
--allowedTools Read \ --strict-mcp-config \
> /tmp/milestone-review-${REVIEW_ID}.md --setting-sources user
``` ```
**If `REVIEWER_CLI` is `cursor`:** **If `REVIEWER_CLI` is `cursor`:**
@@ -392,8 +499,8 @@ cursor-agent --resume ${CURSOR_SESSION_ID} -p \
Changes made: Changes made:
[List specific changes] [List specific changes]
Re-review. If solid, end with: VERDICT: APPROVED Re-review using the same `## Summary`, `## Findings`, and `## Verdict` structure as before.
If more changes needed, end with: VERDICT: REVISE" \ Keep findings ordered `P0` to `P3`, use `- None.` when a severity has no findings, and only use `VERDICT: APPROVED` when no `P0`, `P1`, or `P2` findings remain. `P3` findings are non-blocking." \
> /tmp/milestone-review-${REVIEW_ID}.json > /tmp/milestone-review-${REVIEW_ID}.json
``` ```
@@ -455,6 +562,28 @@ Present summary:
**Branch:** implement/<plan-folder-name> (merged and deleted) **Branch:** implement/<plan-folder-name> (merged and deleted)
``` ```
### Phase 9: Telegram Notification (MANDATORY)
Resolve the Telegram notifier helper from the installed OpenCode skills directory:
```bash
TELEGRAM_NOTIFY_RUNTIME=~/.config/opencode/skills/reviewer-runtime/notify-telegram.sh
```
On every terminal outcome for the implement-plan run (fully completed, stopped after max rounds, skipped reviewer, or failure), send a Telegram summary if the helper exists and both `TELEGRAM_BOT_TOKEN` and `TELEGRAM_CHAT_ID` are configured:
```bash
if [ -x "$TELEGRAM_NOTIFY_RUNTIME" ] && [ -n "${TELEGRAM_BOT_TOKEN:-}" ] && [ -n "${TELEGRAM_CHAT_ID:-}" ]; then
"$TELEGRAM_NOTIFY_RUNTIME" --message "implement-plan completed for <plan-folder-name>: <status summary>"
fi
```
Rules:
- Telegram is the only supported notification path. Do not use desktop notifications, `say`, email, or any other notifier.
- Notification failures are non-blocking, but they must be surfaced to the user.
- Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.
- If Telegram is not configured, state that no Telegram notification was sent.
## Tracker Discipline (MANDATORY) ## Tracker Discipline (MANDATORY)
Before starting any story: Before starting any story:
@@ -487,3 +616,4 @@ Note: Commit hashes are backfilled into story Notes after the milestone commit (
- [ ] Final test suite passes - [ ] Final test suite passes
- [ ] Worktree branch merged to parent and worktree deleted - [ ] Worktree branch merged to parent and worktree deleted
- [ ] Story tracker updated with final status - [ ] Story tracker updated with final status
- [ ] Telegram notification attempted if configured

View File

@@ -0,0 +1,99 @@
#!/usr/bin/env bash
# notify-telegram.sh — send a text message to a Telegram chat via the Bot API.
# Configuration comes from CLI flags with TELEGRAM_* environment fallbacks.
set -euo pipefail

# Bot API endpoint and formatting defaults.
DEFAULT_API_BASE_URL="https://api.telegram.org"
DEFAULT_PARSE_MODE="HTML"
# Telegram rejects sendMessage texts longer than 4096 characters; longer
# input is truncated in parse_args before sending.
MAX_MESSAGE_LENGTH=4096

# Flag values fall back to the TELEGRAM_* environment variables.
BOT_TOKEN=${TELEGRAM_BOT_TOKEN:-}
CHAT_ID=${TELEGRAM_CHAT_ID:-}
API_BASE_URL=${TELEGRAM_API_BASE_URL:-$DEFAULT_API_BASE_URL}
PARSE_MODE=${TELEGRAM_PARSE_MODE:-$DEFAULT_PARSE_MODE}

# Populated by parse_args; --message and --message-file are mutually exclusive.
MESSAGE=""
MESSAGE_FILE=""
# Print CLI usage to stdout. The heredoc delimiter is quoted so the body is
# emitted verbatim with no expansion.
usage() {
cat <<'EOF'
Usage:
notify-telegram.sh --message <text> [--bot-token <token>] [--chat-id <id>] [--api-base-url <url>]
notify-telegram.sh --message-file <path> [--bot-token <token>] [--chat-id <id>] [--api-base-url <url>]
Environment fallbacks:
TELEGRAM_BOT_TOKEN
TELEGRAM_CHAT_ID
TELEGRAM_API_BASE_URL
TELEGRAM_PARSE_MODE
EOF
}
# Report a usage error on stderr, print the usage help, and exit with status 2.
fail_usage() {
  local reason="Error: $*"
  {
    echo "$reason"
    usage
  } >&2
  exit 2
}
# Parse CLI flags into the global configuration variables, then validate them.
#
# Globals written: BOT_TOKEN, CHAT_ID, API_BASE_URL, MESSAGE, MESSAGE_FILE.
# Exits via fail_usage (status 2) on any invalid or incomplete invocation.
parse_args() {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --bot-token|--chat-id|--api-base-url|--message|--message-file)
        # Every recognized flag takes exactly one value. Fail with a clear
        # usage message when it is missing instead of letting `shift 2`
        # abort under `set -e` with an opaque "shift count" error.
        [[ $# -ge 2 ]] || fail_usage "missing value for $1"
        case "$1" in
          --bot-token) BOT_TOKEN=$2 ;;
          --chat-id) CHAT_ID=$2 ;;
          --api-base-url) API_BASE_URL=$2 ;;
          --message) MESSAGE=$2 ;;
          --message-file) MESSAGE_FILE=$2 ;;
        esac
        shift 2
        ;;
      --help|-h)
        usage
        exit 0
        ;;
      *)
        fail_usage "unknown argument: $1"
        ;;
    esac
  done

  if [[ -n "$MESSAGE" && -n "$MESSAGE_FILE" ]]; then
    fail_usage "use either --message or --message-file, not both"
  fi
  if [[ -n "$MESSAGE_FILE" ]]; then
    [[ -r "$MESSAGE_FILE" ]] || fail_usage "message file is not readable: $MESSAGE_FILE"
    # $(<file) reads the file without a subshell command; trailing newlines
    # are stripped by command substitution, which is fine for a chat message.
    MESSAGE=$(<"$MESSAGE_FILE")
  fi

  [[ -n "$MESSAGE" ]] || fail_usage "message is required"
  [[ -n "$BOT_TOKEN" ]] || fail_usage "bot token is required (use --bot-token or TELEGRAM_BOT_TOKEN)"
  [[ -n "$CHAT_ID" ]] || fail_usage "chat id is required (use --chat-id or TELEGRAM_CHAT_ID)"
  command -v curl >/dev/null 2>&1 || fail_usage "curl is required"

  # Telegram rejects sendMessage texts over MAX_MESSAGE_LENGTH; truncate.
  if [[ ${#MESSAGE} -gt "$MAX_MESSAGE_LENGTH" ]]; then
    MESSAGE=${MESSAGE:0:$MAX_MESSAGE_LENGTH}
  fi
}
# Deliver the configured message through the Bot API sendMessage endpoint.
# Response body is discarded; curl -f makes HTTP errors fail the script.
main() {
  parse_args "$@"

  local endpoint="${API_BASE_URL%/}/bot${BOT_TOKEN}/sendMessage"
  curl -fsS -X POST "$endpoint" \
    --data-urlencode "chat_id=${CHAT_ID}" \
    --data-urlencode "text=${MESSAGE}" \
    --data-urlencode "parse_mode=${PARSE_MODE}" \
    --data-urlencode "disable_web_page_preview=true" \
    >/dev/null
}
main "$@"

View File

@@ -2,6 +2,7 @@
set -euo pipefail set -euo pipefail
DEFAULT_POLL_SECONDS=10 DEFAULT_POLL_SECONDS=10
DEFAULT_HEARTBEAT_SECONDS=60
DEFAULT_SOFT_TIMEOUT_SECONDS=600 DEFAULT_SOFT_TIMEOUT_SECONDS=600
DEFAULT_STALL_WARNING_SECONDS=300 DEFAULT_STALL_WARNING_SECONDS=300
DEFAULT_HARD_TIMEOUT_SECONDS=1800 DEFAULT_HARD_TIMEOUT_SECONDS=1800
@@ -12,7 +13,9 @@ COMMAND_FILE=""
STDOUT_FILE="" STDOUT_FILE=""
STDERR_FILE="" STDERR_FILE=""
STATUS_FILE="" STATUS_FILE=""
SUCCESS_FILES=()
POLL_SECONDS=$DEFAULT_POLL_SECONDS POLL_SECONDS=$DEFAULT_POLL_SECONDS
HEARTBEAT_SECONDS=$DEFAULT_HEARTBEAT_SECONDS
SOFT_TIMEOUT_SECONDS=$DEFAULT_SOFT_TIMEOUT_SECONDS SOFT_TIMEOUT_SECONDS=$DEFAULT_SOFT_TIMEOUT_SECONDS
STALL_WARNING_SECONDS=$DEFAULT_STALL_WARNING_SECONDS STALL_WARNING_SECONDS=$DEFAULT_STALL_WARNING_SECONDS
HARD_TIMEOUT_SECONDS=$DEFAULT_HARD_TIMEOUT_SECONDS HARD_TIMEOUT_SECONDS=$DEFAULT_HARD_TIMEOUT_SECONDS
@@ -29,7 +32,9 @@ Usage:
--stdout-file <path> \ --stdout-file <path> \
--stderr-file <path> \ --stderr-file <path> \
--status-file <path> \ --status-file <path> \
[--success-file <path>] \
[--poll-seconds <int>] \ [--poll-seconds <int>] \
[--heartbeat-seconds <int>] \
[--soft-timeout-seconds <int>] \ [--soft-timeout-seconds <int>] \
[--stall-warning-seconds <int>] \ [--stall-warning-seconds <int>] \
[--hard-timeout-seconds <int>] [--hard-timeout-seconds <int>]
@@ -55,6 +60,25 @@ escape_note() {
printf '%s' "$note" printf '%s' "$note"
} }
# Render SUCCESS_FILES as a single ", "-separated string (empty when none).
join_success_files() {
  # Guard the empty case explicitly: expanding an empty array under
  # `set -u` is an error on older bash releases.
  if [[ ${#SUCCESS_FILES[@]} -eq 0 ]]; then
    printf ''
    return 0
  fi

  local joined="" entry
  for entry in "${SUCCESS_FILES[@]}"; do
    # ${joined:+, } prepends the separator only after the first entry.
    joined+="${joined:+, }$entry"
  done
  printf '%s' "$joined"
}
iso_timestamp() { iso_timestamp() {
date -u +"%Y-%m-%dT%H:%M:%SZ" date -u +"%Y-%m-%dT%H:%M:%SZ"
} }
@@ -147,10 +171,18 @@ parse_args() {
STATUS_FILE=${2:-} STATUS_FILE=${2:-}
shift 2 shift 2
;; ;;
--success-file)
SUCCESS_FILES+=("${2:-}")
shift 2
;;
--poll-seconds) --poll-seconds)
POLL_SECONDS=${2:-} POLL_SECONDS=${2:-}
shift 2 shift 2
;; ;;
--heartbeat-seconds)
HEARTBEAT_SECONDS=${2:-}
shift 2
;;
--soft-timeout-seconds) --soft-timeout-seconds)
SOFT_TIMEOUT_SECONDS=${2:-} SOFT_TIMEOUT_SECONDS=${2:-}
shift 2 shift 2
@@ -179,11 +211,13 @@ parse_args() {
[[ -n "$STATUS_FILE" ]] || fail_usage "--status-file is required" [[ -n "$STATUS_FILE" ]] || fail_usage "--status-file is required"
require_integer "poll-seconds" "$POLL_SECONDS" require_integer "poll-seconds" "$POLL_SECONDS"
require_integer "heartbeat-seconds" "$HEARTBEAT_SECONDS"
require_integer "soft-timeout-seconds" "$SOFT_TIMEOUT_SECONDS" require_integer "soft-timeout-seconds" "$SOFT_TIMEOUT_SECONDS"
require_integer "stall-warning-seconds" "$STALL_WARNING_SECONDS" require_integer "stall-warning-seconds" "$STALL_WARNING_SECONDS"
require_integer "hard-timeout-seconds" "$HARD_TIMEOUT_SECONDS" require_integer "hard-timeout-seconds" "$HARD_TIMEOUT_SECONDS"
[[ "$POLL_SECONDS" -gt 0 ]] || fail_usage "poll-seconds must be > 0" [[ "$POLL_SECONDS" -gt 0 ]] || fail_usage "poll-seconds must be > 0"
[[ "$HEARTBEAT_SECONDS" -gt 0 ]] || fail_usage "heartbeat-seconds must be > 0"
[[ "$SOFT_TIMEOUT_SECONDS" -gt 0 ]] || fail_usage "soft-timeout-seconds must be > 0" [[ "$SOFT_TIMEOUT_SECONDS" -gt 0 ]] || fail_usage "soft-timeout-seconds must be > 0"
[[ "$STALL_WARNING_SECONDS" -gt 0 ]] || fail_usage "stall-warning-seconds must be > 0" [[ "$STALL_WARNING_SECONDS" -gt 0 ]] || fail_usage "stall-warning-seconds must be > 0"
[[ "$HARD_TIMEOUT_SECONDS" -gt 0 ]] || fail_usage "hard-timeout-seconds must be > 0" [[ "$HARD_TIMEOUT_SECONDS" -gt 0 ]] || fail_usage "hard-timeout-seconds must be > 0"
@@ -227,8 +261,10 @@ main() {
local last_stdout_bytes=0 local last_stdout_bytes=0
local last_stderr_bytes=0 local last_stderr_bytes=0
local last_output_change_time=$START_TIME local last_output_change_time=$START_TIME
local last_heartbeat_time=$START_TIME
local soft_timeout_logged=0 local soft_timeout_logged=0
local stall_warning_logged=0 local stall_warning_logged=0
local heartbeat_count=0
while kill -0 "$CHILD_PID" 2>/dev/null; do while kill -0 "$CHILD_PID" 2>/dev/null; do
sleep "$POLL_SECONDS" sleep "$POLL_SECONDS"
@@ -239,6 +275,12 @@ main() {
stdout_bytes=$(file_bytes "$STDOUT_FILE") stdout_bytes=$(file_bytes "$STDOUT_FILE")
stderr_bytes=$(file_bytes "$STDERR_FILE") stderr_bytes=$(file_bytes "$STDERR_FILE")
if [[ $((now - last_heartbeat_time)) -ge "$HEARTBEAT_SECONDS" ]]; then
heartbeat_count=$((heartbeat_count + 1))
append_status info in-progress "In progress ${heartbeat_count}"
last_heartbeat_time=$now
fi
if [[ "$stdout_bytes" -ne "$last_stdout_bytes" || "$stderr_bytes" -ne "$last_stderr_bytes" ]]; then if [[ "$stdout_bytes" -ne "$last_stdout_bytes" || "$stderr_bytes" -ne "$last_stderr_bytes" ]]; then
last_output_change_time=$now last_output_change_time=$now
stall_warning_logged=0 stall_warning_logged=0
@@ -285,6 +327,7 @@ main() {
trap - EXIT trap - EXIT
local final_stdout_bytes final_stderr_bytes local final_stdout_bytes final_stderr_bytes
local success_file success_bytes
final_stdout_bytes=$(file_bytes "$STDOUT_FILE") final_stdout_bytes=$(file_bytes "$STDOUT_FILE")
final_stderr_bytes=$(file_bytes "$STDERR_FILE") final_stderr_bytes=$(file_bytes "$STDERR_FILE")
@@ -294,6 +337,16 @@ main() {
exit 0 exit 0
fi fi
if [[ ${#SUCCESS_FILES[@]} -gt 0 ]]; then
for success_file in "${SUCCESS_FILES[@]}"; do
success_bytes=$(file_bytes "$success_file")
if [[ "$success_bytes" -gt 0 ]]; then
append_status info completed "reviewer completed successfully via success file $(join_success_files)"
exit 0
fi
done
fi
append_status error completed-empty-output "reviewer exited successfully with empty stdout" append_status error completed-empty-output "reviewer exited successfully with empty stdout"
exit "$EXIT_COMPLETED_EMPTY_OUTPUT" exit "$EXIT_COMPLETED_EMPTY_OUTPUT"
fi fi

View File

@@ -0,0 +1,53 @@
#!/usr/bin/env bash
# Checks that the Claude reviewer templates in every SKILL.md use isolated
# settings flags and the inline-payload prompt style (no Read-tool prompts).
set -euo pipefail

# Resolve the repository root relative to this script's own location.
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
REPO_ROOT=$(cd "$SCRIPT_DIR/../../.." && pwd)
# Print a failure message to stderr and abort the whole test run.
fail() {
  local message="FAIL: $*"
  echo "$message" >&2
  exit 1
}
# Assert that $2 occurs as a fixed (non-regex) string somewhere in file $1.
assert_contains() {
  local file=$1 pattern=$2
  grep -qF -- "$pattern" "$file" || fail "expected '$pattern' in $file"
}
# Assert that $2 does NOT occur as a fixed (non-regex) string in file $1.
assert_not_contains() {
  local file=$1 pattern=$2
  # `! grep || fail` keeps the function's exit status 0 when the pattern is
  # absent, which matters because callers run under `set -e`.
  ! grep -qF -- "$pattern" "$file" || fail "did not expect '$pattern' in $file"
}
# Verify one SKILL.md carries every required reviewer-template marker and
# none of the retired Read-tool invocation flags.
check_skill_file() {
  local file=$1
  local required_markers=(
    'claude -p \'
    '$(cat /tmp/'
    '--strict-mcp-config'
    '--setting-sources user'
    '### P0'
    'In progress N'
    'notify-telegram.sh'
    'Before stopping for any user interaction, approval, or manual decision, send a Telegram summary first if configured.'
  )
  local marker
  for marker in "${required_markers[@]}"; do
    assert_contains "$file" "$marker"
  done
  assert_not_contains "$file" '--allowedTools Read'
}
# Check every agent variant of the implement-plan and create-plan skills,
# preserving the original implement-plan-first ordering.
for skill_name in implement-plan create-plan; do
  for agent in claude-code codex cursor opencode; do
    check_skill_file "$REPO_ROOT/skills/$skill_name/$agent/SKILL.md"
  done
done

echo "PASS: Claude reviewer templates use isolated settings and avoid Read-tool review prompts"

82
skills/reviewer-runtime/tests/smoke-test.sh Normal file → Executable file
View File

@@ -66,6 +66,28 @@ run_helper() {
return "$exit_code" return "$exit_code"
} }
# Like run_helper, but additionally passes --success-file so the helper may
# treat empty stdout as success when the success file has content.
run_helper_allowing_success_file() {
  local command_file=$1 stdout_file=$2 stderr_file=$3
  local status_file=$4 success_file=$5
  shift 5

  # Capture the helper's exit status without tripping `set -e`.
  local rc
  set +e
  "$HELPER_PATH" \
    --command-file "$command_file" \
    --stdout-file "$stdout_file" \
    --stderr-file "$stderr_file" \
    --status-file "$status_file" \
    --success-file "$success_file" \
    "$@"
  rc=$?
  set -e
  return "$rc"
}
test_delayed_success() { test_delayed_success() {
local dir=$1 local dir=$1
local command_file=$dir/delayed-success.sh local command_file=$dir/delayed-success.sh
@@ -120,6 +142,36 @@ printf "completed after soft timeout\n"
assert_file_contains "$status_file" "state=completed" assert_file_contains "$status_file" "state=completed"
} }
# The helper must emit periodic "In progress N" heartbeats while its child
# command is still running, then complete normally.
test_in_progress_heartbeats() {
  local dir=$1
  local base=$dir/in-progress-heartbeats
  local command_file=$base.sh
  local stdout_file=$base.stdout
  local stderr_file=$base.stderr
  local status_file=$base.status

  make_command "$command_file" '
sleep 3
printf "finished with heartbeat coverage\n"
'

  # `|| exit_code=$?` records a nonzero status without triggering `set -e`.
  local exit_code=0
  run_helper "$command_file" "$stdout_file" "$stderr_file" "$status_file" \
    --poll-seconds 1 \
    --heartbeat-seconds 1 \
    --soft-timeout-seconds 5 \
    --stall-warning-seconds 4 \
    --hard-timeout-seconds 10 || exit_code=$?

  assert_exit_code "$exit_code" 0
  assert_file_contains "$stdout_file" "finished with heartbeat coverage"
  # A 3-second child with a 1-second heartbeat interval yields at least two
  # numbered heartbeat lines in the status log.
  assert_file_contains "$status_file" "state=in-progress"
  assert_file_contains "$status_file" "In progress 1"
  assert_file_contains "$status_file" "In progress 2"
}
test_nonzero_failure() { test_nonzero_failure() {
local dir=$1 local dir=$1
local command_file=$dir/nonzero-failure.sh local command_file=$dir/nonzero-failure.sh
@@ -173,6 +225,34 @@ exit 0
assert_file_contains "$status_file" "state=completed-empty-output" assert_file_contains "$status_file" "state=completed-empty-output"
} }
# A run that writes its review to --success-file while producing no stdout
# must still be treated as a successful, completed run.
test_success_file_allows_empty_stdout() {
  local work_dir=$1
  local command_file=$work_dir/success-file.sh
  local stdout_file=$work_dir/success-file.stdout
  local stderr_file=$work_dir/success-file.stderr
  local status_file=$work_dir/success-file.status
  local success_file=$work_dir/review-output.md

  make_command "$command_file" "
printf 'review body from redirected file\\n' > \"$success_file\"
exit 0
"

  local exit_code=0
  run_helper_allowing_success_file "$command_file" "$stdout_file" "$stderr_file" "$status_file" "$success_file" \
    --poll-seconds 1 \
    --soft-timeout-seconds 5 \
    --stall-warning-seconds 3 \
    --hard-timeout-seconds 10 || exit_code=$?

  assert_exit_code "$exit_code" 0
  assert_file_contains "$success_file" "review body from redirected file"
  assert_file_contains "$status_file" "state=completed"
}
test_signal_cleanup() { test_signal_cleanup() {
local dir=$1 local dir=$1
local command_file=$dir/signal-child.sh local command_file=$dir/signal-child.sh
@@ -252,8 +332,10 @@ main() {
test_delayed_success "$tmp_dir" test_delayed_success "$tmp_dir"
test_soft_timeout_continues "$tmp_dir" test_soft_timeout_continues "$tmp_dir"
test_in_progress_heartbeats "$tmp_dir"
test_nonzero_failure "$tmp_dir" test_nonzero_failure "$tmp_dir"
test_empty_output_is_terminal "$tmp_dir" test_empty_output_is_terminal "$tmp_dir"
test_success_file_allows_empty_stdout "$tmp_dir"
test_signal_cleanup "$tmp_dir" test_signal_cleanup "$tmp_dir"
test_hard_timeout_escalation "$tmp_dir" test_hard_timeout_escalation "$tmp_dir"

View File

@@ -0,0 +1,158 @@
#!/usr/bin/env bash
set -euo pipefail

# Resolve this script's own directory so the helper under test can be located
# relative to it regardless of the caller's working directory.
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# The Telegram notifier helper under test lives one directory up.
HELPER_PATH=$(cd "$SCRIPT_DIR/.." && pwd)/notify-telegram.sh
# Print a FAIL message to stderr and abort the whole test run.
fail() {
  echo "FAIL: $*" >&2
  exit 1
}
# Compare two scalar values; abort with a descriptive message when they differ.
assert_equals() {
  local actual=$1 expected=$2
  [[ "$actual" == "$expected" ]] || fail "expected '$expected', got '$actual'"
}
# Assert that a file contains the given fixed string; on failure, dump (up to)
# the first 200 lines of the file to stderr to ease debugging.
assert_file_contains() {
  local file=$1 pattern=$2
  if grep -qF -- "$pattern" "$file"; then
    return 0
  fi
  echo "--- $file ---" >&2
  sed -n '1,200p' "$file" >&2 || true
  fail "expected '$pattern' in $file"
}
# Install a fake curl(1) into bin_dir that records its arguments (one per
# line) into curl_args_file and replies with a minimal successful Telegram
# payload. The heredoc delimiter is intentionally unquoted so $curl_args_file
# is baked into the stub at creation time, while \$@ stays escaped so it
# expands when the stub runs.
capture_curl() {
  local bin_dir=$1
  local curl_args_file=$2
  mkdir -p "$bin_dir"
  cat >"$bin_dir/curl" <<EOF
#!/usr/bin/env bash
printf '%s\n' "\$@" >"$curl_args_file"
printf '{"ok":true}\n'
EOF
  chmod +x "$bin_dir/curl"
}
# Print the value of the first recorded "key=value" curl argument in file.
read_curl_value() {
  local file=$1
  local key=$2
  awk -v want="${key}=" 'index($0, want) == 1 { print substr($0, length(want) + 1); exit }' "$file"
}
# Without TELEGRAM_BOT_TOKEN/TELEGRAM_CHAT_ID in the environment the helper
# must exit 2 and explain that the bot token is required.
test_missing_credentials() {
  local tmp_dir=$1
  local output_file=$tmp_dir/missing-credentials.out
  local exit_code=0
  env -u TELEGRAM_BOT_TOKEN -u TELEGRAM_CHAT_ID \
    "$HELPER_PATH" --message "hello" >"$output_file" 2>&1 || exit_code=$?
  [[ "$exit_code" -eq 2 ]] || fail "expected exit code 2 for missing credentials, got $exit_code"
  assert_file_contains "$output_file" "bot token is required"
}
# Supplying both --message and --message-file must be rejected with exit 2.
test_rejects_message_and_message_file_together() {
  local tmp_dir=$1
  local message_file=$tmp_dir/message.txt
  local output_file=$tmp_dir/message-and-file.out
  printf 'hello from file\n' >"$message_file"
  local exit_code=0
  TELEGRAM_BOT_TOKEN=test-token \
    TELEGRAM_CHAT_ID=123456 \
    "$HELPER_PATH" --message "hello" --message-file "$message_file" >"$output_file" 2>&1 || exit_code=$?
  [[ "$exit_code" -eq 2 ]] || fail "expected exit code 2 for mutually exclusive arguments, got $exit_code"
  assert_file_contains "$output_file" "use either --message or --message-file, not both"
}
# With a stubbed curl on PATH, a basic --message send must hit the Telegram
# API endpoint with the expected chat_id, text, and formatting flags.
test_successful_request() {
  local tmp_dir=$1
  local stub_bin=$tmp_dir/bin
  local recorded_args=$tmp_dir/curl-args.txt
  capture_curl "$stub_bin" "$recorded_args"
  PATH="$stub_bin:$PATH" \
    TELEGRAM_BOT_TOKEN=test-token \
    TELEGRAM_CHAT_ID=123456 \
    "$HELPER_PATH" --message "Plan completed"
  assert_file_contains "$recorded_args" "https://api.telegram.org/bottest-token/sendMessage"
  assert_file_contains "$recorded_args" "chat_id=123456"
  assert_file_contains "$recorded_args" "text=Plan completed"
  assert_file_contains "$recorded_args" "disable_web_page_preview=true"
  assert_file_contains "$recorded_args" "parse_mode=HTML"
}
# Both --message-file content and a custom --api-base-url must be forwarded
# to the (stubbed) curl invocation.
test_message_file_and_custom_api_base() {
  local tmp_dir=$1
  local stub_bin=$tmp_dir/bin-message-file
  local recorded_args=$tmp_dir/curl-message-file.txt
  local message_file=$tmp_dir/telegram-message.txt
  capture_curl "$stub_bin" "$recorded_args"
  printf 'Plan completed from file\n' >"$message_file"
  PATH="$stub_bin:$PATH" \
    TELEGRAM_BOT_TOKEN=test-token \
    TELEGRAM_CHAT_ID=654321 \
    "$HELPER_PATH" \
    --message-file "$message_file" \
    --api-base-url "https://telegram.example.test/custom"
  assert_file_contains "$recorded_args" "https://telegram.example.test/custom/bottest-token/sendMessage"
  assert_file_contains "$recorded_args" "chat_id=654321"
  assert_file_contains "$recorded_args" "text=Plan completed from file"
}
# Messages longer than Telegram's 4096-character limit must be truncated
# before being sent.
test_truncates_long_message() {
  local tmp_dir=$1
  local stub_bin=$tmp_dir/bin-truncate
  local recorded_args=$tmp_dir/curl-truncate.txt
  local long_message_file=$tmp_dir/long-message.txt
  local sent_text
  capture_curl "$stub_bin" "$recorded_args"
  # Generate a 5000-character payload with no trailing newline.
  python3 - <<'PY' >"$long_message_file"
print("A" * 5000, end="")
PY
  PATH="$stub_bin:$PATH" \
    TELEGRAM_BOT_TOKEN=test-token \
    TELEGRAM_CHAT_ID=123456 \
    "$HELPER_PATH" --message-file "$long_message_file"
  sent_text=$(read_curl_value "$recorded_args" "text")
  assert_equals "${#sent_text}" "4096"
}
# Entry point: run every notifier test inside a throwaway temp directory that
# is cleaned up on exit.
main() {
  [[ -x "$HELPER_PATH" ]] || fail "helper is not executable: $HELPER_PATH"
  local tmp_dir
  tmp_dir=$(mktemp -d)
  trap "rm -rf '$tmp_dir'" EXIT
  local test_fn
  for test_fn in \
    test_missing_credentials \
    test_rejects_message_and_message_file_together \
    test_successful_request \
    test_message_file_and_custom_api_base \
    test_truncates_long_message; do
    "$test_fn" "$tmp_dir"
  done
  echo "PASS: telegram notifier tests"
}

main "$@"

View File

@@ -1,48 +1,99 @@
--- ---
name: web-automation name: web-automation
description: Browse and scrape web pages using Playwright with Camoufox anti-detection browser. Use when automating web workflows, extracting page content to markdown, handling authenticated sessions, or scraping websites with bot protection. description: Browse and scrape web pages using Playwright-compatible CloakBrowser. Use when automating web workflows, extracting rendered page content, handling authenticated sessions, or running multi-step browser flows.
--- ---
# Web Automation with Camoufox (Claude Code) # Web Automation with CloakBrowser (Claude Code)
Automated web browsing and scraping using Playwright with Camoufox anti-detection browser. Automated web browsing and scraping using Playwright-compatible CloakBrowser with two execution paths:
- one-shot extraction via `extract.js`
- broader stateful automation via `auth.ts`, `browse.ts`, `flow.ts`, `scan-local-app.ts`, and `scrape.ts`
## Requirements ## Requirements
- Node.js 20+ - Node.js 20+
- pnpm - pnpm
- Network access to download browser binaries - Network access to download the CloakBrowser binary on first use
## First-Time Setup ## First-Time Setup
```bash ```bash
cd ~/.claude/skills/web-automation/scripts cd ~/.claude/skills/web-automation/scripts
pnpm install pnpm install
npx camoufox-js fetch npx cloakbrowser install
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
```
## Updating CloakBrowser
```bash
cd ~/.claude/skills/web-automation/scripts
pnpm up cloakbrowser playwright-core
npx cloakbrowser install
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
``` ```
## Prerequisite Check (MANDATORY) ## Prerequisite Check (MANDATORY)
Before running any automation, verify Playwright + Camoufox dependencies are installed and scripts are configured to use Camoufox. Before running automation, verify CloakBrowser and Playwright Core are installed and wired correctly.
```bash ```bash
cd ~/.claude/skills/web-automation/scripts cd ~/.claude/skills/web-automation/scripts
node -e "require.resolve('playwright-core/package.json');require.resolve('camoufox-js/package.json');console.log('OK: playwright-core + camoufox-js installed')" node check-install.js
node -e "const fs=require('fs');const t=fs.readFileSync('browse.ts','utf8');if(!/camoufox-js/.test(t)){throw new Error('browse.ts is not configured for Camoufox')}console.log('OK: Camoufox integration detected in browse.ts')"
``` ```
If any check fails, stop and return: If the check fails, stop and return:
"Missing dependency/config: web-automation requires `playwright-core` + `camoufox-js` and Camoufox-based scripts. Run setup in this skill, then retry." "Missing dependency/config: web-automation requires `cloakbrowser` and `playwright-core` with CloakBrowser-based scripts. Run setup in this skill, then retry."
If runtime fails with missing native bindings for `better-sqlite3` or `esbuild`, run:
```bash
cd ~/.claude/skills/web-automation/scripts
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
```
## When To Use Which Command
- Use `node extract.js "<URL>"` for a one-shot rendered fetch with JSON output.
- Use `npx tsx scrape.ts ...` when you need markdown extraction, Readability cleanup, or selector-based scraping.
- Use `npx tsx browse.ts ...`, `auth.ts`, or `flow.ts` when the task needs login handling, persistent sessions, clicks, typing, screenshots, or multi-step navigation.
- Use `npx tsx scan-local-app.ts` when you need a configurable local-app smoke pass driven by `SCAN_*` and `CLOAKBROWSER_*` environment variables.
## Quick Reference ## Quick Reference
- Install check: `node check-install.js`
- One-shot JSON extract: `node extract.js "https://example.com"`
- Browse page: `npx tsx browse.ts --url "https://example.com"` - Browse page: `npx tsx browse.ts --url "https://example.com"`
- Scrape markdown: `npx tsx scrape.ts --url "https://example.com" --mode main --output page.md` - Scrape markdown: `npx tsx scrape.ts --url "https://example.com" --mode main --output page.md`
- Authenticate: `npx tsx auth.ts --url "https://example.com/login"` - Authenticate: `npx tsx auth.ts --url "https://example.com/login"`
- Natural-language flow: `npx tsx flow.ts --instruction 'go to https://example.com then click on "Login" then type "user@example.com" in #email then press enter'`
- Local app smoke scan: `SCAN_BASE_URL=http://localhost:3000 SCAN_ROUTES=/,/dashboard npx tsx scan-local-app.ts`
## Local App Smoke Scan
`scan-local-app.ts` is intentionally generic. Configure it with environment variables instead of editing the file:
- `SCAN_BASE_URL`
- `SCAN_LOGIN_PATH`
- `SCAN_USERNAME`
- `SCAN_PASSWORD`
- `SCAN_USERNAME_SELECTOR`
- `SCAN_PASSWORD_SELECTOR`
- `SCAN_SUBMIT_SELECTOR`
- `SCAN_ROUTES`
- `SCAN_REPORT_PATH`
- `SCAN_HEADLESS`
If `SCAN_USERNAME` or `SCAN_PASSWORD` are omitted, the script falls back to `CLOAKBROWSER_USERNAME` and `CLOAKBROWSER_PASSWORD`.
## Notes ## Notes
- Sessions persist in Camoufox profile storage. - Sessions persist in CloakBrowser profile storage.
- Use `--wait` for dynamic pages. - Use `--wait` for dynamic pages.
- Use `--mode selector --selector "..."` for targeted extraction. - Use `--mode selector --selector "..."` for targeted extraction.
- `extract.js` keeps a bounded stealth/rendered fetch path without needing a long-lived automation session.

View File

@@ -41,8 +41,8 @@ function getCredentials(options?: {
username?: string; username?: string;
password?: string; password?: string;
}): { username: string; password: string } | null { }): { username: string; password: string } | null {
const username = options?.username || process.env.CAMOUFOX_USERNAME; const username = options?.username || process.env.CLOAKBROWSER_USERNAME;
const password = options?.password || process.env.CAMOUFOX_PASSWORD; const password = options?.password || process.env.CLOAKBROWSER_PASSWORD;
if (!username || !password) { if (!username || !password) {
return null; return null;
@@ -450,7 +450,7 @@ export async function navigateAuthenticated(
if (!credentials) { if (!credentials) {
throw new Error( throw new Error(
'Authentication required but no credentials provided. ' + 'Authentication required but no credentials provided. ' +
'Set CAMOUFOX_USERNAME and CAMOUFOX_PASSWORD environment variables.' 'Set CLOAKBROWSER_USERNAME and CLOAKBROWSER_PASSWORD environment variables.'
); );
} }
@@ -504,8 +504,8 @@ Usage:
Options: Options:
-u, --url <url> URL to authenticate (required) -u, --url <url> URL to authenticate (required)
-t, --type <type> Auth type: auto, form, or msal (default: auto) -t, --type <type> Auth type: auto, form, or msal (default: auto)
--username <user> Username/email (or set CAMOUFOX_USERNAME env var) --username <user> Username/email (or set CLOAKBROWSER_USERNAME env var)
--password <pass> Password (or set CAMOUFOX_PASSWORD env var) --password <pass> Password (or set CLOAKBROWSER_PASSWORD env var)
--headless <bool> Run in headless mode (default: false for auth) --headless <bool> Run in headless mode (default: false for auth)
-h, --help Show this help message -h, --help Show this help message
@@ -515,8 +515,8 @@ Auth Types:
msal Microsoft SSO (login.microsoftonline.com) msal Microsoft SSO (login.microsoftonline.com)
Environment Variables: Environment Variables:
CAMOUFOX_USERNAME Default username/email for authentication CLOAKBROWSER_USERNAME Default username/email for authentication
CAMOUFOX_PASSWORD Default password for authentication CLOAKBROWSER_PASSWORD Default password for authentication
Examples: Examples:
# Interactive login (no credentials, opens browser) # Interactive login (no credentials, opens browser)
@@ -527,11 +527,11 @@ Examples:
--username "user@example.com" --password "secret" --username "user@example.com" --password "secret"
# Microsoft SSO login # Microsoft SSO login
CAMOUFOX_USERNAME=user@company.com CAMOUFOX_PASSWORD=secret \\ CLOAKBROWSER_USERNAME=user@company.com CLOAKBROWSER_PASSWORD=secret \\
npx tsx auth.ts --url "https://internal.company.com" --type msal npx tsx auth.ts --url "https://internal.company.com" --type msal
Notes: Notes:
- Session is saved to ~/.camoufox-profile/ for persistence - Session is saved to ~/.cloakbrowser-profile/ for persistence
- After successful auth, subsequent browses will be authenticated - After successful auth, subsequent browses will be authenticated
- Use --headless false if you need to handle MFA manually - Use --headless false if you need to handle MFA manually
`); `);

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env npx tsx #!/usr/bin/env npx tsx
/** /**
* Browser launcher using Camoufox with persistent profile * Browser launcher using CloakBrowser with persistent profile
* *
* Usage: * Usage:
* npx tsx browse.ts --url "https://example.com" * npx tsx browse.ts --url "https://example.com"
@@ -9,14 +9,13 @@
* npx tsx browse.ts --url "https://example.com" --headless false --wait 5000 * npx tsx browse.ts --url "https://example.com" --headless false --wait 5000
*/ */
import { Camoufox } from 'camoufox-js'; import { launchPersistentContext } from 'cloakbrowser';
import { homedir } from 'os'; import { homedir } from 'os';
import { join } from 'path'; import { join } from 'path';
import { existsSync, mkdirSync } from 'fs'; import { existsSync, mkdirSync } from 'fs';
import parseArgs from 'minimist'; import parseArgs from 'minimist';
import type { Page, BrowserContext } from 'playwright-core'; import type { Page, BrowserContext } from 'playwright-core';
// Types
interface BrowseOptions { interface BrowseOptions {
url: string; url: string;
headless?: boolean; headless?: boolean;
@@ -33,55 +32,54 @@ interface BrowseResult {
screenshotPath?: string; screenshotPath?: string;
} }
// Get profile directory function sleep(ms: number): Promise<void> {
return new Promise((resolve) => setTimeout(resolve, ms));
}
const getProfilePath = (): string => { const getProfilePath = (): string => {
const customPath = process.env.CAMOUFOX_PROFILE_PATH; const customPath = process.env.CLOAKBROWSER_PROFILE_PATH;
if (customPath) return customPath; if (customPath) return customPath;
const profileDir = join(homedir(), '.camoufox-profile'); const profileDir = join(homedir(), '.cloakbrowser-profile');
if (!existsSync(profileDir)) { if (!existsSync(profileDir)) {
mkdirSync(profileDir, { recursive: true }); mkdirSync(profileDir, { recursive: true });
} }
return profileDir; return profileDir;
}; };
// Launch browser with persistent profile
export async function launchBrowser(options: { export async function launchBrowser(options: {
headless?: boolean; headless?: boolean;
}): Promise<BrowserContext> { }): Promise<BrowserContext> {
const profilePath = getProfilePath(); const profilePath = getProfilePath();
const headless = const envHeadless = process.env.CLOAKBROWSER_HEADLESS;
options.headless ?? const headless = options.headless ?? (envHeadless ? envHeadless === 'true' : true);
(process.env.CAMOUFOX_HEADLESS ? process.env.CAMOUFOX_HEADLESS === 'true' : true);
console.log(`Using profile: ${profilePath}`); console.log(`Using profile: ${profilePath}`);
console.log(`Headless mode: ${headless}`); console.log(`Headless mode: ${headless}`);
const browser = await Camoufox({ const context = await launchPersistentContext({
user_data_dir: profilePath, userDataDir: profilePath,
headless, headless,
humanize: true,
}); });
return browser; return context;
} }
// Browse to URL and optionally take screenshot
export async function browse(options: BrowseOptions): Promise<BrowseResult> { export async function browse(options: BrowseOptions): Promise<BrowseResult> {
const browser = await launchBrowser({ headless: options.headless }); const browser = await launchBrowser({ headless: options.headless });
const page = await browser.newPage(); const page = browser.pages()[0] || await browser.newPage();
try { try {
// Navigate to URL
console.log(`Navigating to: ${options.url}`); console.log(`Navigating to: ${options.url}`);
await page.goto(options.url, { await page.goto(options.url, {
timeout: options.timeout ?? 60000, timeout: options.timeout ?? 60000,
waitUntil: 'domcontentloaded', waitUntil: 'domcontentloaded',
}); });
// Wait if specified
if (options.wait) { if (options.wait) {
console.log(`Waiting ${options.wait}ms...`); console.log(`Waiting ${options.wait}ms...`);
await page.waitForTimeout(options.wait); await sleep(options.wait);
} }
const result: BrowseResult = { const result: BrowseResult = {
@@ -92,7 +90,6 @@ export async function browse(options: BrowseOptions): Promise<BrowseResult> {
console.log(`Page title: ${result.title}`); console.log(`Page title: ${result.title}`);
console.log(`Final URL: ${result.url}`); console.log(`Final URL: ${result.url}`);
// Take screenshot if requested
if (options.screenshot) { if (options.screenshot) {
const outputPath = options.output ?? 'screenshot.png'; const outputPath = options.output ?? 'screenshot.png';
await page.screenshot({ path: outputPath, fullPage: true }); await page.screenshot({ path: outputPath, fullPage: true });
@@ -100,11 +97,10 @@ export async function browse(options: BrowseOptions): Promise<BrowseResult> {
console.log(`Screenshot saved: ${outputPath}`); console.log(`Screenshot saved: ${outputPath}`);
} }
// If interactive mode, keep browser open
if (options.interactive) { if (options.interactive) {
console.log('\nInteractive mode - browser will stay open.'); console.log('\nInteractive mode - browser will stay open.');
console.log('Press Ctrl+C to close.'); console.log('Press Ctrl+C to close.');
await new Promise(() => {}); // Keep running await new Promise(() => {});
} }
return result; return result;
@@ -115,16 +111,14 @@ export async function browse(options: BrowseOptions): Promise<BrowseResult> {
} }
} }
// Export page for use in other scripts
export async function getPage(options?: { export async function getPage(options?: {
headless?: boolean; headless?: boolean;
}): Promise<{ page: Page; browser: BrowserContext }> { }): Promise<{ page: Page; browser: BrowserContext }> {
const browser = await launchBrowser({ headless: options?.headless }); const browser = await launchBrowser({ headless: options?.headless });
const page = await browser.newPage(); const page = browser.pages()[0] || await browser.newPage();
return { page, browser }; return { page, browser };
} }
// CLI entry point
async function main() { async function main() {
const args = parseArgs(process.argv.slice(2), { const args = parseArgs(process.argv.slice(2), {
string: ['url', 'output'], string: ['url', 'output'],
@@ -145,7 +139,7 @@ async function main() {
if (args.help || !args.url) { if (args.help || !args.url) {
console.log(` console.log(`
Web Browser with Camoufox Web Browser with CloakBrowser
Usage: Usage:
npx tsx browse.ts --url <url> [options] npx tsx browse.ts --url <url> [options]
@@ -166,8 +160,8 @@ Examples:
npx tsx browse.ts --url "https://example.com" --headless false --interactive npx tsx browse.ts --url "https://example.com" --headless false --interactive
Environment Variables: Environment Variables:
CAMOUFOX_PROFILE_PATH Custom profile directory (default: ~/.camoufox-profile/) CLOAKBROWSER_PROFILE_PATH Custom profile directory (default: ~/.cloakbrowser-profile/)
CAMOUFOX_HEADLESS Default headless mode (true/false) CLOAKBROWSER_HEADLESS Default headless mode (true/false)
`); `);
process.exit(args.help ? 0 : 1); process.exit(args.help ? 0 : 1);
} }
@@ -188,7 +182,6 @@ Environment Variables:
} }
} }
// Run if executed directly
const isMainModule = process.argv[1]?.includes('browse.ts'); const isMainModule = process.argv[1]?.includes('browse.ts');
if (isMainModule) { if (isMainModule) {
main(); main();

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env node
import fs from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Emit a single-line JSON error object on stderr and exit non-zero.
function fail(message, details) {
  const payload = details ? { error: message, details } : { error: message };
  process.stderr.write(`${JSON.stringify(payload)}\n`);
  process.exit(1);
}
// Verify that the CloakBrowser stack is importable and that browse.ts is
// actually wired to it, mirroring the skill's documented prerequisite check.
async function main() {
  try {
    // Both packages must resolve for the skill's scripts to run at all.
    await import("cloakbrowser");
    await import("playwright-core");
  } catch (error) {
    fail(
      "Missing dependency/config: web-automation requires cloakbrowser and playwright-core.",
      error instanceof Error ? error.message : String(error)
    );
  }

  // Static sanity check: browse.ts must import and launch CloakBrowser.
  const browseSource = fs.readFileSync(path.join(__dirname, "browse.ts"), "utf8");
  const usesLauncher = /launchPersistentContext/.test(browseSource);
  const importsCloak = /from ['"]cloakbrowser['"]/.test(browseSource);
  if (!usesLauncher || !importsCloak) {
    fail("browse.ts is not configured for CloakBrowser.");
  }

  process.stdout.write("OK: cloakbrowser + playwright-core installed\n");
  process.stdout.write("OK: CloakBrowser integration detected in browse.ts\n");
}

main().catch((error) => {
  fail("Install check failed.", error instanceof Error ? error.message : String(error));
});

View File

@@ -0,0 +1,188 @@
#!/usr/bin/env node
import fs from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
// Tunables for the one-shot extraction flow.
const DEFAULT_WAIT_MS = 5000; // settle time after domcontentloaded when WAIT_TIME is unset
const MAX_WAIT_MS = 20000; // upper clamp applied to WAIT_TIME overrides
const NAV_TIMEOUT_MS = 30000; // page.goto navigation timeout
const EXTRA_CHALLENGE_WAIT_MS = 8000; // extra wait when a bot-protection challenge is detected
const CONTENT_LIMIT = 12000; // max characters of body text included in the JSON result
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Emit a single-line JSON error object on stderr and exit non-zero.
function fail(message, details) {
  const payload = details ? { error: message, details } : { error: message };
  process.stderr.write(`${JSON.stringify(payload)}\n`);
  process.exit(1);
}
// Clamp the WAIT_TIME override to [0, MAX_WAIT_MS]; missing or unparsable
// values fall back to DEFAULT_WAIT_MS.
function parseWaitTime(raw) {
  const parsed = Number.parseInt(raw || `${DEFAULT_WAIT_MS}`, 10);
  if (!Number.isFinite(parsed) || parsed < 0) {
    return DEFAULT_WAIT_MS;
  }
  return Math.min(parsed, MAX_WAIT_MS);
}
// Validate the CLI-provided URL: it must parse and use http(s). Exits the
// process via fail() on any problem; otherwise returns the normalized URL.
function parseTarget(rawUrl) {
  if (!rawUrl) {
    fail("Missing URL. Usage: node extract.js <URL>");
  }
  let parsed;
  try {
    parsed = new URL(rawUrl);
  } catch (error) {
    fail("Invalid URL.", error.message);
  }
  const allowedProtocols = ["http:", "https:"];
  if (!allowedProtocols.includes(parsed.protocol)) {
    fail("Only http and https URLs are allowed.");
  }
  return parsed.toString();
}
// Create the parent directory of filePath (recursively) so writes succeed.
// A falsy path is a no-op.
function ensureParentDir(filePath) {
  if (!filePath) {
    return;
  }
  const parent = path.dirname(filePath);
  fs.mkdirSync(parent, { recursive: true });
}
// Promise-based delay helper.
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
// Heuristically detect bot-protection interstitials (Cloudflare-style
// "checking your browser" pages) by body text and challenge iframes.
// Returns false when the page cannot be evaluated.
async function detectChallenge(page) {
  try {
    return await page.evaluate(() => {
      const bodyText = (document.body?.innerText || "").toLowerCase();
      const phrases = [
        "checking your browser",
        "just a moment",
        "verify you are human",
        "press and hold",
      ];
      if (phrases.some((phrase) => bodyText.includes(phrase))) {
        return true;
      }
      return (
        document.querySelector('iframe[src*="challenge"]') !== null ||
        document.querySelector('iframe[src*="cloudflare"]') !== null
      );
    });
  } catch {
    return false;
  }
}
// Import cloakbrowser lazily so a missing install produces a helpful setup
// hint instead of a raw module-resolution stack trace.
async function loadCloakBrowser() {
  let mod;
  try {
    mod = await import("cloakbrowser");
  } catch (error) {
    fail(
      "CloakBrowser is not installed for this skill. Run pnpm install in this skill's scripts directory first.",
      error.message
    );
  }
  return mod;
}
// Run fn while console.log/console.error are redirected to stderr, keeping
// stdout reserved for the final JSON result. The console is restored after
// fn settles, even on failure.
async function runWithStderrLogs(fn) {
  const saved = { log: console.log, error: console.error };
  const toStderr = (...args) => process.stderr.write(`${args.join(" ")}\n`);
  console.log = toStderr;
  console.error = toStderr;
  try {
    return await fn();
  } finally {
    console.log = saved.log;
    console.error = saved.error;
  }
}
// Entry point: launch CloakBrowser, render the requested URL, and print a
// JSON result (title, truncated text content, HTTP status, challenge flag)
// on stdout. Diagnostics go to stderr so stdout stays machine-readable.
async function main() {
  const requestedUrl = parseTarget(process.argv[2]);
  const waitTime = parseWaitTime(process.env.WAIT_TIME);
  const screenshotPath = process.env.SCREENSHOT_PATH || "";
  const saveHtml = process.env.SAVE_HTML === "true";
  // Headless unless HEADLESS=false is set explicitly.
  const headless = process.env.HEADLESS !== "false";
  const userAgent = process.env.USER_AGENT || undefined;
  const startedAt = Date.now();

  const { ensureBinary, launchContext } = await loadCloakBrowser();
  let context;
  try {
    // Downloads the browser binary on first use; its logging is routed to
    // stderr so it cannot pollute the JSON result on stdout.
    await runWithStderrLogs(() => ensureBinary());
    context = await runWithStderrLogs(() => launchContext({
      headless,
      userAgent,
      locale: "en-US",
      viewport: { width: 1440, height: 900 },
      humanize: true,
    }));
    const page = await context.newPage();
    const response = await page.goto(requestedUrl, {
      waitUntil: "domcontentloaded",
      timeout: NAV_TIMEOUT_MS
    });
    // Give client-side rendering time to settle before extraction.
    await sleep(waitTime);
    // If a bot-protection interstitial is showing, wait once more and
    // re-check — many challenges clear themselves after a few seconds.
    let challengeDetected = await detectChallenge(page);
    if (challengeDetected) {
      await sleep(EXTRA_CHALLENGE_WAIT_MS);
      challengeDetected = await detectChallenge(page);
    }
    // Pull title, body text (bounded by contentLimit), and meta description
    // out of the rendered document in a single evaluate round-trip.
    const extracted = await page.evaluate((contentLimit) => {
      const bodyText = document.body?.innerText || "";
      return {
        finalUrl: window.location.href,
        title: document.title || "",
        content: bodyText.slice(0, contentLimit),
        metaDescription:
          document.querySelector('meta[name="description"]')?.content ||
          document.querySelector('meta[property="og:description"]')?.content ||
          ""
      };
    }, CONTENT_LIMIT);
    const result = {
      requestedUrl,
      finalUrl: extracted.finalUrl,
      title: extracted.title,
      content: extracted.content,
      metaDescription: extracted.metaDescription,
      status: response ? response.status() : null,
      challengeDetected,
      elapsedSeconds: ((Date.now() - startedAt) / 1000).toFixed(2)
    };
    if (screenshotPath) {
      ensureParentDir(screenshotPath);
      await page.screenshot({ path: screenshotPath, fullPage: false, timeout: 10000 });
      result.screenshot = screenshotPath;
    }
    if (saveHtml) {
      // Derive the HTML path from the screenshot path when one was given;
      // otherwise write a timestamped file next to this script.
      const htmlTarget = screenshotPath
        ? screenshotPath.replace(/\.[^.]+$/, ".html")
        : path.resolve(__dirname, `page-${Date.now()}.html`);
      ensureParentDir(htmlTarget);
      fs.writeFileSync(htmlTarget, await page.content());
      result.htmlFile = htmlTarget;
    }
    process.stdout.write(`${JSON.stringify(result, null, 2)}\n`);
    await context.close();
  } catch (error) {
    if (context) {
      try {
        await context.close();
      } catch {
        // Ignore close errors after the primary failure.
      }
    }
    fail("Scrape failed.", error.message);
  }
}

main();

View File

@@ -0,0 +1,329 @@
#!/usr/bin/env npx tsx
import parseArgs from 'minimist';
import type { Page } from 'playwright-core';
import { launchBrowser } from './browse';
// One executable automation step as produced by parseInstruction(). The
// 'action' tag discriminates the union; optional fields narrow the target
// (CSS selector, visible text, or ARIA role + accessible name).
type Step =
  | { action: 'goto'; url: string }
  | { action: 'click'; selector?: string; text?: string; role?: string; name?: string }
  | { action: 'type'; selector?: string; text: string }
  | { action: 'press'; key: string; selector?: string }
  | { action: 'wait'; ms: number }
  | { action: 'screenshot'; path: string }
  | { action: 'extract'; selector: string; count?: number };
// Parse and normalize a navigation URL; only http/https are permitted.
// Throws for unparsable URLs or disallowed protocols.
function normalizeNavigationUrl(rawUrl: string): string {
  let parsed: URL;
  try {
    parsed = new URL(rawUrl);
  } catch {
    throw new Error(`Invalid navigation URL: ${rawUrl}`);
  }
  const isHttp = parsed.protocol === 'http:' || parsed.protocol === 'https:';
  if (!isHttp) {
    throw new Error(`Only http and https URLs are allowed in flow steps: ${rawUrl}`);
  }
  return parsed.toString();
}
// Map common key aliases (case-insensitive) to Playwright key names;
// unrecognized keys pass through unchanged. Empty input defaults to Enter.
function normalizeKey(k: string): string {
  if (!k) return 'Enter';
  const aliases: Record<string, string> = {
    enter: 'Enter',
    return: 'Enter',
    tab: 'Tab',
    escape: 'Escape',
    esc: 'Escape',
  };
  return aliases[k.toLowerCase()] ?? k;
}
// Split a natural-language instruction into fragments on the word "then"
// or on semicolons, trimming whitespace and dropping empty pieces.
function splitInstructions(instruction: string): string[] {
  const rawParts = instruction.split(/\bthen\b|;/gi);
  const fragments: string[] = [];
  for (const part of rawParts) {
    const trimmed = part.trim();
    if (trimmed) fragments.push(trimmed);
  }
  return fragments;
}
// Translate a natural-language instruction ("go to X then click Y ...") into
// an ordered list of executable steps. Each fragment is matched against the
// supported patterns in priority order; an unrecognized fragment throws.
function parseInstruction(instruction: string): Step[] {
  const fragments = splitInstructions(instruction);
  const steps: Step[] = [];
  for (const fragment of fragments) {
    // Navigation: "go to https://..." / "open ..." / "navigate to ..."
    const gotoMatch = fragment.match(/^(?:go to|open|navigate to)\s+(https?:\/\/\S+)/i);
    if (gotoMatch) {
      steps.push({ action: 'goto', url: normalizeNavigationUrl(gotoMatch[1]) });
      continue;
    }

    // Clicks: ARIA role + accessible name, visible text, or CSS selector.
    const roleClick = fragment.match(/^click\s+(button|link|textbox|img|image|tab)\s+"([^"]+)"$/i);
    if (roleClick) {
      const role = roleClick[1].toLowerCase() === 'image' ? 'img' : roleClick[1].toLowerCase();
      steps.push({ action: 'click', role, name: roleClick[2] });
      continue;
    }
    const textClick = fragment.match(/^click(?: on)?\s+"([^"]+)"/i);
    if (textClick) {
      steps.push({ action: 'click', text: textClick[1] });
      continue;
    }
    const selectorClick = fragment.match(/^click(?: on)?\s+(#[\w-]+|\.[\w-]+|[a-z]+\[[^\]]+\])/i);
    if (selectorClick) {
      steps.push({ action: 'click', selector: selectorClick[1] });
      continue;
    }

    // Typing: 'type "text" in selector' or bare 'type "text"'.
    const typeWithTarget = fragment.match(/^type\s+"([^"]+)"\s+in\s+(.+)$/i);
    if (typeWithTarget) {
      steps.push({ action: 'type', text: typeWithTarget[1], selector: typeWithTarget[2].trim() });
      continue;
    }
    const typeBare = fragment.match(/^type\s+"([^"]+)"$/i);
    if (typeBare) {
      steps.push({ action: 'type', text: typeBare[1] });
      continue;
    }

    // Key presses: 'press enter in selector' or 'press enter'.
    const pressWithTarget = fragment.match(/^press\s+(\w+)\s+in\s+(.+)$/i);
    if (pressWithTarget) {
      steps.push({ action: 'press', key: normalizeKey(pressWithTarget[1]), selector: pressWithTarget[2].trim() });
      continue;
    }
    const pressBare = fragment.match(/^press\s+(\w+)$/i);
    if (pressBare) {
      steps.push({ action: 'press', key: normalizeKey(pressBare[1]) });
      continue;
    }

    // Waits: 'wait 2s' (seconds) or 'wait 500ms' (milliseconds).
    const waitSeconds = fragment.match(/^wait\s+(\d+)\s*s(?:ec(?:onds?)?)?$/i);
    if (waitSeconds) {
      steps.push({ action: 'wait', ms: parseInt(waitSeconds[1], 10) * 1000 });
      continue;
    }
    const waitMillis = fragment.match(/^wait\s+(\d+)\s*ms$/i);
    if (waitMillis) {
      steps.push({ action: 'wait', ms: parseInt(waitMillis[1], 10) });
      continue;
    }

    // Screenshots: 'screenshot path' / 'screenshot to path'.
    const screenshotMatch = fragment.match(/^screenshot(?: to)?\s+(.+)$/i);
    if (screenshotMatch) {
      steps.push({ action: 'screenshot', path: screenshotMatch[1].trim() });
      continue;
    }

    throw new Error(`Could not parse step: "${fragment}"`);
  }
  return steps;
}
// Re-validate every goto URL so that step lists built programmatically get
// the same protocol checks as parsed instructions. Non-goto steps pass
// through unchanged.
function validateSteps(steps: Step[]): Step[] {
  return steps.map((step) => {
    if (step.action !== 'goto') {
      return step;
    }
    return { ...step, url: normalizeNavigationUrl(step.url) };
  });
}
// Escape characters that carry special meaning inside a RegExp pattern so
// the value can be matched literally.
function escapeRegExp(value: string): string {
  const special = /[.*+?^${}()|[\]\\]/;
  let escaped = '';
  for (const ch of value) {
    escaped += special.test(ch) ? `\\${ch}` : ch;
  }
  return escaped;
}
// Rough check for login-style labels (English and Italian variants).
function isLikelyLoginText(text: string): boolean {
  const loginPattern = /(login|accedi|sign\s*in|entra)/i;
  return loginPattern.test(text);
}
// Try to click an element matching the given text: exact match first, then
// substring, preferring buttons, then links, then raw text nodes. Returns
// true once a click succeeds, false when no candidate could be clicked.
async function clickByText(page: Page, text: string): Promise<boolean> {
  const exact = new RegExp(`^${escapeRegExp(text)}$`, 'i');
  const partial = new RegExp(escapeRegExp(text), 'i');
  for (const pattern of [exact, partial]) {
    const candidates = [
      page.getByRole('button', { name: pattern }).first(),
      page.getByRole('link', { name: pattern }).first(),
      page.getByText(pattern).first(),
    ];
    for (const candidate of candidates) {
      if (!(await candidate.count())) {
        continue;
      }
      try {
        await candidate.click({ timeout: 8000 });
        return true;
      } catch {
        // keep trying next candidate
      }
    }
  }
  return false;
}
/**
 * Fallback when clicking a login control failed: scan the page for anchors or
 * buttons whose text looks like a login action and navigate to the first real
 * href found. As a last resort, hosts on corriere.it are sent straight to the
 * known login URL. Returns true when a navigation happened.
 */
async function fallbackLoginNavigation(page: Page, requestedText: string): Promise<boolean> {
  if (!isLikelyLoginText(requestedText)) return false;
  const current = new URL(page.url());
  const candidateLinks = await page.evaluate(() => {
    const loginTerms = ['login', 'accedi', 'sign in', 'entra'];
    const anchors = Array.from(document.querySelectorAll('a[href], a[onclick], button[onclick]')) as Array<HTMLAnchorElement | HTMLButtonElement>;
    return anchors
      .map((el) => {
        const text = (el.textContent || '').trim().toLowerCase();
        const href = (el as HTMLAnchorElement).getAttribute('href') || '';
        return { text, href };
      })
      .filter((x) => x.text && loginTerms.some((t) => x.text.includes(t)))
      .map((x) => x.href)
      .filter(Boolean);
  });

  // Prefer real URLs (not javascript:)
  const realCandidate = candidateLinks.find((h) => /login|account\/login/i.test(h) && !h.startsWith('javascript:'));
  if (realCandidate) {
    const target = new URL(realCandidate, page.url()).toString();
    await page.goto(target, { waitUntil: 'domcontentloaded', timeout: 60000 });
    return true;
  }

  // Site-specific fallback for Corriere. Only the apex domain or a true
  // subdomain qualifies: the previous regex (/corriere\.it$/) also matched
  // unrelated hosts such as "fakecorriere.it", sending them to Corriere's
  // login page by mistake.
  const host = current.hostname.toLowerCase();
  if (host === 'corriere.it' || host.endsWith('.corriere.it')) {
    await page.goto('https://www.corriere.it/account/login', {
      waitUntil: 'domcontentloaded',
      timeout: 60000,
    });
    return true;
  }

  return false;
}
// Fill text into the given selector, or — when no selector is supplied — into
// the first plausible text input on the page (search box, text input, or
// textarea).
async function typeInBestTarget(page: Page, text: string, selector?: string) {
  const target = selector
    ? page.locator(selector).first()
    : page.locator('input[name="q"], input[type="search"], input[type="text"], textarea').first();
  await target.click({ timeout: 10000 });
  await target.fill(text);
}
// Press a key inside a specific element, or on the page's keyboard when no
// selector is given.
async function pressOnTarget(page: Page, key: string, selector?: string) {
  if (!selector) {
    await page.keyboard.press(key);
    return;
  }
  await page.locator(selector).first().press(key);
}
// Execute the parsed steps sequentially against the page. Throws on the first
// step that cannot be completed; callers are expected to close the browser.
async function runSteps(page: Page, steps: Step[]) {
  for (const step of steps) {
    switch (step.action) {
      case 'goto':
        // Navigation waits only for DOM readiness, not full resource load.
        await page.goto(normalizeNavigationUrl(step.url), {
          waitUntil: 'domcontentloaded',
          timeout: 60000,
        });
        break;
      case 'click':
        // Precedence: explicit CSS selector > role+name > visible text.
        if (step.selector) {
          await page.locator(step.selector).first().click({ timeout: 15000 });
        } else if (step.role && step.name) {
          await page.getByRole(step.role as any, { name: new RegExp(escapeRegExp(step.name), 'i') }).first().click({ timeout: 15000 });
        } else if (step.text) {
          // Text clicks get a login-link recovery attempt when the text looks
          // like a login affordance (see fallbackLoginNavigation).
          const clicked = await clickByText(page, step.text);
          if (!clicked) {
            const recovered = await fallbackLoginNavigation(page, step.text);
            if (!recovered) {
              throw new Error(`Could not click target text: ${step.text}`);
            }
          }
        } else {
          throw new Error('click step missing selector/text/role');
        }
        // A click may or may not trigger navigation; wait briefly either way.
        try {
          await page.waitForLoadState('domcontentloaded', { timeout: 10000 });
        } catch {
          // no navigation is fine
        }
        break;
      case 'type':
        await typeInBestTarget(page, step.text, step.selector);
        break;
      case 'press':
        await pressOnTarget(page, step.key, step.selector);
        break;
      case 'wait':
        await page.waitForTimeout(step.ms);
        break;
      case 'screenshot':
        await page.screenshot({ path: step.path, fullPage: true });
        break;
      case 'extract': {
        // Prints the matched elements' text as a JSON array on stdout;
        // `count` (when present) caps the number of results.
        const items = await page.locator(step.selector).allTextContents();
        const out = items.slice(0, step.count ?? items.length).map((t) => t.trim()).filter(Boolean);
        console.log(JSON.stringify(out, null, 2));
        break;
      }
      default:
        throw new Error('Unknown step');
    }
  }
}
// CLI entry point: parse flags, build the step list (from raw JSON via
// --steps or from a natural-language --instruction), run it, and always close
// the browser — even when a step throws.
async function main() {
  const args = parseArgs(process.argv.slice(2), {
    string: ['instruction', 'steps'],
    boolean: ['headless', 'help'],
    default: { headless: true },
    alias: { i: 'instruction', s: 'steps', h: 'help' },
  });
  if (args.help || (!args.instruction && !args.steps)) {
    console.log(`
General Web Flow Runner (CloakBrowser)
Usage:
npx tsx flow.ts --instruction "go to https://example.com then type \"hello\" then press enter"
npx tsx flow.ts --steps '[{"action":"goto","url":"https://example.com"}]'
Supported natural steps:
- go to/open/navigate to <url>
- click on "Text"
- click <css-selector>
- type "text"
- type "text" in <css-selector>
- press <key>
- press <key> in <css-selector>
- wait <N>s | wait <N>ms
- screenshot <path>
`);
    // Exit 0 for an explicit --help, 1 when required input is missing.
    process.exit(args.help ? 0 : 1);
  }
  // NOTE(review): JSON.parse output is not schema-validated — assumed to
  // already be a well-formed Step[].
  const steps = validateSteps(args.steps ? JSON.parse(args.steps) : parseInstruction(args.instruction));
  const browser = await launchBrowser({ headless: args.headless });
  const page = await browser.newPage();
  try {
    await runSteps(page, steps);
    console.log('Flow complete. Final URL:', page.url());
  } finally {
    await browser.close();
  }
}
// Top-level failure handler: print a concise message and exit non-zero.
main().catch((e) => {
  const message = e instanceof Error ? e.message : e;
  console.error('Error:', message);
  process.exit(1);
});

View File

@@ -1,27 +1,36 @@
{ {
"name": "web-automation-scripts", "name": "web-automation-scripts",
"version": "1.0.0", "version": "1.0.0",
"description": "Web browsing and scraping scripts using Camoufox", "description": "Web browsing and scraping scripts using CloakBrowser",
"type": "module", "type": "module",
"scripts": { "scripts": {
"check-install": "node check-install.js",
"extract": "node extract.js",
"browse": "tsx browse.ts", "browse": "tsx browse.ts",
"auth": "tsx auth.ts",
"flow": "tsx flow.ts",
"scrape": "tsx scrape.ts", "scrape": "tsx scrape.ts",
"fetch-browser": "npx camoufox-js fetch" "typecheck": "tsc --noEmit -p tsconfig.json",
"lint": "pnpm run typecheck && node --check check-install.js && node --check extract.js",
"fetch-browser": "npx cloakbrowser install"
}, },
"dependencies": { "dependencies": {
"camoufox-js": "^0.8.5",
"playwright-core": "^1.40.0",
"turndown": "^7.1.2",
"turndown-plugin-gfm": "^1.0.2",
"@mozilla/readability": "^0.5.0", "@mozilla/readability": "^0.5.0",
"better-sqlite3": "^12.6.2",
"cloakbrowser": "^0.3.22",
"jsdom": "^24.0.0", "jsdom": "^24.0.0",
"minimist": "^1.2.8" "minimist": "^1.2.8",
"playwright-core": "^1.59.1",
"turndown": "^7.1.2",
"turndown-plugin-gfm": "^1.0.2"
}, },
"devDependencies": { "devDependencies": {
"typescript": "^5.3.0",
"@types/turndown": "^5.0.4",
"@types/jsdom": "^21.1.6", "@types/jsdom": "^21.1.6",
"@types/minimist": "^1.2.5", "@types/minimist": "^1.2.5",
"tsx": "^4.7.0" "@types/turndown": "^5.0.4",
} "esbuild": "0.27.0",
"tsx": "^4.7.0",
"typescript": "^5.3.0"
},
"packageManager": "pnpm@10.18.1+sha512.77a884a165cbba2d8d1c19e3b4880eee6d2fcabd0d879121e282196b80042351d5eb3ca0935fa599da1dc51265cc68816ad2bddd2a2de5ea9fdf92adbec7cd34"
} }

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,174 @@
#!/usr/bin/env npx tsx
import { mkdirSync, writeFileSync } from 'fs';
import { dirname, resolve } from 'path';
import { getPage } from './browse.js';
// Outcome of a single navigation attempt: requested vs. final URL, HTTP
// status (null when navigation failed before a response), page title, and
// the stringified navigation error, if any.
type NavResult = {
  requestedUrl: string;
  url: string;
  status: number | null;
  title: string;
  error?: string;
};
// One checked route together with its navigation result and the page's
// first <h1> text (null when absent).
type RouteCheck = {
  route: string;
  result: NavResult;
  heading: string | null;
};
// Defaults used when SCAN_BASE_URL / SCAN_REPORT_PATH are not provided.
const DEFAULT_BASE_URL = 'http://localhost:3000';
const DEFAULT_REPORT_PATH = resolve(process.cwd(), 'scan-local-app.md');
// Read an environment variable, trimming whitespace; blank or unset values
// come back as undefined so `??` fallbacks work uniformly.
function env(name: string): string | undefined {
  const raw = process.env[name];
  if (raw === undefined) return undefined;
  const trimmed = raw.trim();
  return trimmed.length > 0 ? trimmed : undefined;
}
// Build the list of absolute route URLs to check. SCAN_ROUTES is a
// comma-separated list of paths/URLs resolved against baseUrl; when unset,
// only the base URL itself is checked.
function getRoutes(baseUrl: string): string[] {
  const routeList = env('SCAN_ROUTES');
  if (!routeList) return [baseUrl];
  const routes: string[] = [];
  for (const entry of routeList.split(',')) {
    const trimmed = entry.trim();
    if (trimmed) routes.push(new URL(trimmed, baseUrl).toString());
  }
  return routes;
}
// Navigate and capture status/title without throwing: navigation failures
// are folded into the returned NavResult's `error` field instead.
async function gotoWithStatus(page: any, url: string): Promise<NavResult> {
  // Rejections are converted to `{ error }` so both outcomes flow through the
  // same code path below.
  // NOTE(review): a thrown falsy error would slip past the `response?.error`
  // check and reach response.status() — assumed not to happen in practice.
  const response = await page
    .goto(url, { waitUntil: 'domcontentloaded', timeout: 60000 })
    .catch((error: unknown) => ({ error }));
  if (response?.error) {
    return {
      requestedUrl: url,
      url: page.url(),
      status: null,
      // Title lookup can itself fail on a broken page; fall back to ''.
      title: await page.title().catch(() => ''),
      error: String(response.error),
    };
  }
  return {
    requestedUrl: url,
    url: page.url(),
    // goto can resolve with null (e.g. same-document navigation) — report
    // status null in that case.
    status: response ? response.status() : null,
    title: await page.title().catch(() => ''),
  };
}
// Return the first matching element's text with whitespace collapsed, or
// null when the selector matches nothing or the lookup fails.
async function textOrNull(page: any, selector: string): Promise<string | null> {
  const target = page.locator(selector).first();
  try {
    const matches = await target.count();
    if (matches === 0) return null;
    const raw = await target.textContent();
    if (!raw) return null;
    return raw.trim().replace(/\s+/g, ' ');
  } catch {
    return null;
  }
}
// Optional login smoke step, driven entirely by SCAN_* environment variables.
// Appends a '## Login' markdown section to `lines`; skips (with a setup hint)
// when the required configuration is absent.
async function loginIfConfigured(page: any, baseUrl: string, lines: string[]) {
  const loginPath = env('SCAN_LOGIN_PATH');
  // Credentials fall back to the shared CLOAKBROWSER_* variables.
  const username = env('SCAN_USERNAME') ?? env('CLOAKBROWSER_USERNAME');
  const password = env('SCAN_PASSWORD') ?? env('CLOAKBROWSER_PASSWORD');
  // Selector defaults cover the common email/password form shape.
  const usernameSelector = env('SCAN_USERNAME_SELECTOR') ?? 'input[type="email"], input[name="email"]';
  const passwordSelector = env('SCAN_PASSWORD_SELECTOR') ?? 'input[type="password"], input[name="password"]';
  const submitSelector = env('SCAN_SUBMIT_SELECTOR') ?? 'button[type="submit"], input[type="submit"]';
  if (!loginPath) {
    lines.push('## Login');
    lines.push('- Skipped: set `SCAN_LOGIN_PATH` to enable login smoke checks.');
    lines.push('');
    return;
  }
  const loginUrl = new URL(loginPath, baseUrl).toString();
  lines.push('## Login');
  lines.push(`- Login URL: ${loginUrl}`);
  await gotoWithStatus(page, loginUrl);
  if (!username || !password) {
    lines.push('- Skipped: set `SCAN_USERNAME`/`SCAN_PASSWORD` or `CLOAKBROWSER_USERNAME`/`CLOAKBROWSER_PASSWORD`.');
    lines.push('');
    return;
  }
  await page.locator(usernameSelector).first().fill(username);
  await page.locator(passwordSelector).first().fill(password);
  await page.locator(submitSelector).first().click();
  // Fixed settle time: there is no reliable post-login signal for arbitrary
  // apps, so record whatever state the app reaches after 2.5s.
  await page.waitForTimeout(2500);
  lines.push(`- After submit URL: ${page.url()}`);
  lines.push(`- Cookie count: ${(await page.context().cookies()).length}`);
  lines.push('');
}
// Visit each configured route, recording HTTP status, final URL, and the
// first <h1> text, then append a '## Route Checks' markdown section to
// `lines`.
async function checkRoutes(page: any, baseUrl: string, lines: string[]) {
  const routes = getRoutes(baseUrl);
  const routeChecks: RouteCheck[] = [];
  for (const url of routes) {
    const result = await gotoWithStatus(page, url);
    const heading = await textOrNull(page, 'h1');
    routeChecks.push({
      route: url,
      result,
      heading,
    });
  }
  lines.push('## Route Checks');
  for (const check of routeChecks) {
    // Render URLs relative to baseUrl where possible ('/' for the root);
    // off-base URLs (e.g. external redirects) are shown in full.
    const relativeUrl = check.route.startsWith(baseUrl) ? check.route.slice(baseUrl.length) || '/' : check.route;
    const finalPath = check.result.url.startsWith(baseUrl)
      ? check.result.url.slice(baseUrl.length) || '/'
      : check.result.url;
    const suffix = check.heading ? `, h1="${check.heading}"` : '';
    const errorSuffix = check.result.error ? `, error="${check.result.error}"` : '';
    lines.push(
      `- ${relativeUrl} → status ${check.result.status ?? 'ERR'} (final ${finalPath})${suffix}${errorSuffix}`
    );
  }
  lines.push('');
}
// Entry point: build the markdown report from env-driven configuration.
// The browser is always closed via finally; the report file is only written
// when all checks complete without throwing.
async function main() {
  const baseUrl = env('SCAN_BASE_URL') ?? DEFAULT_BASE_URL;
  const reportPath = resolve(env('SCAN_REPORT_PATH') ?? DEFAULT_REPORT_PATH);
  // Headless unless the first defined flag is something other than 'true'.
  const headless = (env('SCAN_HEADLESS') ?? env('CLOAKBROWSER_HEADLESS') ?? 'true') === 'true';
  const { page, browser } = await getPage({ headless });
  // Report body accumulates as markdown lines; joined and written at the end.
  const lines: string[] = [];
  lines.push('# Web Automation Scan (local)');
  lines.push('');
  lines.push(`- Base URL: ${baseUrl}`);
  lines.push(`- Timestamp: ${new Date().toISOString()}`);
  lines.push(`- Headless: ${headless}`);
  lines.push(`- Report Path: ${reportPath}`);
  lines.push('');
  try {
    await loginIfConfigured(page, baseUrl, lines);
    await checkRoutes(page, baseUrl, lines);
    lines.push('## Notes');
    lines.push('- This generic smoke helper records route availability and top-level headings for a local app.');
    lines.push('- Configure login and route coverage with `SCAN_*` environment variables.');
  } finally {
    await browser.close();
  }
  mkdirSync(dirname(reportPath), { recursive: true });
  writeFileSync(reportPath, `${lines.join('\n')}\n`, 'utf-8');
  console.log(`Report written to ${reportPath}`);
}
// Top-level failure handler: log the error and let the process exit with a
// non-zero code once pending work drains.
void main().catch((error) => {
  process.exitCode = 1;
  console.error(error);
});

View File

@@ -1,28 +1,25 @@
import { Camoufox } from 'camoufox-js'; import { launchPersistentContext } from 'cloakbrowser';
import { homedir } from 'os'; import { homedir } from 'os';
import { join } from 'path'; import { join } from 'path';
import { mkdirSync, existsSync } from 'fs'; import { mkdirSync, existsSync } from 'fs';
async function test() { async function test() {
const profilePath = join(homedir(), '.camoufox-profile'); const profilePath = join(homedir(), '.cloakbrowser-profile');
if (!existsSync(profilePath)) { if (!existsSync(profilePath)) {
mkdirSync(profilePath, { recursive: true }); mkdirSync(profilePath, { recursive: true });
} }
console.log('Profile path:', profilePath); console.log('Profile path:', profilePath);
console.log('Launching with full options...'); console.log('Launching CloakBrowser with full options...');
const browser = await Camoufox({ const browser = await launchPersistentContext({
headless: true, headless: true,
user_data_dir: profilePath, userDataDir: profilePath,
// humanize: 1.5, // Test without this first humanize: true,
// geoip: true, // Test without this first
// enable_cache: true,
// block_webrtc: false,
}); });
console.log('Browser launched'); console.log('Browser launched');
const page = await browser.newPage(); const page = browser.pages()[0] || await browser.newPage();
console.log('Page created'); console.log('Page created');
await page.goto('https://github.com', { timeout: 30000 }); await page.goto('https://github.com', { timeout: 30000 });

View File

@@ -1,10 +1,11 @@
import { Camoufox } from 'camoufox-js'; import { launch } from 'cloakbrowser';
async function test() { async function test() {
console.log('Launching Camoufox with minimal config...'); console.log('Launching CloakBrowser with minimal config...');
const browser = await Camoufox({ const browser = await launch({
headless: true, headless: true,
humanize: true,
}); });
console.log('Browser launched'); console.log('Browser launched');

View File

@@ -1,24 +1,25 @@
import { Camoufox } from 'camoufox-js'; import { launchPersistentContext } from 'cloakbrowser';
import { homedir } from 'os'; import { homedir } from 'os';
import { join } from 'path'; import { join } from 'path';
import { mkdirSync, existsSync } from 'fs'; import { mkdirSync, existsSync } from 'fs';
async function test() { async function test() {
const profilePath = join(homedir(), '.camoufox-profile'); const profilePath = join(homedir(), '.cloakbrowser-profile');
if (!existsSync(profilePath)) { if (!existsSync(profilePath)) {
mkdirSync(profilePath, { recursive: true }); mkdirSync(profilePath, { recursive: true });
} }
console.log('Profile path:', profilePath); console.log('Profile path:', profilePath);
console.log('Launching with user_data_dir...'); console.log('Launching with persistent userDataDir...');
const browser = await Camoufox({ const browser = await launchPersistentContext({
headless: true, headless: true,
user_data_dir: profilePath, userDataDir: profilePath,
humanize: true,
}); });
console.log('Browser launched'); console.log('Browser launched');
const page = await browser.newPage(); const page = browser.pages()[0] || await browser.newPage();
console.log('Page created'); console.log('Page created');
await page.goto('https://example.com', { timeout: 30000 }); await page.goto('https://example.com', { timeout: 30000 });

View File

@@ -1,48 +1,99 @@
--- ---
name: web-automation name: web-automation
description: Browse and scrape web pages using Playwright with Camoufox anti-detection browser. Use when automating web workflows, extracting page content to markdown, handling authenticated sessions, or scraping websites with bot protection. description: Browse and scrape web pages using Playwright-compatible CloakBrowser. Use when automating web workflows, extracting rendered page content, handling authenticated sessions, or running multi-step browser flows.
--- ---
# Web Automation with Camoufox (Codex) # Web Automation with CloakBrowser (Codex)
Automated web browsing and scraping using Playwright with Camoufox anti-detection browser. Automated web browsing and scraping using Playwright-compatible CloakBrowser with two execution paths:
- one-shot extraction via `extract.js`
- broader stateful automation via `auth.ts`, `browse.ts`, `flow.ts`, `scan-local-app.ts`, and `scrape.ts`
## Requirements ## Requirements
- Node.js 20+ - Node.js 20+
- pnpm - pnpm
- Network access to download browser binaries - Network access to download the CloakBrowser binary on first use
## First-Time Setup ## First-Time Setup
```bash ```bash
cd ~/.codex/skills/web-automation/scripts cd ~/.codex/skills/web-automation/scripts
pnpm install pnpm install
npx camoufox-js fetch npx cloakbrowser install
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
```
## Updating CloakBrowser
```bash
cd ~/.codex/skills/web-automation/scripts
pnpm up cloakbrowser playwright-core
npx cloakbrowser install
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
``` ```
## Prerequisite Check (MANDATORY) ## Prerequisite Check (MANDATORY)
Before running any automation, verify Playwright + Camoufox dependencies are installed and scripts are configured to use Camoufox. Before running automation, verify CloakBrowser and Playwright Core are installed and wired correctly.
```bash ```bash
cd ~/.codex/skills/web-automation/scripts cd ~/.codex/skills/web-automation/scripts
node -e "require.resolve('playwright-core/package.json');require.resolve('camoufox-js/package.json');console.log('OK: playwright-core + camoufox-js installed')" node check-install.js
node -e "const fs=require('fs');const t=fs.readFileSync('browse.ts','utf8');if(!/camoufox-js/.test(t)){throw new Error('browse.ts is not configured for Camoufox')}console.log('OK: Camoufox integration detected in browse.ts')"
``` ```
If any check fails, stop and return: If the check fails, stop and return:
"Missing dependency/config: web-automation requires `playwright-core` + `camoufox-js` and Camoufox-based scripts. Run setup in this skill, then retry." "Missing dependency/config: web-automation requires `cloakbrowser` and `playwright-core` with CloakBrowser-based scripts. Run setup in this skill, then retry."
If runtime fails with missing native bindings for `better-sqlite3` or `esbuild`, run:
```bash
cd ~/.codex/skills/web-automation/scripts
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
```
## When To Use Which Command
- Use `node extract.js "<URL>"` for a one-shot rendered fetch with JSON output.
- Use `npx tsx scrape.ts ...` when you need markdown extraction, Readability cleanup, or selector-based scraping.
- Use `npx tsx browse.ts ...`, `auth.ts`, or `flow.ts` when the task needs login handling, persistent sessions, clicks, typing, screenshots, or multi-step navigation.
- Use `npx tsx scan-local-app.ts` when you need a configurable local-app smoke pass driven by `SCAN_*` and `CLOAKBROWSER_*` environment variables.
## Quick Reference ## Quick Reference
- Install check: `node check-install.js`
- One-shot JSON extract: `node extract.js "https://example.com"`
- Browse page: `npx tsx browse.ts --url "https://example.com"` - Browse page: `npx tsx browse.ts --url "https://example.com"`
- Scrape markdown: `npx tsx scrape.ts --url "https://example.com" --mode main --output page.md` - Scrape markdown: `npx tsx scrape.ts --url "https://example.com" --mode main --output page.md`
- Authenticate: `npx tsx auth.ts --url "https://example.com/login"` - Authenticate: `npx tsx auth.ts --url "https://example.com/login"`
- Natural-language flow: `npx tsx flow.ts --instruction 'go to https://example.com then click on "Login" then type "user@example.com" in #email then press enter'`
- Local app smoke scan: `SCAN_BASE_URL=http://localhost:3000 SCAN_ROUTES=/,/dashboard npx tsx scan-local-app.ts`
## Local App Smoke Scan
`scan-local-app.ts` is intentionally generic. Configure it with environment variables instead of editing the file:
- `SCAN_BASE_URL`
- `SCAN_LOGIN_PATH`
- `SCAN_USERNAME`
- `SCAN_PASSWORD`
- `SCAN_USERNAME_SELECTOR`
- `SCAN_PASSWORD_SELECTOR`
- `SCAN_SUBMIT_SELECTOR`
- `SCAN_ROUTES`
- `SCAN_REPORT_PATH`
- `SCAN_HEADLESS`
If `SCAN_USERNAME` or `SCAN_PASSWORD` are omitted, the script falls back to `CLOAKBROWSER_USERNAME` and `CLOAKBROWSER_PASSWORD`.
## Notes ## Notes
- Sessions persist in Camoufox profile storage. - Sessions persist in CloakBrowser profile storage.
- Use `--wait` for dynamic pages. - Use `--wait` for dynamic pages.
- Use `--mode selector --selector "..."` for targeted extraction. - Use `--mode selector --selector "..."` for targeted extraction.
- `extract.js` keeps a bounded stealth/rendered fetch path without needing a long-lived automation session.

View File

@@ -41,8 +41,8 @@ function getCredentials(options?: {
username?: string; username?: string;
password?: string; password?: string;
}): { username: string; password: string } | null { }): { username: string; password: string } | null {
const username = options?.username || process.env.CAMOUFOX_USERNAME; const username = options?.username || process.env.CLOAKBROWSER_USERNAME;
const password = options?.password || process.env.CAMOUFOX_PASSWORD; const password = options?.password || process.env.CLOAKBROWSER_PASSWORD;
if (!username || !password) { if (!username || !password) {
return null; return null;
@@ -450,7 +450,7 @@ export async function navigateAuthenticated(
if (!credentials) { if (!credentials) {
throw new Error( throw new Error(
'Authentication required but no credentials provided. ' + 'Authentication required but no credentials provided. ' +
'Set CAMOUFOX_USERNAME and CAMOUFOX_PASSWORD environment variables.' 'Set CLOAKBROWSER_USERNAME and CLOAKBROWSER_PASSWORD environment variables.'
); );
} }
@@ -504,8 +504,8 @@ Usage:
Options: Options:
-u, --url <url> URL to authenticate (required) -u, --url <url> URL to authenticate (required)
-t, --type <type> Auth type: auto, form, or msal (default: auto) -t, --type <type> Auth type: auto, form, or msal (default: auto)
--username <user> Username/email (or set CAMOUFOX_USERNAME env var) --username <user> Username/email (or set CLOAKBROWSER_USERNAME env var)
--password <pass> Password (or set CAMOUFOX_PASSWORD env var) --password <pass> Password (or set CLOAKBROWSER_PASSWORD env var)
--headless <bool> Run in headless mode (default: false for auth) --headless <bool> Run in headless mode (default: false for auth)
-h, --help Show this help message -h, --help Show this help message
@@ -515,8 +515,8 @@ Auth Types:
msal Microsoft SSO (login.microsoftonline.com) msal Microsoft SSO (login.microsoftonline.com)
Environment Variables: Environment Variables:
CAMOUFOX_USERNAME Default username/email for authentication CLOAKBROWSER_USERNAME Default username/email for authentication
CAMOUFOX_PASSWORD Default password for authentication CLOAKBROWSER_PASSWORD Default password for authentication
Examples: Examples:
# Interactive login (no credentials, opens browser) # Interactive login (no credentials, opens browser)
@@ -527,11 +527,11 @@ Examples:
--username "user@example.com" --password "secret" --username "user@example.com" --password "secret"
# Microsoft SSO login # Microsoft SSO login
CAMOUFOX_USERNAME=user@company.com CAMOUFOX_PASSWORD=secret \\ CLOAKBROWSER_USERNAME=user@company.com CLOAKBROWSER_PASSWORD=secret \\
npx tsx auth.ts --url "https://internal.company.com" --type msal npx tsx auth.ts --url "https://internal.company.com" --type msal
Notes: Notes:
- Session is saved to ~/.camoufox-profile/ for persistence - Session is saved to ~/.cloakbrowser-profile/ for persistence
- After successful auth, subsequent browses will be authenticated - After successful auth, subsequent browses will be authenticated
- Use --headless false if you need to handle MFA manually - Use --headless false if you need to handle MFA manually
`); `);

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env npx tsx #!/usr/bin/env npx tsx
/** /**
* Browser launcher using Camoufox with persistent profile * Browser launcher using CloakBrowser with persistent profile
* *
* Usage: * Usage:
* npx tsx browse.ts --url "https://example.com" * npx tsx browse.ts --url "https://example.com"
@@ -9,14 +9,13 @@
* npx tsx browse.ts --url "https://example.com" --headless false --wait 5000 * npx tsx browse.ts --url "https://example.com" --headless false --wait 5000
*/ */
import { Camoufox } from 'camoufox-js'; import { launchPersistentContext } from 'cloakbrowser';
import { homedir } from 'os'; import { homedir } from 'os';
import { join } from 'path'; import { join } from 'path';
import { existsSync, mkdirSync } from 'fs'; import { existsSync, mkdirSync } from 'fs';
import parseArgs from 'minimist'; import parseArgs from 'minimist';
import type { Page, BrowserContext } from 'playwright-core'; import type { Page, BrowserContext } from 'playwright-core';
// Types
interface BrowseOptions { interface BrowseOptions {
url: string; url: string;
headless?: boolean; headless?: boolean;
@@ -33,55 +32,54 @@ interface BrowseResult {
screenshotPath?: string; screenshotPath?: string;
} }
// Get profile directory function sleep(ms: number): Promise<void> {
return new Promise((resolve) => setTimeout(resolve, ms));
}
const getProfilePath = (): string => { const getProfilePath = (): string => {
const customPath = process.env.CAMOUFOX_PROFILE_PATH; const customPath = process.env.CLOAKBROWSER_PROFILE_PATH;
if (customPath) return customPath; if (customPath) return customPath;
const profileDir = join(homedir(), '.camoufox-profile'); const profileDir = join(homedir(), '.cloakbrowser-profile');
if (!existsSync(profileDir)) { if (!existsSync(profileDir)) {
mkdirSync(profileDir, { recursive: true }); mkdirSync(profileDir, { recursive: true });
} }
return profileDir; return profileDir;
}; };
// Launch browser with persistent profile
export async function launchBrowser(options: { export async function launchBrowser(options: {
headless?: boolean; headless?: boolean;
}): Promise<BrowserContext> { }): Promise<BrowserContext> {
const profilePath = getProfilePath(); const profilePath = getProfilePath();
const headless = const envHeadless = process.env.CLOAKBROWSER_HEADLESS;
options.headless ?? const headless = options.headless ?? (envHeadless ? envHeadless === 'true' : true);
(process.env.CAMOUFOX_HEADLESS ? process.env.CAMOUFOX_HEADLESS === 'true' : true);
console.log(`Using profile: ${profilePath}`); console.log(`Using profile: ${profilePath}`);
console.log(`Headless mode: ${headless}`); console.log(`Headless mode: ${headless}`);
const browser = await Camoufox({ const context = await launchPersistentContext({
user_data_dir: profilePath, userDataDir: profilePath,
headless, headless,
humanize: true,
}); });
return browser; return context;
} }
// Browse to URL and optionally take screenshot
export async function browse(options: BrowseOptions): Promise<BrowseResult> { export async function browse(options: BrowseOptions): Promise<BrowseResult> {
const browser = await launchBrowser({ headless: options.headless }); const browser = await launchBrowser({ headless: options.headless });
const page = await browser.newPage(); const page = browser.pages()[0] || await browser.newPage();
try { try {
// Navigate to URL
console.log(`Navigating to: ${options.url}`); console.log(`Navigating to: ${options.url}`);
await page.goto(options.url, { await page.goto(options.url, {
timeout: options.timeout ?? 60000, timeout: options.timeout ?? 60000,
waitUntil: 'domcontentloaded', waitUntil: 'domcontentloaded',
}); });
// Wait if specified
if (options.wait) { if (options.wait) {
console.log(`Waiting ${options.wait}ms...`); console.log(`Waiting ${options.wait}ms...`);
await page.waitForTimeout(options.wait); await sleep(options.wait);
} }
const result: BrowseResult = { const result: BrowseResult = {
@@ -92,7 +90,6 @@ export async function browse(options: BrowseOptions): Promise<BrowseResult> {
console.log(`Page title: ${result.title}`); console.log(`Page title: ${result.title}`);
console.log(`Final URL: ${result.url}`); console.log(`Final URL: ${result.url}`);
// Take screenshot if requested
if (options.screenshot) { if (options.screenshot) {
const outputPath = options.output ?? 'screenshot.png'; const outputPath = options.output ?? 'screenshot.png';
await page.screenshot({ path: outputPath, fullPage: true }); await page.screenshot({ path: outputPath, fullPage: true });
@@ -100,11 +97,10 @@ export async function browse(options: BrowseOptions): Promise<BrowseResult> {
console.log(`Screenshot saved: ${outputPath}`); console.log(`Screenshot saved: ${outputPath}`);
} }
// If interactive mode, keep browser open
if (options.interactive) { if (options.interactive) {
console.log('\nInteractive mode - browser will stay open.'); console.log('\nInteractive mode - browser will stay open.');
console.log('Press Ctrl+C to close.'); console.log('Press Ctrl+C to close.');
await new Promise(() => {}); // Keep running await new Promise(() => {});
} }
return result; return result;
@@ -115,16 +111,14 @@ export async function browse(options: BrowseOptions): Promise<BrowseResult> {
} }
} }
// Export page for use in other scripts
export async function getPage(options?: { export async function getPage(options?: {
headless?: boolean; headless?: boolean;
}): Promise<{ page: Page; browser: BrowserContext }> { }): Promise<{ page: Page; browser: BrowserContext }> {
const browser = await launchBrowser({ headless: options?.headless }); const browser = await launchBrowser({ headless: options?.headless });
const page = await browser.newPage(); const page = browser.pages()[0] || await browser.newPage();
return { page, browser }; return { page, browser };
} }
// CLI entry point
async function main() { async function main() {
const args = parseArgs(process.argv.slice(2), { const args = parseArgs(process.argv.slice(2), {
string: ['url', 'output'], string: ['url', 'output'],
@@ -145,7 +139,7 @@ async function main() {
if (args.help || !args.url) { if (args.help || !args.url) {
console.log(` console.log(`
Web Browser with Camoufox Web Browser with CloakBrowser
Usage: Usage:
npx tsx browse.ts --url <url> [options] npx tsx browse.ts --url <url> [options]
@@ -166,8 +160,8 @@ Examples:
npx tsx browse.ts --url "https://example.com" --headless false --interactive npx tsx browse.ts --url "https://example.com" --headless false --interactive
Environment Variables: Environment Variables:
CAMOUFOX_PROFILE_PATH Custom profile directory (default: ~/.camoufox-profile/) CLOAKBROWSER_PROFILE_PATH Custom profile directory (default: ~/.cloakbrowser-profile/)
CAMOUFOX_HEADLESS Default headless mode (true/false) CLOAKBROWSER_HEADLESS Default headless mode (true/false)
`); `);
process.exit(args.help ? 0 : 1); process.exit(args.help ? 0 : 1);
} }
@@ -188,7 +182,6 @@ Environment Variables:
} }
} }
// Run if executed directly
const isMainModule = process.argv[1]?.includes('browse.ts'); const isMainModule = process.argv[1]?.includes('browse.ts');
if (isMainModule) { if (isMainModule) {
main(); main();

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env node
import fs from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
// ESM equivalents of CommonJS __filename/__dirname, used to locate sibling
// files next to this script.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Emit a single JSON error line on stderr, then abort with a non-zero exit
// code. `details` is included only when provided.
function fail(message, details) {
  const payload = details ? { error: message, details } : { error: message };
  process.stderr.write(`${JSON.stringify(payload)}\n`);
  process.exit(1);
}
// Verify that the runtime dependencies resolve and that browse.ts is wired
// to CloakBrowser; prints OK lines on success, fails with JSON on stderr
// otherwise.
async function main() {
  try {
    // Importing (rather than require.resolve) also catches broken installs.
    await import("cloakbrowser");
    await import("playwright-core");
  } catch (error) {
    fail(
      "Missing dependency/config: web-automation requires cloakbrowser and playwright-core.",
      error instanceof Error ? error.message : String(error)
    );
  }
  const browsePath = path.join(__dirname, "browse.ts");
  const browseSource = fs.readFileSync(browsePath, "utf8");
  // Cheap static check: browse.ts must import cloakbrowser and use its
  // persistent-context launcher.
  if (!/launchPersistentContext/.test(browseSource) || !/from ['"]cloakbrowser['"]/.test(browseSource)) {
    fail("browse.ts is not configured for CloakBrowser.");
  }
  process.stdout.write("OK: cloakbrowser + playwright-core installed\n");
  process.stdout.write("OK: CloakBrowser integration detected in browse.ts\n");
}
// Catch-all for unexpected failures in the check itself.
main().catch((error) => {
  const details = error instanceof Error ? error.message : String(error);
  fail("Install check failed.", details);
});

View File

@@ -0,0 +1,188 @@
#!/usr/bin/env node
import fs from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
// Timing and size bounds for the one-shot extraction flow.
const DEFAULT_WAIT_MS = 5000; // post-navigation settle time when none is given
const MAX_WAIT_MS = 20000; // upper bound on a user-supplied wait
const NAV_TIMEOUT_MS = 30000;
const EXTRA_CHALLENGE_WAIT_MS = 8000; // extra settle time when a challenge page is detected
const CONTENT_LIMIT = 12000;
// ESM equivalents of CommonJS __filename/__dirname.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
function fail(message, details) {
  // Print a one-line JSON error to stderr, then abort the process with a
  // non-zero exit code so shell callers can detect the failure.
  const payload = details ? { error: message, details } : { error: message };
  process.stderr.write(JSON.stringify(payload) + "\n");
  process.exit(1);
}
function parseWaitTime(raw) {
  // Interpret the WAIT_TIME input as a millisecond count: fall back to the
  // default for missing/invalid/negative values and cap at MAX_WAIT_MS.
  const requested = Number.parseInt(raw || `${DEFAULT_WAIT_MS}`, 10);
  const usable = Number.isFinite(requested) && requested >= 0;
  return usable ? Math.min(requested, MAX_WAIT_MS) : DEFAULT_WAIT_MS;
}
function parseTarget(rawUrl) {
  // Validate the CLI-supplied URL: it must be present, syntactically valid,
  // and http(s). Each guard calls fail(), which exits the process, so code
  // after a failed guard never runs.
  if (!rawUrl) {
    fail("Missing URL. Usage: node extract.js <URL>");
  }
  let parsed;
  try {
    parsed = new URL(rawUrl);
  } catch (error) {
    fail("Invalid URL.", error.message);
  }
  if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
    fail("Only http and https URLs are allowed.");
  }
  // Return the canonical serialization of the validated URL.
  return parsed.toString();
}
function ensureParentDir(filePath) {
  // Make sure the directory that will hold `filePath` exists. A no-op for
  // empty paths; mkdirSync with recursive tolerates pre-existing directories.
  if (!filePath) return;
  const parent = path.dirname(filePath);
  fs.mkdirSync(parent, { recursive: true });
}
function sleep(ms) {
  // Promise-based delay helper for use with await.
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
async function detectChallenge(page) {
  // Heuristically decide whether the page is showing a bot-protection
  // interstitial (Cloudflare "just a moment", press-and-hold prompts, or a
  // challenge iframe). Any evaluation error is treated as "no challenge".
  try {
    return await page.evaluate(() => {
      const bodyText = (document.body?.innerText || "").toLowerCase();
      const phrases = [
        "checking your browser",
        "just a moment",
        "verify you are human",
        "press and hold",
      ];
      if (phrases.some((phrase) => bodyText.includes(phrase))) return true;
      return (
        document.querySelector('iframe[src*="challenge"]') !== null ||
        document.querySelector('iframe[src*="cloudflare"]') !== null
      );
    });
  } catch {
    return false;
  }
}
async function loadCloakBrowser() {
  // Import cloakbrowser lazily so a missing install yields a friendly JSON
  // error (via fail) instead of a raw module-resolution stack trace.
  let mod;
  try {
    mod = await import("cloakbrowser");
  } catch (error) {
    fail(
      "CloakBrowser is not installed for this skill. Run pnpm install in this skill's scripts directory first.",
      error.message
    );
  }
  return mod;
}
async function runWithStderrLogs(fn) {
  // Reroute console.log/console.error to stderr while `fn` runs so that
  // third-party logging cannot pollute the JSON document on stdout. The
  // originals are always restored, even if `fn` throws.
  const saved = { log: console.log, error: console.error };
  const toStderr = (...parts) => process.stderr.write(`${parts.join(" ")}\n`);
  console.log = toStderr;
  console.error = toStderr;
  try {
    return await fn();
  } finally {
    console.log = saved.log;
    console.error = saved.error;
  }
}
// Entry point: scrape a single URL with CloakBrowser and emit one JSON
// document on stdout. Behavior is driven by environment variables:
//   WAIT_TIME        ms to wait after DOMContentLoaded (clamped, see parseWaitTime)
//   SCREENSHOT_PATH  when set, save a viewport screenshot to this path
//   SAVE_HTML        "true" to also persist the full page HTML
//   HEADLESS         "false" to run the browser headed (default headless)
//   USER_AGENT       optional user-agent override
// All diagnostic logging is redirected to stderr; stdout carries only JSON.
async function main() {
  const requestedUrl = parseTarget(process.argv[2]);
  const waitTime = parseWaitTime(process.env.WAIT_TIME);
  const screenshotPath = process.env.SCREENSHOT_PATH || "";
  const saveHtml = process.env.SAVE_HTML === "true";
  const headless = process.env.HEADLESS !== "false";
  const userAgent = process.env.USER_AGENT || undefined;
  const startedAt = Date.now();
  const { ensureBinary, launchContext } = await loadCloakBrowser();
  let context;
  try {
    // ensureBinary/launchContext may log; keep their output off stdout.
    await runWithStderrLogs(() => ensureBinary());
    context = await runWithStderrLogs(() => launchContext({
      headless,
      userAgent,
      locale: "en-US",
      viewport: { width: 1440, height: 900 },
      humanize: true,
    }));
    const page = await context.newPage();
    const response = await page.goto(requestedUrl, {
      waitUntil: "domcontentloaded",
      timeout: NAV_TIMEOUT_MS
    });
    // Let the page settle, then re-check once more after an extra grace
    // period if a bot challenge appears (it may clear on its own).
    await sleep(waitTime);
    let challengeDetected = await detectChallenge(page);
    if (challengeDetected) {
      await sleep(EXTRA_CHALLENGE_WAIT_MS);
      challengeDetected = await detectChallenge(page);
    }
    // Pull the visible text (truncated) and basic metadata in one evaluate.
    const extracted = await page.evaluate((contentLimit) => {
      const bodyText = document.body?.innerText || "";
      return {
        finalUrl: window.location.href,
        title: document.title || "",
        content: bodyText.slice(0, contentLimit),
        metaDescription:
          document.querySelector('meta[name="description"]')?.content ||
          document.querySelector('meta[property="og:description"]')?.content ||
          ""
      };
    }, CONTENT_LIMIT);
    const result = {
      requestedUrl,
      finalUrl: extracted.finalUrl,
      title: extracted.title,
      content: extracted.content,
      metaDescription: extracted.metaDescription,
      status: response ? response.status() : null,
      challengeDetected,
      elapsedSeconds: ((Date.now() - startedAt) / 1000).toFixed(2)
    };
    if (screenshotPath) {
      ensureParentDir(screenshotPath);
      await page.screenshot({ path: screenshotPath, fullPage: false, timeout: 10000 });
      result.screenshot = screenshotPath;
    }
    if (saveHtml) {
      // Save HTML next to the screenshot when one was requested; otherwise
      // fall back to a timestamped file alongside this script.
      const htmlTarget = screenshotPath
        ? screenshotPath.replace(/\.[^.]+$/, ".html")
        : path.resolve(__dirname, `page-${Date.now()}.html`);
      ensureParentDir(htmlTarget);
      fs.writeFileSync(htmlTarget, await page.content());
      result.htmlFile = htmlTarget;
    }
    process.stdout.write(`${JSON.stringify(result, null, 2)}\n`);
    await context.close();
  } catch (error) {
    // Best-effort cleanup before reporting the original failure.
    if (context) {
      try {
        await context.close();
      } catch {
        // Ignore close errors after the primary failure.
      }
    }
    fail("Scrape failed.", error.message);
  }
}
main();

View File

@@ -0,0 +1,329 @@
#!/usr/bin/env npx tsx
import parseArgs from 'minimist';
import type { Page } from 'playwright-core';
import { launchBrowser } from './browse';
// One executable action in a browser flow. Produced either by
// parseInstruction (natural-language input) or supplied directly as a JSON
// array via --steps, then executed in order by runSteps.
type Step =
  | { action: 'goto'; url: string }
  | { action: 'click'; selector?: string; text?: string; role?: string; name?: string }
  | { action: 'type'; selector?: string; text: string }
  | { action: 'press'; key: string; selector?: string }
  | { action: 'wait'; ms: number }
  | { action: 'screenshot'; path: string }
  | { action: 'extract'; selector: string; count?: number };
function normalizeNavigationUrl(rawUrl: string): string {
  // Validate that a step target is a well-formed http(s) URL and return its
  // canonical serialization; anything else is rejected with a descriptive error.
  let parsed: URL;
  try {
    parsed = new URL(rawUrl);
  } catch {
    throw new Error(`Invalid navigation URL: ${rawUrl}`);
  }
  if (parsed.protocol !== 'http:' && parsed.protocol !== 'https:') {
    throw new Error(`Only http and https URLs are allowed in flow steps: ${rawUrl}`);
  }
  return parsed.toString();
}
function normalizeKey(k: string): string {
  // Map common aliases onto Playwright's canonical key names; unrecognized
  // keys pass through unchanged, and empty input defaults to Enter.
  if (!k) return 'Enter';
  const aliases: Record<string, string> = {
    enter: 'Enter',
    return: 'Enter',
    tab: 'Tab',
    escape: 'Escape',
    esc: 'Escape',
  };
  return aliases[k.toLowerCase()] ?? k;
}
function splitInstructions(instruction: string): string[] {
  // Break a natural-language instruction into individual step strings.
  // Steps are separated by the standalone word "then" or by semicolons;
  // empty fragments are dropped.
  const fragments = instruction.split(/\bthen\b|;/gi);
  const steps: string[] = [];
  for (const fragment of fragments) {
    const trimmed = fragment.trim();
    if (trimmed) steps.push(trimmed);
  }
  return steps;
}
// Translate a natural-language instruction into a list of executable Steps.
// Each fragment (split on "then"/";") is matched against a small grammar,
// most-specific pattern first; an unrecognized fragment aborts with an error.
// Supported forms: "go to <url>", "click <role> \"name\"", "click \"text\"",
// "click <selector>", "type \"text\" [in <selector>]", "press <key> [in
// <selector>]", "wait Ns/Nms", and "screenshot <path>".
function parseInstruction(instruction: string): Step[] {
  const parts = splitInstructions(instruction);
  const steps: Step[] = [];
  for (const p of parts) {
    // go to https://... (also "open"/"navigate to"); URL is re-validated.
    const goto = p.match(/^(?:go to|open|navigate to)\s+(https?:\/\/\S+)/i);
    if (goto) {
      steps.push({ action: 'goto', url: normalizeNavigationUrl(goto[1]) });
      continue;
    }
    // click on "text" or click #selector or click button "name"
    // Role-qualified click must be checked before the plain-text form, since
    // both match strings like: click button "Submit".
    const clickRole = p.match(/^click\s+(button|link|textbox|img|image|tab)\s+"([^"]+)"$/i);
    if (clickRole) {
      // "image" is an alias for the ARIA role "img".
      const role = clickRole[1].toLowerCase() === 'image' ? 'img' : clickRole[1].toLowerCase();
      steps.push({ action: 'click', role, name: clickRole[2] });
      continue;
    }
    const clickText = p.match(/^click(?: on)?\s+"([^"]+)"/i);
    if (clickText) {
      steps.push({ action: 'click', text: clickText[1] });
      continue;
    }
    // Bare CSS selectors: #id, .class, or tag[attr=...]
    const clickSelector = p.match(/^click(?: on)?\s+(#[\w-]+|\.[\w-]+|[a-z]+\[[^\]]+\])/i);
    if (clickSelector) {
      steps.push({ action: 'click', selector: clickSelector[1] });
      continue;
    }
    // type "text" [in selector]
    const typeInto = p.match(/^type\s+"([^"]+)"\s+in\s+(.+)$/i);
    if (typeInto) {
      steps.push({ action: 'type', text: typeInto[1], selector: typeInto[2].trim() });
      continue;
    }
    const typeOnly = p.match(/^type\s+"([^"]+)"$/i);
    if (typeOnly) {
      steps.push({ action: 'type', text: typeOnly[1] });
      continue;
    }
    // press enter [in selector]; key names are normalized (e.g. esc -> Escape)
    const pressIn = p.match(/^press\s+(\w+)\s+in\s+(.+)$/i);
    if (pressIn) {
      steps.push({ action: 'press', key: normalizeKey(pressIn[1]), selector: pressIn[2].trim() });
      continue;
    }
    const pressOnly = p.match(/^press\s+(\w+)$/i);
    if (pressOnly) {
      steps.push({ action: 'press', key: normalizeKey(pressOnly[1]) });
      continue;
    }
    // wait 2s / wait 500ms (seconds are converted to milliseconds)
    const waitS = p.match(/^wait\s+(\d+)\s*s(?:ec(?:onds?)?)?$/i);
    if (waitS) {
      steps.push({ action: 'wait', ms: parseInt(waitS[1], 10) * 1000 });
      continue;
    }
    const waitMs = p.match(/^wait\s+(\d+)\s*ms$/i);
    if (waitMs) {
      steps.push({ action: 'wait', ms: parseInt(waitMs[1], 10) });
      continue;
    }
    // screenshot path
    const shot = p.match(/^screenshot(?: to)?\s+(.+)$/i);
    if (shot) {
      steps.push({ action: 'screenshot', path: shot[1].trim() });
      continue;
    }
    throw new Error(`Could not parse step: "${p}"`);
  }
  return steps;
}
function validateSteps(steps: Step[]): Step[] {
  // Re-validate any goto URLs supplied via --steps JSON (the natural-language
  // path already validates during parsing); other step kinds pass through.
  return steps.map((step) => {
    if (step.action !== 'goto') return step;
    return { ...step, url: normalizeNavigationUrl(step.url) };
  });
}
function escapeRegExp(value: string): string {
  // Backslash-escape every regex metacharacter so `value` can be embedded
  // in a pattern and match only literally.
  const specials = new Set(['.', '*', '+', '?', '^', '$', '{', '}', '(', ')', '|', '[', ']', '\\']);
  let escaped = '';
  for (const ch of value) {
    escaped += specials.has(ch) ? `\\${ch}` : ch;
  }
  return escaped;
}
function isLikelyLoginText(text: string): boolean {
  // Heuristic: does the clicked text look like a login/sign-in affordance
  // (English or Italian)?
  const loginPattern = /(login|accedi|sign\s*in|entra)/i;
  return loginPattern.test(text);
}
async function clickByText(page: Page, text: string): Promise<boolean> {
  // Try to click an element whose accessible name or text matches `text`.
  // Anchored (exact) matches are preferred over substring matches, and
  // buttons/links are tried before plain text nodes. Returns true as soon as
  // a click lands; false when every candidate fails or is absent.
  const exact = new RegExp(`^${escapeRegExp(text)}$`, 'i');
  const loose = new RegExp(escapeRegExp(text), 'i');
  for (const pattern of [exact, loose]) {
    const candidates = [
      page.getByRole('button', { name: pattern }).first(),
      page.getByRole('link', { name: pattern }).first(),
      page.getByText(pattern).first(),
    ];
    for (const candidate of candidates) {
      if (!(await candidate.count())) continue;
      try {
        await candidate.click({ timeout: 8000 });
        return true;
      } catch {
        // This candidate was not clickable; move on to the next one.
      }
    }
  }
  return false;
}
// Recovery path used when a text click for a login-ish label failed:
//   1. scan the DOM for anchors/buttons whose text contains a login term and
//      navigate directly to the first real (non-javascript:) login-looking href;
//   2. as a site-specific fallback, go straight to Corriere's login URL when
//      the current host is corriere.it or a subdomain.
// Returns true when a navigation was performed, false when the caller should
// treat the click as failed. Only activates when `requestedText` looks like a
// login affordance (see isLikelyLoginText).
async function fallbackLoginNavigation(page: Page, requestedText: string): Promise<boolean> {
  if (!isLikelyLoginText(requestedText)) return false;
  const current = new URL(page.url());
  // Collect hrefs of clickable elements whose text mentions a login term.
  const candidateLinks = await page.evaluate(() => {
    const loginTerms = ['login', 'accedi', 'sign in', 'entra'];
    const anchors = Array.from(document.querySelectorAll('a[href], a[onclick], button[onclick]')) as Array<HTMLAnchorElement | HTMLButtonElement>;
    return anchors
      .map((el) => {
        const text = (el.textContent || '').trim().toLowerCase();
        const href = (el as HTMLAnchorElement).getAttribute('href') || '';
        return { text, href };
      })
      .filter((x) => x.text && loginTerms.some((t) => x.text.includes(t)))
      .map((x) => x.href)
      .filter(Boolean);
  });
  // Prefer real URLs (not javascript:)
  const realCandidate = candidateLinks.find((h) => /login|account\/login/i.test(h) && !h.startsWith('javascript:'));
  if (realCandidate) {
    // Resolve relative hrefs against the current page URL before navigating.
    const target = new URL(realCandidate, page.url()).toString();
    await page.goto(target, { waitUntil: 'domcontentloaded', timeout: 60000 });
    return true;
  }
  // Site-specific fallback for Corriere
  if (/corriere\.it$/i.test(current.hostname) || /\.corriere\.it$/i.test(current.hostname)) {
    await page.goto('https://www.corriere.it/account/login', {
      waitUntil: 'domcontentloaded',
      timeout: 60000,
    });
    return true;
  }
  return false;
}
async function typeInBestTarget(page: Page, text: string, selector?: string) {
  // Fill `text` into the given selector, or — when none is provided — into
  // the first search-ish input on the page (search box, text input, textarea).
  // The target is clicked first to focus it, then filled.
  const target = selector
    ? page.locator(selector).first()
    : page.locator('input[name="q"], input[type="search"], input[type="text"], textarea').first();
  await target.click({ timeout: 10000 });
  await target.fill(text);
}
async function pressOnTarget(page: Page, key: string, selector?: string) {
  // Send a key press either to a specific element or, with no selector,
  // to whatever currently has keyboard focus on the page.
  if (!selector) {
    await page.keyboard.press(key);
    return;
  }
  await page.locator(selector).first().press(key);
}
async function runSteps(page: Page, steps: Step[]) {
  // Execute each parsed step in order against the live page. A failed step
  // throws and aborts the remainder of the flow. Click steps get a short
  // grace period afterwards for any resulting navigation to settle.
  for (const current of steps) {
    if (current.action === 'goto') {
      await page.goto(normalizeNavigationUrl(current.url), {
        waitUntil: 'domcontentloaded',
        timeout: 60000,
      });
      continue;
    }
    if (current.action === 'click') {
      // Resolution priority: explicit selector, then role+name, then text
      // (with a login-specific navigation fallback when the text click fails).
      if (current.selector) {
        await page.locator(current.selector).first().click({ timeout: 15000 });
      } else if (current.role && current.name) {
        await page
          .getByRole(current.role as any, { name: new RegExp(escapeRegExp(current.name), 'i') })
          .first()
          .click({ timeout: 15000 });
      } else if (current.text) {
        const clicked = await clickByText(page, current.text);
        if (!clicked && !(await fallbackLoginNavigation(page, current.text))) {
          throw new Error(`Could not click target text: ${current.text}`);
        }
      } else {
        throw new Error('click step missing selector/text/role');
      }
      try {
        await page.waitForLoadState('domcontentloaded', { timeout: 10000 });
      } catch {
        // No navigation after the click is fine.
      }
      continue;
    }
    if (current.action === 'type') {
      await typeInBestTarget(page, current.text, current.selector);
      continue;
    }
    if (current.action === 'press') {
      await pressOnTarget(page, current.key, current.selector);
      continue;
    }
    if (current.action === 'wait') {
      await page.waitForTimeout(current.ms);
      continue;
    }
    if (current.action === 'screenshot') {
      await page.screenshot({ path: current.path, fullPage: true });
      continue;
    }
    if (current.action === 'extract') {
      // Collect text contents, trim, drop empties, and cap at `count`.
      const texts = await page.locator(current.selector).allTextContents();
      const limit = current.count ?? texts.length;
      const out = texts.slice(0, limit).map((t) => t.trim()).filter(Boolean);
      console.log(JSON.stringify(out, null, 2));
      continue;
    }
    throw new Error('Unknown step');
  }
}
// CLI entry point: parse flags, build the step list (natural-language
// --instruction or JSON --steps), run it in a fresh CloakBrowser page, and
// always close the browser afterwards. --help (or no input) prints usage.
async function main() {
  const args = parseArgs(process.argv.slice(2), {
    string: ['instruction', 'steps'],
    boolean: ['headless', 'help'],
    default: { headless: true },
    alias: { i: 'instruction', s: 'steps', h: 'help' },
  });
  if (args.help || (!args.instruction && !args.steps)) {
    console.log(`
General Web Flow Runner (CloakBrowser)
Usage:
  npx tsx flow.ts --instruction "go to https://example.com then type \"hello\" then press enter"
  npx tsx flow.ts --steps '[{"action":"goto","url":"https://example.com"}]'
Supported natural steps:
  - go to/open/navigate to <url>
  - click on "Text"
  - click <css-selector>
  - type "text"
  - type "text" in <css-selector>
  - press <key>
  - press <key> in <css-selector>
  - wait <N>s | wait <N>ms
  - screenshot <path>
`);
    // Exit 0 for an explicit --help, 1 when invoked with no input at all.
    process.exit(args.help ? 0 : 1);
  }
  // JSON steps are re-validated (goto URLs); natural language is parsed.
  const steps = validateSteps(args.steps ? JSON.parse(args.steps) : parseInstruction(args.instruction));
  const browser = await launchBrowser({ headless: args.headless });
  const page = await browser.newPage();
  try {
    await runSteps(page, steps);
    console.log('Flow complete. Final URL:', page.url());
  } finally {
    // Always release the browser, even when a step throws.
    await browser.close();
  }
}
main().catch((e) => {
  console.error('Error:', e instanceof Error ? e.message : e);
  process.exit(1);
});

View File

@@ -1,19 +1,26 @@
{ {
"name": "web-automation-scripts", "name": "web-automation-scripts",
"version": "1.0.0", "version": "1.0.0",
"description": "Web browsing and scraping scripts using Camoufox", "description": "Web browsing and scraping scripts using CloakBrowser",
"type": "module", "type": "module",
"scripts": { "scripts": {
"check-install": "node check-install.js",
"extract": "node extract.js",
"browse": "tsx browse.ts", "browse": "tsx browse.ts",
"auth": "tsx auth.ts",
"flow": "tsx flow.ts",
"scrape": "tsx scrape.ts", "scrape": "tsx scrape.ts",
"fetch-browser": "npx camoufox-js fetch" "typecheck": "tsc --noEmit -p tsconfig.json",
"lint": "pnpm run typecheck && node --check check-install.js && node --check extract.js",
"fetch-browser": "npx cloakbrowser install"
}, },
"dependencies": { "dependencies": {
"@mozilla/readability": "^0.5.0", "@mozilla/readability": "^0.5.0",
"camoufox-js": "^0.8.5", "better-sqlite3": "^12.6.2",
"cloakbrowser": "^0.3.22",
"jsdom": "^24.0.0", "jsdom": "^24.0.0",
"minimist": "^1.2.8", "minimist": "^1.2.8",
"playwright-core": "^1.40.0", "playwright-core": "^1.59.1",
"turndown": "^7.1.2", "turndown": "^7.1.2",
"turndown-plugin-gfm": "^1.0.2" "turndown-plugin-gfm": "^1.0.2"
}, },

View File

@@ -11,9 +11,12 @@ importers:
'@mozilla/readability': '@mozilla/readability':
specifier: ^0.5.0 specifier: ^0.5.0
version: 0.5.0 version: 0.5.0
camoufox-js: better-sqlite3:
specifier: ^0.8.5 specifier: ^12.6.2
version: 0.8.5(playwright-core@1.57.0) version: 12.8.0
cloakbrowser:
specifier: ^0.3.22
version: 0.3.22(mmdb-lib@3.0.1)(playwright-core@1.59.1)
jsdom: jsdom:
specifier: ^24.0.0 specifier: ^24.0.0
version: 24.1.3 version: 24.1.3
@@ -21,8 +24,8 @@ importers:
specifier: ^1.2.8 specifier: ^1.2.8
version: 1.2.8 version: 1.2.8
playwright-core: playwright-core:
specifier: ^1.40.0 specifier: ^1.59.1
version: 1.57.0 version: 1.59.1
turndown: turndown:
specifier: ^7.1.2 specifier: ^7.1.2
version: 7.2.2 version: 7.2.2
@@ -238,13 +241,9 @@ packages:
cpu: [x64] cpu: [x64]
os: [win32] os: [win32]
'@isaacs/balanced-match@4.0.1': '@isaacs/fs-minipass@4.0.1':
resolution: {integrity: sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==} resolution: {integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==}
engines: {node: 20 || >=22} engines: {node: '>=18.0.0'}
'@isaacs/brace-expansion@5.0.0':
resolution: {integrity: sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==}
engines: {node: 20 || >=22}
'@mixmark-io/domino@2.2.0': '@mixmark-io/domino@2.2.0':
resolution: {integrity: sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw==} resolution: {integrity: sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw==}
@@ -253,10 +252,6 @@ packages:
resolution: {integrity: sha512-Z+CZ3QaosfFaTqvhQsIktyGrjFjSC0Fa4EMph4mqKnWhmyoGICsV/8QK+8HpXut6zV7zwfWwqDmEjtk1Qf6EgQ==} resolution: {integrity: sha512-Z+CZ3QaosfFaTqvhQsIktyGrjFjSC0Fa4EMph4mqKnWhmyoGICsV/8QK+8HpXut6zV7zwfWwqDmEjtk1Qf6EgQ==}
engines: {node: '>=14.0.0'} engines: {node: '>=14.0.0'}
'@sindresorhus/is@4.6.0':
resolution: {integrity: sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==}
engines: {node: '>=10'}
'@types/jsdom@21.1.7': '@types/jsdom@21.1.7':
resolution: {integrity: sha512-yOriVnggzrnQ3a9OKOCxaVuSug3w3/SbOj5i7VwXWZEyUNl3bLF9V3MfxGbZKuwqJOQyRfqXyROBB1CoZLFWzA==} resolution: {integrity: sha512-yOriVnggzrnQ3a9OKOCxaVuSug3w3/SbOj5i7VwXWZEyUNl3bLF9V3MfxGbZKuwqJOQyRfqXyROBB1CoZLFWzA==}
@@ -272,10 +267,6 @@ packages:
'@types/turndown@5.0.6': '@types/turndown@5.0.6':
resolution: {integrity: sha512-ru00MoyeeouE5BX4gRL+6m/BsDfbRayOskWqUvh7CLGW+UXxHQItqALa38kKnOiZPqJrtzJUgAC2+F0rL1S4Pg==} resolution: {integrity: sha512-ru00MoyeeouE5BX4gRL+6m/BsDfbRayOskWqUvh7CLGW+UXxHQItqALa38kKnOiZPqJrtzJUgAC2+F0rL1S4Pg==}
adm-zip@0.5.16:
resolution: {integrity: sha512-TGw5yVi4saajsSEgz25grObGHEUaDrniwvA2qwSC060KfqGPdglhvPMA2lPIoxs3PQIItj2iag35fONcQqgUaQ==}
engines: {node: '>=12.0'}
agent-base@7.1.4: agent-base@7.1.4:
resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==}
engines: {node: '>= 14'} engines: {node: '>= 14'}
@@ -286,12 +277,8 @@ packages:
base64-js@1.5.1: base64-js@1.5.1:
resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==}
baseline-browser-mapping@2.9.14: better-sqlite3@12.8.0:
resolution: {integrity: sha512-B0xUquLkiGLgHhpPBqvl7GWegWBUNuujQ6kXd/r1U38ElPT6Ok8KZ8e+FpUGEc2ZoRQUzq/aUnaKFc/svWUGSg==} resolution: {integrity: sha512-RxD2Vd96sQDjQr20kdP+F+dK/1OUNiVOl200vKBZY8u0vTwysfolF6Hq+3ZK2+h8My9YvZhHsF+RSGZW2VYrPQ==}
hasBin: true
better-sqlite3@12.6.0:
resolution: {integrity: sha512-FXI191x+D6UPWSze5IzZjhz+i9MK9nsuHsmTX9bXVl52k06AfZ2xql0lrgIUuzsMsJ7Vgl5kIptvDgBLIV3ZSQ==}
engines: {node: 20.x || 22.x || 23.x || 24.x || 25.x} engines: {node: 20.x || 22.x || 23.x || 24.x || 25.x}
bindings@1.5.0: bindings@1.5.0:
@@ -300,11 +287,6 @@ packages:
bl@4.1.0: bl@4.1.0:
resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==} resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==}
browserslist@4.28.1:
resolution: {integrity: sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==}
engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
hasBin: true
buffer@5.7.1: buffer@5.7.1:
resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==}
@@ -312,31 +294,33 @@ packages:
resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
callsites@3.1.0:
resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==}
engines: {node: '>=6'}
camoufox-js@0.8.5:
resolution: {integrity: sha512-20ihPbspAcOVSUTX9Drxxp0C116DON1n8OVA1eUDglWZiHwiHwFVFOMrIEBwAHMZpU11mIEH/kawJtstRIrDPA==}
engines: {node: '>= 20'}
hasBin: true
peerDependencies:
playwright-core: '*'
caniuse-lite@1.0.30001764:
resolution: {integrity: sha512-9JGuzl2M+vPL+pz70gtMF9sHdMFbY9FJaQBi186cHKH3pSzDvzoUJUPV6fqiKIMyXbud9ZLg4F3Yza1vJ1+93g==}
chownr@1.1.4: chownr@1.1.4:
resolution: {integrity: sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==} resolution: {integrity: sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==}
chownr@3.0.0:
resolution: {integrity: sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==}
engines: {node: '>=18'}
cloakbrowser@0.3.22:
resolution: {integrity: sha512-L2CWQiVdunhKslTli8HCe4INhaAt4npbvsM2Ox4/idqiRmT2BADndQ05eDS8TonNSWeWqbjsh04UhSZOD3B6mg==}
engines: {node: '>=18.0.0'}
hasBin: true
peerDependencies:
mmdb-lib: '>=2.0.0'
playwright-core: '>=1.40.0'
puppeteer-core: '>=21.0.0'
peerDependenciesMeta:
mmdb-lib:
optional: true
playwright-core:
optional: true
puppeteer-core:
optional: true
combined-stream@1.0.8: combined-stream@1.0.8:
resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==}
engines: {node: '>= 0.8'} engines: {node: '>= 0.8'}
commander@14.0.2:
resolution: {integrity: sha512-TywoWNNRbhoD0BXs1P3ZEScW8W5iKrnbithIl0YH+uCmBd0QpPOA8yc82DS3BIE5Ma6FnBVUsJ7wVUDz4dvOWQ==}
engines: {node: '>=20'}
cssstyle@4.6.0: cssstyle@4.6.0:
resolution: {integrity: sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==} resolution: {integrity: sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==}
engines: {node: '>=18'} engines: {node: '>=18'}
@@ -369,24 +353,14 @@ packages:
resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==}
engines: {node: '>=0.4.0'} engines: {node: '>=0.4.0'}
detect-europe-js@0.1.2:
resolution: {integrity: sha512-lgdERlL3u0aUdHocoouzT10d9I89VVhk0qNRmll7mXdGfJT1/wqZ2ZLA4oJAjeACPY5fT1wsbq2AT+GkuInsow==}
detect-libc@2.1.2: detect-libc@2.1.2:
resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==} resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==}
engines: {node: '>=8'} engines: {node: '>=8'}
dot-prop@6.0.1:
resolution: {integrity: sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==}
engines: {node: '>=10'}
dunder-proto@1.0.1: dunder-proto@1.0.1:
resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
electron-to-chromium@1.5.267:
resolution: {integrity: sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==}
end-of-stream@1.4.5: end-of-stream@1.4.5:
resolution: {integrity: sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==} resolution: {integrity: sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==}
@@ -415,10 +389,6 @@ packages:
engines: {node: '>=18'} engines: {node: '>=18'}
hasBin: true hasBin: true
escalade@3.2.0:
resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==}
engines: {node: '>=6'}
expand-template@2.0.3: expand-template@2.0.3:
resolution: {integrity: sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==} resolution: {integrity: sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==}
engines: {node: '>=6'} engines: {node: '>=6'}
@@ -426,10 +396,6 @@ packages:
file-uri-to-path@1.0.0: file-uri-to-path@1.0.0:
resolution: {integrity: sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==} resolution: {integrity: sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==}
fingerprint-generator@2.1.79:
resolution: {integrity: sha512-0dr3kTgvRYHleRPp6OBDcPb8amJmOyFr9aOuwnpN6ooWJ5XyT+/aL/SZ6CU4ZrEtzV26EyJ2Lg7PT32a0NdrRA==}
engines: {node: '>=16.0.0'}
form-data@4.0.5: form-data@4.0.5:
resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==}
engines: {node: '>= 6'} engines: {node: '>= 6'}
@@ -445,9 +411,6 @@ packages:
function-bind@1.1.2: function-bind@1.1.2:
resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==}
generative-bayesian-network@2.1.79:
resolution: {integrity: sha512-aPH+V2wO+HE0BUX1LbsM8Ak99gmV43lgh+D7GDteM0zgnPqiAwcK9JZPxMPZa3aJUleFtFaL1lAei8g9zNrDIA==}
get-intrinsic@1.3.0: get-intrinsic@1.3.0:
resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
@@ -462,10 +425,6 @@ packages:
github-from-package@0.0.0: github-from-package@0.0.0:
resolution: {integrity: sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==} resolution: {integrity: sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==}
glob@13.0.0:
resolution: {integrity: sha512-tvZgpqk6fz4BaNZ66ZsRaZnbHvP/jG3uKJvAZOwEVUL4RTA5nJeeLYfyN9/VA8NX/V3IBG+hkeuGpKjvELkVhA==}
engines: {node: 20 || >=22}
gopd@1.2.0: gopd@1.2.0:
resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
@@ -482,10 +441,6 @@ packages:
resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
header-generator@2.1.79:
resolution: {integrity: sha512-YvHx8teq4QmV5mz7wdPMsj9n1OZBPnZxA4QE+EOrtx7xbmGvd1gBvDNKCb5XqS4GR/TL75MU5hqMqqqANdILRg==}
engines: {node: '>=16.0.0'}
html-encoding-sniffer@4.0.0: html-encoding-sniffer@4.0.0:
resolution: {integrity: sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==} resolution: {integrity: sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==}
engines: {node: '>=18'} engines: {node: '>=18'}
@@ -505,74 +460,15 @@ packages:
ieee754@1.2.1: ieee754@1.2.1:
resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==}
impit-darwin-arm64@0.7.6:
resolution: {integrity: sha512-M7NQXkttyzqilWfzVkNCp7hApT69m0etyJkVpHze4bR5z1kJnHhdsb8BSdDv2dzvZL4u1JyqZNxq+qoMn84eUw==}
engines: {node: '>= 10'}
cpu: [arm64]
os: [darwin]
impit-darwin-x64@0.7.6:
resolution: {integrity: sha512-kikTesWirAwJp9JPxzGLoGVc+heBlEabWS5AhTkQedACU153vmuL90OBQikVr3ul2N0LPImvnuB+51wV0zDE6g==}
engines: {node: '>= 10'}
cpu: [x64]
os: [darwin]
impit-linux-arm64-gnu@0.7.6:
resolution: {integrity: sha512-H6GHjVr/0lG9VEJr6IHF8YLq+YkSIOF4k7Dfue2ygzUAj1+jZ5ZwnouhG/XrZHYW6EWsZmEAjjRfWE56Q0wDRQ==}
engines: {node: '>= 10'}
cpu: [arm64]
os: [linux]
impit-linux-arm64-musl@0.7.6:
resolution: {integrity: sha512-1sCB/UBVXLZTpGJsXRdNNSvhN9xmmQcYLMWAAB4Itb7w684RHX1pLoCb6ichv7bfAf6tgaupcFIFZNBp3ghmQA==}
engines: {node: '>= 10'}
cpu: [arm64]
os: [linux]
impit-linux-x64-gnu@0.7.6:
resolution: {integrity: sha512-yYhlRnZ4fhKt8kuGe0JK2WSHc8TkR6BEH0wn+guevmu8EOn9Xu43OuRvkeOyVAkRqvFnlZtMyySUo/GuSLz9Gw==}
engines: {node: '>= 10'}
cpu: [x64]
os: [linux]
impit-linux-x64-musl@0.7.6:
resolution: {integrity: sha512-sdGWyu+PCLmaOXy7Mzo4WP61ZLl5qpZ1L+VeXW+Ycazgu0e7ox0NZLdiLRunIrEzD+h0S+e4CyzNwaiP3yIolg==}
engines: {node: '>= 10'}
cpu: [x64]
os: [linux]
impit-win32-arm64-msvc@0.7.6:
resolution: {integrity: sha512-sM5deBqo0EuXg5GACBUMKEua9jIau/i34bwNlfrf/Amnw1n0GB4/RkuUh+sKiUcbNAntrRq+YhCq8qDP8IW19w==}
engines: {node: '>= 10'}
cpu: [arm64]
os: [win32]
impit-win32-x64-msvc@0.7.6:
resolution: {integrity: sha512-ry63ADGLCB/PU/vNB1VioRt2V+klDJ34frJUXUZBEv1kA96HEAg9AxUk+604o+UHS3ttGH2rkLmrbwHOdAct5Q==}
engines: {node: '>= 10'}
cpu: [x64]
os: [win32]
impit@0.7.6:
resolution: {integrity: sha512-AkS6Gv63+E6GMvBrcRhMmOREKpq5oJ0J5m3xwfkHiEs97UIsbpEqFmW3sFw/sdyOTDGRF5q4EjaLxtb922Ta8g==}
engines: {node: '>= 20'}
inherits@2.0.4: inherits@2.0.4:
resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==}
ini@1.3.8: ini@1.3.8:
resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==} resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==}
is-obj@2.0.0:
resolution: {integrity: sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==}
engines: {node: '>=8'}
is-potential-custom-element-name@1.0.1: is-potential-custom-element-name@1.0.1:
resolution: {integrity: sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==} resolution: {integrity: sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==}
is-standalone-pwa@0.1.1:
resolution: {integrity: sha512-9Cbovsa52vNQCjdXOzeQq5CnCbAcRk05aU62K20WO372NrTv0NxibLFCK6lQ4/iZEFdEA3p3t2VNOn8AJ53F5g==}
jsdom@24.1.3: jsdom@24.1.3:
resolution: {integrity: sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==} resolution: {integrity: sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==}
engines: {node: '>=18'} engines: {node: '>=18'}
@@ -582,32 +478,13 @@ packages:
canvas: canvas:
optional: true optional: true
language-subtag-registry@0.3.23:
resolution: {integrity: sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==}
language-tags@2.1.0:
resolution: {integrity: sha512-D4CgpyCt+61f6z2jHjJS1OmZPviAWM57iJ9OKdFFWSNgS7Udj9QVWqyGs/cveVNF57XpZmhSvMdVIV5mjLA7Vg==}
engines: {node: '>=22'}
lodash.isequal@4.5.0:
resolution: {integrity: sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ==}
deprecated: This package is deprecated. Use require('node:util').isDeepStrictEqual instead.
lru-cache@10.4.3: lru-cache@10.4.3:
resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==}
lru-cache@11.2.4:
resolution: {integrity: sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==}
engines: {node: 20 || >=22}
math-intrinsics@1.1.0: math-intrinsics@1.1.0:
resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
maxmind@5.0.3:
resolution: {integrity: sha512-oMtZwLrsp0LcZehfYKIirtwKMBycMMqMA1/Dc9/BlUqIEtXO75mIzMJ3PYCV1Ji+BpoUCk+lTzRfh9c+ptGdyQ==}
engines: {node: '>=12', npm: '>=6'}
mime-db@1.52.0: mime-db@1.52.0:
resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==}
engines: {node: '>= 0.6'} engines: {node: '>= 0.6'}
@@ -620,10 +497,6 @@ packages:
resolution: {integrity: sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==} resolution: {integrity: sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==}
engines: {node: '>=10'} engines: {node: '>=10'}
minimatch@10.1.1:
resolution: {integrity: sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==}
engines: {node: 20 || >=22}
minimist@1.2.8: minimist@1.2.8:
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
@@ -631,6 +504,10 @@ packages:
resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==}
engines: {node: '>=16 || 14 >=14.17'} engines: {node: '>=16 || 14 >=14.17'}
minizlib@3.1.0:
resolution: {integrity: sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==}
engines: {node: '>= 18'}
mkdirp-classic@0.5.3: mkdirp-classic@0.5.3:
resolution: {integrity: sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==} resolution: {integrity: sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==}
@@ -648,43 +525,26 @@ packages:
resolution: {integrity: sha512-zsFhmbkAzwhTft6nd3VxcG0cvJsT70rL+BIGHWVq5fi6MwGrHwzqKaxXE+Hl2GmnGItnDKPPkO5/LQqjVkIdFg==} resolution: {integrity: sha512-zsFhmbkAzwhTft6nd3VxcG0cvJsT70rL+BIGHWVq5fi6MwGrHwzqKaxXE+Hl2GmnGItnDKPPkO5/LQqjVkIdFg==}
engines: {node: '>=10'} engines: {node: '>=10'}
node-releases@2.0.27:
resolution: {integrity: sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==}
nwsapi@2.2.23: nwsapi@2.2.23:
resolution: {integrity: sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ==} resolution: {integrity: sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ==}
once@1.4.0: once@1.4.0:
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
ow@0.28.2:
resolution: {integrity: sha512-dD4UpyBh/9m4X2NVjA+73/ZPBRF+uF4zIMFvvQsabMiEK8x41L3rQ8EENOi35kyyoaJwNxEeJcP6Fj1H4U409Q==}
engines: {node: '>=12'}
parse5@7.3.0: parse5@7.3.0:
resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==} resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==}
path-scurry@2.0.1: playwright-core@1.59.1:
resolution: {integrity: sha512-oWyT4gICAu+kaA7QWk/jvCHWarMKNs6pXOGWKDTr7cw4IGcUbW+PeTfbaQiLGheFRpjo6O9J0PmyMfQPjH71oA==} resolution: {integrity: sha512-HBV/RJg81z5BiiZ9yPzIiClYV/QMsDCKUyogwH9p3MCP6IYjUFu/MActgYAvK0oWyV9NlwM3GLBjADyWgydVyg==}
engines: {node: 20 || >=22}
picocolors@1.1.1:
resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==}
playwright-core@1.57.0:
resolution: {integrity: sha512-agTcKlMw/mjBWOnD6kFZttAAGHgi/Nw0CZ2o6JqWSbMlI219lAFLZZCyqByTsvVAJq5XA5H8cA6PrvBRpBWEuQ==}
engines: {node: '>=18'} engines: {node: '>=18'}
hasBin: true hasBin: true
prebuild-install@7.1.3: prebuild-install@7.1.3:
resolution: {integrity: sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug==} resolution: {integrity: sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug==}
engines: {node: '>=10'} engines: {node: '>=10'}
deprecated: No longer maintained. Please contact the author of the relevant native addon; alternatives are available.
hasBin: true hasBin: true
progress@2.0.3:
resolution: {integrity: sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==}
engines: {node: '>=0.4.0'}
psl@1.15.0: psl@1.15.0:
resolution: {integrity: sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==} resolution: {integrity: sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==}
@@ -724,10 +584,6 @@ packages:
safer-buffer@2.1.2: safer-buffer@2.1.2:
resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==}
sax@1.4.4:
resolution: {integrity: sha512-1n3r/tGXO6b6VXMdFT54SHzT9ytu9yr7TaELowdYpMqY/Ao7EnlQGmAQ1+RatX7Tkkdm6hONI2owqNx2aZj5Sw==}
engines: {node: '>=11.0.0'}
saxes@6.0.0: saxes@6.0.0:
resolution: {integrity: sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==} resolution: {integrity: sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==}
engines: {node: '>=v12.22.7'} engines: {node: '>=v12.22.7'}
@@ -760,9 +616,9 @@ packages:
resolution: {integrity: sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==} resolution: {integrity: sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==}
engines: {node: '>=6'} engines: {node: '>=6'}
tiny-lru@11.4.5: tar@7.5.13:
resolution: {integrity: sha512-hkcz3FjNJfKXjV4mjQ1OrXSLAehg8Hw+cEZclOVT+5c/cWQWImQ9wolzTjth+dmmDe++p3bme3fTxz6Q4Etsqw==} resolution: {integrity: sha512-tOG/7GyXpFevhXVh8jOPJrmtRpOTsYqUIkVdVooZYJS/z8WhfQUX8RJILmeuJNinGAMSu1veBr4asSHFt5/hng==}
engines: {node: '>=12'} engines: {node: '>=18'}
tough-cookie@4.1.4: tough-cookie@4.1.4:
resolution: {integrity: sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==} resolution: {integrity: sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==}
@@ -772,9 +628,6 @@ packages:
resolution: {integrity: sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==} resolution: {integrity: sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==}
engines: {node: '>=18'} engines: {node: '>=18'}
tslib@2.8.1:
resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==}
tsx@4.21.0: tsx@4.21.0:
resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==} resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==}
engines: {node: '>=18.0.0'} engines: {node: '>=18.0.0'}
@@ -794,13 +647,6 @@ packages:
engines: {node: '>=14.17'} engines: {node: '>=14.17'}
hasBin: true hasBin: true
ua-is-frozen@0.1.2:
resolution: {integrity: sha512-RwKDW2p3iyWn4UbaxpP2+VxwqXh0jpvdxsYpZ5j/MLLiQOfbsV5shpgQiw93+KMYQPcteeMQ289MaAFzs3G9pw==}
ua-parser-js@2.0.7:
resolution: {integrity: sha512-CFdHVHr+6YfbktNZegH3qbYvYgC7nRNEUm2tk7nSFXSODUu4tDBpaFpP1jdXBUOKKwapVlWRfTtS8bCPzsQ47w==}
hasBin: true
undici-types@7.16.0: undici-types@7.16.0:
resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==} resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==}
@@ -808,22 +654,12 @@ packages:
resolution: {integrity: sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==} resolution: {integrity: sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==}
engines: {node: '>= 4.0.0'} engines: {node: '>= 4.0.0'}
update-browserslist-db@1.2.3:
resolution: {integrity: sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==}
hasBin: true
peerDependencies:
browserslist: '>= 4.21.0'
url-parse@1.5.10: url-parse@1.5.10:
resolution: {integrity: sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==} resolution: {integrity: sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==}
util-deprecate@1.0.2: util-deprecate@1.0.2:
resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
vali-date@1.0.0:
resolution: {integrity: sha512-sgECfZthyaCKW10N0fm27cg8HYTFK5qMWgypqkXMQ4Wbl/zZKx7xZICgcoxIIE+WFAP/MBL2EFwC/YvLxw3Zeg==}
engines: {node: '>=0.10.0'}
w3c-xmlserializer@5.0.0: w3c-xmlserializer@5.0.0:
resolution: {integrity: sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==} resolution: {integrity: sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==}
engines: {node: '>=18'} engines: {node: '>=18'}
@@ -864,17 +700,13 @@ packages:
resolution: {integrity: sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==} resolution: {integrity: sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==}
engines: {node: '>=18'} engines: {node: '>=18'}
xml2js@0.6.2:
resolution: {integrity: sha512-T4rieHaC1EXcES0Kxxj4JWgaUQHDk+qwHcYOCFHfiwKz7tOVPLq7Hjq9dM1WCMhylqMEfP7hMcOIChvotiZegA==}
engines: {node: '>=4.0.0'}
xmlbuilder@11.0.1:
resolution: {integrity: sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==}
engines: {node: '>=4.0'}
xmlchars@2.2.0: xmlchars@2.2.0:
resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==}
yallist@5.0.0:
resolution: {integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==}
engines: {node: '>=18'}
snapshots: snapshots:
'@asamuzakjp/css-color@3.2.0': '@asamuzakjp/css-color@3.2.0':
@@ -983,18 +815,14 @@ snapshots:
'@esbuild/win32-x64@0.27.0': '@esbuild/win32-x64@0.27.0':
optional: true optional: true
'@isaacs/balanced-match@4.0.1': {} '@isaacs/fs-minipass@4.0.1':
'@isaacs/brace-expansion@5.0.0':
dependencies: dependencies:
'@isaacs/balanced-match': 4.0.1 minipass: 7.1.2
'@mixmark-io/domino@2.2.0': {} '@mixmark-io/domino@2.2.0': {}
'@mozilla/readability@0.5.0': {} '@mozilla/readability@0.5.0': {}
'@sindresorhus/is@4.6.0': {}
'@types/jsdom@21.1.7': '@types/jsdom@21.1.7':
dependencies: dependencies:
'@types/node': 25.0.6 '@types/node': 25.0.6
@@ -1011,17 +839,13 @@ snapshots:
'@types/turndown@5.0.6': {} '@types/turndown@5.0.6': {}
adm-zip@0.5.16: {}
agent-base@7.1.4: {} agent-base@7.1.4: {}
asynckit@0.4.0: {} asynckit@0.4.0: {}
base64-js@1.5.1: {} base64-js@1.5.1: {}
baseline-browser-mapping@2.9.14: {} better-sqlite3@12.8.0:
better-sqlite3@12.6.0:
dependencies: dependencies:
bindings: 1.5.0 bindings: 1.5.0
prebuild-install: 7.1.3 prebuild-install: 7.1.3
@@ -1036,14 +860,6 @@ snapshots:
inherits: 2.0.4 inherits: 2.0.4
readable-stream: 3.6.2 readable-stream: 3.6.2
browserslist@4.28.1:
dependencies:
baseline-browser-mapping: 2.9.14
caniuse-lite: 1.0.30001764
electron-to-chromium: 1.5.267
node-releases: 2.0.27
update-browserslist-db: 1.2.3(browserslist@4.28.1)
buffer@5.7.1: buffer@5.7.1:
dependencies: dependencies:
base64-js: 1.5.1 base64-js: 1.5.1
@@ -1054,33 +870,21 @@ snapshots:
es-errors: 1.3.0 es-errors: 1.3.0
function-bind: 1.1.2 function-bind: 1.1.2
callsites@3.1.0: {}
camoufox-js@0.8.5(playwright-core@1.57.0):
dependencies:
adm-zip: 0.5.16
better-sqlite3: 12.6.0
commander: 14.0.2
fingerprint-generator: 2.1.79
glob: 13.0.0
impit: 0.7.6
language-tags: 2.1.0
maxmind: 5.0.3
playwright-core: 1.57.0
progress: 2.0.3
ua-parser-js: 2.0.7
xml2js: 0.6.2
caniuse-lite@1.0.30001764: {}
chownr@1.1.4: {} chownr@1.1.4: {}
chownr@3.0.0: {}
cloakbrowser@0.3.22(mmdb-lib@3.0.1)(playwright-core@1.59.1):
dependencies:
tar: 7.5.13
optionalDependencies:
mmdb-lib: 3.0.1
playwright-core: 1.59.1
combined-stream@1.0.8: combined-stream@1.0.8:
dependencies: dependencies:
delayed-stream: 1.0.0 delayed-stream: 1.0.0
commander@14.0.2: {}
cssstyle@4.6.0: cssstyle@4.6.0:
dependencies: dependencies:
'@asamuzakjp/css-color': 3.2.0 '@asamuzakjp/css-color': 3.2.0
@@ -1105,22 +909,14 @@ snapshots:
delayed-stream@1.0.0: {} delayed-stream@1.0.0: {}
detect-europe-js@0.1.2: {}
detect-libc@2.1.2: {} detect-libc@2.1.2: {}
dot-prop@6.0.1:
dependencies:
is-obj: 2.0.0
dunder-proto@1.0.1: dunder-proto@1.0.1:
dependencies: dependencies:
call-bind-apply-helpers: 1.0.2 call-bind-apply-helpers: 1.0.2
es-errors: 1.3.0 es-errors: 1.3.0
gopd: 1.2.0 gopd: 1.2.0
electron-to-chromium@1.5.267: {}
end-of-stream@1.4.5: end-of-stream@1.4.5:
dependencies: dependencies:
once: 1.4.0 once: 1.4.0
@@ -1171,18 +967,10 @@ snapshots:
'@esbuild/win32-ia32': 0.27.0 '@esbuild/win32-ia32': 0.27.0
'@esbuild/win32-x64': 0.27.0 '@esbuild/win32-x64': 0.27.0
escalade@3.2.0: {}
expand-template@2.0.3: {} expand-template@2.0.3: {}
file-uri-to-path@1.0.0: {} file-uri-to-path@1.0.0: {}
fingerprint-generator@2.1.79:
dependencies:
generative-bayesian-network: 2.1.79
header-generator: 2.1.79
tslib: 2.8.1
form-data@4.0.5: form-data@4.0.5:
dependencies: dependencies:
asynckit: 0.4.0 asynckit: 0.4.0
@@ -1198,11 +986,6 @@ snapshots:
function-bind@1.1.2: {} function-bind@1.1.2: {}
generative-bayesian-network@2.1.79:
dependencies:
adm-zip: 0.5.16
tslib: 2.8.1
get-intrinsic@1.3.0: get-intrinsic@1.3.0:
dependencies: dependencies:
call-bind-apply-helpers: 1.0.2 call-bind-apply-helpers: 1.0.2
@@ -1227,12 +1010,6 @@ snapshots:
github-from-package@0.0.0: {} github-from-package@0.0.0: {}
glob@13.0.0:
dependencies:
minimatch: 10.1.1
minipass: 7.1.2
path-scurry: 2.0.1
gopd@1.2.0: {} gopd@1.2.0: {}
has-symbols@1.1.0: {} has-symbols@1.1.0: {}
@@ -1245,13 +1022,6 @@ snapshots:
dependencies: dependencies:
function-bind: 1.1.2 function-bind: 1.1.2
header-generator@2.1.79:
dependencies:
browserslist: 4.28.1
generative-bayesian-network: 2.1.79
ow: 0.28.2
tslib: 2.8.1
html-encoding-sniffer@4.0.0: html-encoding-sniffer@4.0.0:
dependencies: dependencies:
whatwg-encoding: 3.1.1 whatwg-encoding: 3.1.1
@@ -1276,51 +1046,12 @@ snapshots:
ieee754@1.2.1: {} ieee754@1.2.1: {}
impit-darwin-arm64@0.7.6:
optional: true
impit-darwin-x64@0.7.6:
optional: true
impit-linux-arm64-gnu@0.7.6:
optional: true
impit-linux-arm64-musl@0.7.6:
optional: true
impit-linux-x64-gnu@0.7.6:
optional: true
impit-linux-x64-musl@0.7.6:
optional: true
impit-win32-arm64-msvc@0.7.6:
optional: true
impit-win32-x64-msvc@0.7.6:
optional: true
impit@0.7.6:
optionalDependencies:
impit-darwin-arm64: 0.7.6
impit-darwin-x64: 0.7.6
impit-linux-arm64-gnu: 0.7.6
impit-linux-arm64-musl: 0.7.6
impit-linux-x64-gnu: 0.7.6
impit-linux-x64-musl: 0.7.6
impit-win32-arm64-msvc: 0.7.6
impit-win32-x64-msvc: 0.7.6
inherits@2.0.4: {} inherits@2.0.4: {}
ini@1.3.8: {} ini@1.3.8: {}
is-obj@2.0.0: {}
is-potential-custom-element-name@1.0.1: {} is-potential-custom-element-name@1.0.1: {}
is-standalone-pwa@0.1.1: {}
jsdom@24.1.3: jsdom@24.1.3:
dependencies: dependencies:
cssstyle: 4.6.0 cssstyle: 4.6.0
@@ -1349,25 +1080,10 @@ snapshots:
- supports-color - supports-color
- utf-8-validate - utf-8-validate
language-subtag-registry@0.3.23: {}
language-tags@2.1.0:
dependencies:
language-subtag-registry: 0.3.23
lodash.isequal@4.5.0: {}
lru-cache@10.4.3: {} lru-cache@10.4.3: {}
lru-cache@11.2.4: {}
math-intrinsics@1.1.0: {} math-intrinsics@1.1.0: {}
maxmind@5.0.3:
dependencies:
mmdb-lib: 3.0.1
tiny-lru: 11.4.5
mime-db@1.52.0: {} mime-db@1.52.0: {}
mime-types@2.1.35: mime-types@2.1.35:
@@ -1376,17 +1092,18 @@ snapshots:
mimic-response@3.1.0: {} mimic-response@3.1.0: {}
minimatch@10.1.1:
dependencies:
'@isaacs/brace-expansion': 5.0.0
minimist@1.2.8: {} minimist@1.2.8: {}
minipass@7.1.2: {} minipass@7.1.2: {}
minizlib@3.1.0:
dependencies:
minipass: 7.1.2
mkdirp-classic@0.5.3: {} mkdirp-classic@0.5.3: {}
mmdb-lib@3.0.1: {} mmdb-lib@3.0.1:
optional: true
ms@2.1.3: {} ms@2.1.3: {}
@@ -1396,34 +1113,17 @@ snapshots:
dependencies: dependencies:
semver: 7.7.3 semver: 7.7.3
node-releases@2.0.27: {}
nwsapi@2.2.23: {} nwsapi@2.2.23: {}
once@1.4.0: once@1.4.0:
dependencies: dependencies:
wrappy: 1.0.2 wrappy: 1.0.2
ow@0.28.2:
dependencies:
'@sindresorhus/is': 4.6.0
callsites: 3.1.0
dot-prop: 6.0.1
lodash.isequal: 4.5.0
vali-date: 1.0.0
parse5@7.3.0: parse5@7.3.0:
dependencies: dependencies:
entities: 6.0.1 entities: 6.0.1
path-scurry@2.0.1: playwright-core@1.59.1: {}
dependencies:
lru-cache: 11.2.4
minipass: 7.1.2
picocolors@1.1.1: {}
playwright-core@1.57.0: {}
prebuild-install@7.1.3: prebuild-install@7.1.3:
dependencies: dependencies:
@@ -1440,8 +1140,6 @@ snapshots:
tar-fs: 2.1.4 tar-fs: 2.1.4
tunnel-agent: 0.6.0 tunnel-agent: 0.6.0
progress@2.0.3: {}
psl@1.15.0: psl@1.15.0:
dependencies: dependencies:
punycode: 2.3.1 punycode: 2.3.1
@@ -1480,8 +1178,6 @@ snapshots:
safer-buffer@2.1.2: {} safer-buffer@2.1.2: {}
sax@1.4.4: {}
saxes@6.0.0: saxes@6.0.0:
dependencies: dependencies:
xmlchars: 2.2.0 xmlchars: 2.2.0
@@ -1519,7 +1215,13 @@ snapshots:
inherits: 2.0.4 inherits: 2.0.4
readable-stream: 3.6.2 readable-stream: 3.6.2
tiny-lru@11.4.5: {} tar@7.5.13:
dependencies:
'@isaacs/fs-minipass': 4.0.1
chownr: 3.0.0
minipass: 7.1.2
minizlib: 3.1.0
yallist: 5.0.0
tough-cookie@4.1.4: tough-cookie@4.1.4:
dependencies: dependencies:
@@ -1532,8 +1234,6 @@ snapshots:
dependencies: dependencies:
punycode: 2.3.1 punycode: 2.3.1
tslib@2.8.1: {}
tsx@4.21.0: tsx@4.21.0:
dependencies: dependencies:
esbuild: 0.27.0 esbuild: 0.27.0
@@ -1553,24 +1253,10 @@ snapshots:
typescript@5.9.3: {} typescript@5.9.3: {}
ua-is-frozen@0.1.2: {}
ua-parser-js@2.0.7:
dependencies:
detect-europe-js: 0.1.2
is-standalone-pwa: 0.1.1
ua-is-frozen: 0.1.2
undici-types@7.16.0: {} undici-types@7.16.0: {}
universalify@0.2.0: {} universalify@0.2.0: {}
update-browserslist-db@1.2.3(browserslist@4.28.1):
dependencies:
browserslist: 4.28.1
escalade: 3.2.0
picocolors: 1.1.1
url-parse@1.5.10: url-parse@1.5.10:
dependencies: dependencies:
querystringify: 2.2.0 querystringify: 2.2.0
@@ -1578,8 +1264,6 @@ snapshots:
util-deprecate@1.0.2: {} util-deprecate@1.0.2: {}
vali-date@1.0.0: {}
w3c-xmlserializer@5.0.0: w3c-xmlserializer@5.0.0:
dependencies: dependencies:
xml-name-validator: 5.0.0 xml-name-validator: 5.0.0
@@ -1603,11 +1287,6 @@ snapshots:
xml-name-validator@5.0.0: {} xml-name-validator@5.0.0: {}
xml2js@0.6.2:
dependencies:
sax: 1.4.4
xmlbuilder: 11.0.1
xmlbuilder@11.0.1: {}
xmlchars@2.2.0: {} xmlchars@2.2.0: {}
yallist@5.0.0: {}

View File

@@ -1,12 +1,9 @@
import { writeFileSync } from 'fs'; #!/usr/bin/env npx tsx
import { mkdirSync, writeFileSync } from 'fs';
import { dirname, resolve } from 'path';
import { getPage } from './browse.js'; import { getPage } from './browse.js';
const baseUrl = 'http://localhost:3000';
const username = 'analyst@fhb.local';
const password = process.env.CAMOUFOX_PASSWORD ?? '';
const reportPath = '/Users/stefano.fiorini/Documents/projects/fhb-loan-spreading-pilot-a/docs/plans/2026-01-24-financials-analysis-redesign/web-automation-scan.md';
type NavResult = { type NavResult = {
requestedUrl: string; requestedUrl: string;
url: string; url: string;
@@ -15,198 +12,163 @@ type NavResult = {
error?: string; error?: string;
}; };
type RouteCheck = {
route: string;
result: NavResult;
heading: string | null;
};
const DEFAULT_BASE_URL = 'http://localhost:3000';
const DEFAULT_REPORT_PATH = resolve(process.cwd(), 'scan-local-app.md');
function env(name: string): string | undefined {
const value = process.env[name]?.trim();
return value ? value : undefined;
}
function getRoutes(baseUrl: string): string[] {
const routeList = env('SCAN_ROUTES');
if (routeList) {
return routeList
.split(',')
.map((route) => route.trim())
.filter(Boolean)
.map((route) => new URL(route, baseUrl).toString());
}
return [baseUrl];
}
async function gotoWithStatus(page: any, url: string): Promise<NavResult> { async function gotoWithStatus(page: any, url: string): Promise<NavResult> {
const resp = await page.goto(url, { waitUntil: 'domcontentloaded', timeout: 60000 }).catch((e: unknown) => ({ error: e })); const response = await page
if (resp?.error) { .goto(url, { waitUntil: 'domcontentloaded', timeout: 60000 })
.catch((error: unknown) => ({ error }));
if (response?.error) {
return { return {
requestedUrl: url, requestedUrl: url,
url: page.url(), url: page.url(),
status: null, status: null,
title: await page.title().catch(() => ''), title: await page.title().catch(() => ''),
error: String(resp.error), error: String(response.error),
}; };
} }
return { return {
requestedUrl: url, requestedUrl: url,
url: page.url(), url: page.url(),
status: resp ? resp.status() : null, status: response ? response.status() : null,
title: await page.title().catch(() => ''), title: await page.title().catch(() => ''),
}; };
} }
async function textOrNull(page: any, selector: string): Promise<string | null> { async function textOrNull(page: any, selector: string): Promise<string | null> {
const loc = page.locator(selector).first(); const locator = page.locator(selector).first();
try { try {
if ((await loc.count()) === 0) return null; if ((await locator.count()) === 0) return null;
const txt = await loc.textContent(); const value = await locator.textContent();
return txt ? txt.trim().replace(/\s+/g, ' ') : null; return value ? value.trim().replace(/\s+/g, ' ') : null;
} catch { } catch {
return null; return null;
} }
} }
async function loginIfConfigured(page: any, baseUrl: string, lines: string[]) {
const loginPath = env('SCAN_LOGIN_PATH');
const username = env('SCAN_USERNAME') ?? env('CLOAKBROWSER_USERNAME');
const password = env('SCAN_PASSWORD') ?? env('CLOAKBROWSER_PASSWORD');
const usernameSelector = env('SCAN_USERNAME_SELECTOR') ?? 'input[type="email"], input[name="email"]';
const passwordSelector = env('SCAN_PASSWORD_SELECTOR') ?? 'input[type="password"], input[name="password"]';
const submitSelector = env('SCAN_SUBMIT_SELECTOR') ?? 'button[type="submit"], input[type="submit"]';
if (!loginPath) {
lines.push('## Login');
lines.push('- Skipped: set `SCAN_LOGIN_PATH` to enable login smoke checks.');
lines.push('');
return;
}
const loginUrl = new URL(loginPath, baseUrl).toString();
lines.push('## Login');
lines.push(`- Login URL: ${loginUrl}`);
await gotoWithStatus(page, loginUrl);
if (!username || !password) {
lines.push('- Skipped: set `SCAN_USERNAME`/`SCAN_PASSWORD` or `CLOAKBROWSER_USERNAME`/`CLOAKBROWSER_PASSWORD`.');
lines.push('');
return;
}
await page.locator(usernameSelector).first().fill(username);
await page.locator(passwordSelector).first().fill(password);
await page.locator(submitSelector).first().click();
await page.waitForTimeout(2500);
lines.push(`- After submit URL: ${page.url()}`);
lines.push(`- Cookie count: ${(await page.context().cookies()).length}`);
lines.push('');
}
async function checkRoutes(page: any, baseUrl: string, lines: string[]) {
const routes = getRoutes(baseUrl);
const routeChecks: RouteCheck[] = [];
for (const url of routes) {
const result = await gotoWithStatus(page, url);
const heading = await textOrNull(page, 'h1');
routeChecks.push({
route: url,
result,
heading,
});
}
lines.push('## Route Checks');
for (const check of routeChecks) {
const relativeUrl = check.route.startsWith(baseUrl) ? check.route.slice(baseUrl.length) || '/' : check.route;
const finalPath = check.result.url.startsWith(baseUrl)
? check.result.url.slice(baseUrl.length) || '/'
: check.result.url;
const suffix = check.heading ? `, h1="${check.heading}"` : '';
const errorSuffix = check.result.error ? `, error="${check.result.error}"` : '';
lines.push(
`- ${relativeUrl} → status ${check.result.status ?? 'ERR'} (final ${finalPath})${suffix}${errorSuffix}`
);
}
lines.push('');
}
async function main() { async function main() {
const { page, browser } = await getPage({ headless: true }); const baseUrl = env('SCAN_BASE_URL') ?? DEFAULT_BASE_URL;
const reportPath = resolve(env('SCAN_REPORT_PATH') ?? DEFAULT_REPORT_PATH);
const headless = (env('SCAN_HEADLESS') ?? env('CLOAKBROWSER_HEADLESS') ?? 'true') === 'true';
const { page, browser } = await getPage({ headless });
const lines: string[] = []; const lines: string[] = [];
lines.push('# Web Automation Scan (local)'); lines.push('# Web Automation Scan (local)');
lines.push(''); lines.push('');
lines.push(`- Base URL: ${baseUrl}`); lines.push(`- Base URL: ${baseUrl}`);
lines.push(`- Timestamp: ${new Date().toISOString()}`); lines.push(`- Timestamp: ${new Date().toISOString()}`);
lines.push(`- Headless: ${headless}`);
lines.push(`- Report Path: ${reportPath}`);
lines.push(''); lines.push('');
try { try {
lines.push('## Login'); await loginIfConfigured(page, baseUrl, lines);
await gotoWithStatus(page, `${baseUrl}/login`); await checkRoutes(page, baseUrl, lines);
await page.locator('input[name="email"]').fill(username);
await page.locator('input[name="password"]').fill(password);
await page.locator('button[type="submit"]').click();
await page.waitForTimeout(2500);
const cookies = await page.context().cookies();
const sessionCookie = cookies.find((c: any) => c.name === 'fhb_session');
lines.push(`- After submit URL: ${page.url()}`);
lines.push(`- Has session cookie (fhb_session): ${Boolean(sessionCookie)}`);
lines.push('');
lines.push('## Demo Case');
const casesNav = await gotoWithStatus(page, `${baseUrl}/cases`);
lines.push(`- GET /cases → status ${casesNav.status ?? 'ERR'}, final ${casesNav.url}`);
const envCaseId = process.env.SCAN_CASE_ID?.trim() || null;
let selectedCaseId: string | null = envCaseId;
if (!selectedCaseId) {
const caseLinks = await page.$$eval('a[href^="/cases/"]', (as) =>
as
.map((a) => ({
href: (a as HTMLAnchorElement).getAttribute('href') || '',
text: (a.textContent || '').trim(),
}))
.filter((x) => x.href.includes('/cases/'))
);
const preferredTitles = ['Demo - Strong Borrower', 'Demo - Weak Borrower', 'Demo - Incomplete'];
for (const title of preferredTitles) {
const match = caseLinks.find((l) => l.text.includes(title) && l.href.includes('/cases/'));
const href = match?.href ?? '';
const m = href.match(/\/cases\/([0-9a-f-]{36})/i);
if (m) {
selectedCaseId = m[1];
break;
}
}
if (!selectedCaseId) {
const firstHref =
caseLinks.map((l) => l.href).find((h) => /\/cases\/[0-9a-f-]{36}/i.test(h)) ?? null;
const m = firstHref?.match(/\/cases\/([0-9a-f-]{36})/i) ?? null;
selectedCaseId = m?.[1] ?? null;
}
}
lines.push(`- Selected caseId: ${selectedCaseId ?? '(none found)'}`);
if (!selectedCaseId) {
lines.push('');
lines.push('⚠️ Could not find a demo case link on /cases.');
writeFileSync(reportPath, lines.join('\n') + '\n', 'utf-8');
return;
}
const caseBase = `${baseUrl}/cases/${selectedCaseId}/journey`;
lines.push('');
lines.push('## Route Checks');
const routesToCheck = [
`${caseBase}`,
`${caseBase}/financials`,
`${caseBase}/financials/income`,
`${caseBase}/analysis`,
`${caseBase}/analysis/configure`,
`${caseBase}/analysis/ai`,
`${caseBase}/analysis/ai/detail`,
`${caseBase}/spreads`,
];
for (const url of routesToCheck) {
const r = await gotoWithStatus(page, url);
const h1 = await textOrNull(page, 'h1');
const finalPath = r.url.startsWith(baseUrl) ? r.url.slice(baseUrl.length) : r.url;
lines.push(`- ${url.slice(baseUrl.length)} → status ${r.status ?? 'ERR'} (final ${finalPath})${h1 ? `, h1="${h1}"` : ''}`);
}
lines.push('');
lines.push('## Spreadsheet Analysis (UI)');
await gotoWithStatus(page, `${caseBase}/analysis/configure`);
const runButton = page.locator('button:has-text("Run Analysis")').first();
const disabled = await runButton.isDisabled().catch(() => true);
lines.push(`- Run button disabled: ${disabled}`);
if (!disabled) {
await runButton.click();
const resultsWait = page
.waitForURL('**/journey/analysis/results**', { timeout: 180000 })
.then(() => 'results' as const);
const errorWait = page
.locator('[role="alert"]')
.filter({ hasText: 'Error' })
.first()
.waitFor({ timeout: 180000 })
.then(() => 'error' as const);
const outcome = await Promise.race([resultsWait, errorWait]).catch(() => 'timeout' as const);
if (outcome === 'results') {
await page.waitForTimeout(1500);
lines.push(`- Results URL: ${page.url().replace(baseUrl, '')}`);
const downloadHref = await page
.locator('a[href*="/journey/analysis/download"]')
.first()
.getAttribute('href')
.catch(() => null);
if (downloadHref) {
const dlUrl = downloadHref.startsWith('http') ? downloadHref : `${baseUrl}${downloadHref}`;
const dlResp = await page.goto(dlUrl, { waitUntil: 'commit', timeout: 60000 }).catch(() => null);
lines.push(
`- Download route status: ${dlResp?.status() ?? 'ERR'} (Content-Type: ${dlResp?.headers()?.['content-type'] ?? 'n/a'})`
);
} else {
lines.push('- Download link not found on results page');
}
} else if (outcome === 'error') {
const errorText = await page
.locator('[role="alert"]')
.first()
.textContent()
.then((t: string | null) => (t ? t.trim().replace(/\\s+/g, ' ') : null))
.catch(() => null);
lines.push(`- Stayed on configure page; saw error callout: ${errorText ?? '(unable to read)'}`);
lines.push('- Skipping download check because analysis did not complete.');
} else {
lines.push('- Timed out waiting for results or error after clicking Run Analysis.');
}
} else {
lines.push('- Skipped running analysis because Run button was disabled.');
}
lines.push('');
lines.push('## Notes'); lines.push('## Notes');
lines.push('- This scan avoids scraping financial values; it records route availability and basic headings.'); lines.push('- This generic smoke helper records route availability and top-level headings for a local app.');
lines.push('- Configure login and route coverage with `SCAN_*` environment variables.');
writeFileSync(reportPath, lines.join('\n') + '\n', 'utf-8');
} finally { } finally {
await browser.close(); await browser.close();
} }
mkdirSync(dirname(reportPath), { recursive: true });
writeFileSync(reportPath, `${lines.join('\n')}\n`, 'utf-8');
console.log(`Report written to ${reportPath}`);
} }
main().catch((err) => { main().catch((error) => {
console.error(err); console.error(error);
process.exitCode = 1; process.exitCode = 1;
}); });

View File

@@ -1,28 +1,25 @@
import { Camoufox } from 'camoufox-js'; import { launchPersistentContext } from 'cloakbrowser';
import { homedir } from 'os'; import { homedir } from 'os';
import { join } from 'path'; import { join } from 'path';
import { mkdirSync, existsSync } from 'fs'; import { mkdirSync, existsSync } from 'fs';
async function test() { async function test() {
const profilePath = join(homedir(), '.camoufox-profile'); const profilePath = join(homedir(), '.cloakbrowser-profile');
if (!existsSync(profilePath)) { if (!existsSync(profilePath)) {
mkdirSync(profilePath, { recursive: true }); mkdirSync(profilePath, { recursive: true });
} }
console.log('Profile path:', profilePath); console.log('Profile path:', profilePath);
console.log('Launching with full options...'); console.log('Launching CloakBrowser with full options...');
const browser = await Camoufox({ const browser = await launchPersistentContext({
headless: true, headless: true,
user_data_dir: profilePath, userDataDir: profilePath,
// humanize: 1.5, // Test without this first humanize: true,
// geoip: true, // Test without this first
// enable_cache: true,
// block_webrtc: false,
}); });
console.log('Browser launched'); console.log('Browser launched');
const page = await browser.newPage(); const page = browser.pages()[0] || await browser.newPage();
console.log('Page created'); console.log('Page created');
await page.goto('https://github.com', { timeout: 30000 }); await page.goto('https://github.com', { timeout: 30000 });

View File

@@ -1,10 +1,11 @@
import { Camoufox } from 'camoufox-js'; import { launch } from 'cloakbrowser';
async function test() { async function test() {
console.log('Launching Camoufox with minimal config...'); console.log('Launching CloakBrowser with minimal config...');
const browser = await Camoufox({ const browser = await launch({
headless: true, headless: true,
humanize: true,
}); });
console.log('Browser launched'); console.log('Browser launched');

View File

@@ -1,24 +1,25 @@
import { Camoufox } from 'camoufox-js'; import { launchPersistentContext } from 'cloakbrowser';
import { homedir } from 'os'; import { homedir } from 'os';
import { join } from 'path'; import { join } from 'path';
import { mkdirSync, existsSync } from 'fs'; import { mkdirSync, existsSync } from 'fs';
async function test() { async function test() {
const profilePath = join(homedir(), '.camoufox-profile'); const profilePath = join(homedir(), '.cloakbrowser-profile');
if (!existsSync(profilePath)) { if (!existsSync(profilePath)) {
mkdirSync(profilePath, { recursive: true }); mkdirSync(profilePath, { recursive: true });
} }
console.log('Profile path:', profilePath); console.log('Profile path:', profilePath);
console.log('Launching with user_data_dir...'); console.log('Launching with persistent userDataDir...');
const browser = await Camoufox({ const browser = await launchPersistentContext({
headless: true, headless: true,
user_data_dir: profilePath, userDataDir: profilePath,
humanize: true,
}); });
console.log('Browser launched'); console.log('Browser launched');
const page = await browser.newPage(); const page = browser.pages()[0] || await browser.newPage();
console.log('Page created'); console.log('Page created');
await page.goto('https://example.com', { timeout: 30000 }); await page.goto('https://example.com', { timeout: 30000 });

View File

@@ -1,78 +0,0 @@
import { getPage } from './browse.js';
// Shape of the JSON document this script prints to stdout: page identity plus
// the color-bearing CSS custom properties and sampled element colors.
type Extracted = {
  title: string;
  url: string;
  // [variableName, value] pairs for CSS custom properties that hold color values.
  colorVars: Array<[string, string]>;
  // Computed colors per sampled selector; null when the selector matched nothing.
  samples: Record<string, null | { background: string; color: string; border: string }>;
};
/** True when the string contains a hex (#rgb/#rgba/#rrggbb/#rrggbbaa) or an rgb()/hsl() color token. */
function isColorValue(value: string) {
  const hexPattern = /#([0-9a-f]{3,4}|[0-9a-f]{6}|[0-9a-f]{8})\b/i;
  const functionalPattern = /\brgb\(|\bhsl\(/i;
  if (hexPattern.test(value)) {
    return true;
  }
  return functionalPattern.test(value);
}
// Entry point: load the target page headlessly, collect all CSS custom properties
// on :root plus computed colors for a few representative elements, and print the
// filtered result as JSON on stdout.
async function main() {
  // Target URL from argv; defaults to the First Horizon homepage.
  const url = process.argv[2] ?? 'https://www.firsthorizon.com';
  const { page, browser } = await getPage({ headless: true });
  try {
    await page.goto(url, { waitUntil: 'domcontentloaded', timeout: 60000 });
    // Fixed settle delay so late-applied stylesheets/scripts can run before sampling.
    await page.waitForTimeout(5000);
    // The evaluate body is passed as a raw string so it executes verbatim in the
    // page context, untouched by TS transpilation.
    const data = await page.evaluate(`(() => {
      const rootStyles = getComputedStyle(document.documentElement);
      const vars = {};
      for (let i = 0; i < rootStyles.length; i++) {
        const prop = rootStyles[i];
        if (prop && prop.startsWith('--')) {
          vars[prop] = rootStyles.getPropertyValue(prop).trim();
        }
      }
      const pick = (selector) => {
        const el = document.querySelector(selector);
        if (!el) return null;
        const cs = getComputedStyle(el);
        return {
          background: cs.backgroundColor,
          color: cs.color,
          border: cs.borderColor,
        };
      };
      return {
        title: document.title,
        url: location.href,
        vars,
        samples: {
          body: pick('body'),
          header: pick('header'),
          nav: pick('nav'),
          primaryButton: pick('button, [role="button"], a[role="button"], a.button, .button'),
          link: pick('a'),
        },
      };
    })()`);
    // Keep only custom properties whose values look like colors, sorted by name.
    const entries = Object.entries(data.vars) as Array<[string, string]>;
    const colorVars = entries
      .filter(([, v]) => v && isColorValue(v))
      .sort((a, b) => a[0].localeCompare(b[0]));
    const out: Extracted = {
      title: data.title,
      url: data.url,
      colorVars,
      samples: data.samples,
    };
    process.stdout.write(JSON.stringify(out, null, 2));
  } finally {
    // Always release the browser, even when navigation or evaluation throws.
    await browser.close();
  }
}
main().catch((error) => {
  console.error(error);
  process.exit(1);
});

View File

@@ -1,48 +1,99 @@
--- ---
name: web-automation name: web-automation
description: Browse and scrape web pages using Playwright with Camoufox anti-detection browser. Use when automating web workflows, extracting page content to markdown, handling authenticated sessions, or scraping websites with bot protection. description: Browse and scrape web pages using Playwright-compatible CloakBrowser. Use when automating web workflows, extracting rendered page content, handling authenticated sessions, or running multi-step browser flows.
--- ---
# Web Automation with Camoufox (OpenCode) # Web Automation with CloakBrowser (OpenCode)
Automated web browsing and scraping using Playwright with Camoufox anti-detection browser. Automated web browsing and scraping using Playwright-compatible CloakBrowser with two execution paths:
- one-shot extraction via `extract.js`
- broader stateful automation via `auth.ts`, `browse.ts`, `flow.ts`, `scan-local-app.ts`, and `scrape.ts`
## Requirements ## Requirements
- Node.js 20+ - Node.js 20+
- pnpm - pnpm
- Network access to download browser binaries - Network access to download the CloakBrowser binary on first use
## First-Time Setup ## First-Time Setup
```bash ```bash
cd ~/.opencode/skills/web-automation/scripts cd ~/.config/opencode/skills/web-automation/scripts
pnpm install pnpm install
npx camoufox-js fetch npx cloakbrowser install
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
```
## Updating CloakBrowser
```bash
cd ~/.config/opencode/skills/web-automation/scripts
pnpm up cloakbrowser playwright-core
npx cloakbrowser install
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
``` ```
## Prerequisite Check (MANDATORY) ## Prerequisite Check (MANDATORY)
Before running any automation, verify Playwright + Camoufox dependencies are installed and scripts are configured to use Camoufox. Before running automation, verify CloakBrowser and Playwright Core are installed and wired correctly.
```bash ```bash
cd ~/.opencode/skills/web-automation/scripts cd ~/.config/opencode/skills/web-automation/scripts
node -e "require.resolve('playwright-core/package.json');require.resolve('camoufox-js/package.json');console.log('OK: playwright-core + camoufox-js installed')" node check-install.js
node -e "const fs=require('fs');const t=fs.readFileSync('browse.ts','utf8');if(!/camoufox-js/.test(t)){throw new Error('browse.ts is not configured for Camoufox')}console.log('OK: Camoufox integration detected in browse.ts')"
``` ```
If any check fails, stop and return: If the check fails, stop and return:
"Missing dependency/config: web-automation requires `playwright-core` + `camoufox-js` and Camoufox-based scripts. Run setup in this skill, then retry." "Missing dependency/config: web-automation requires `cloakbrowser` and `playwright-core` with CloakBrowser-based scripts. Run setup in this skill, then retry."
If runtime fails with missing native bindings for `better-sqlite3` or `esbuild`, run:
```bash
cd ~/.config/opencode/skills/web-automation/scripts
pnpm approve-builds
pnpm rebuild better-sqlite3 esbuild
```
## When To Use Which Command
- Use `node extract.js "<URL>"` for a one-shot rendered fetch with JSON output.
- Use `npx tsx scrape.ts ...` when you need markdown extraction, Readability cleanup, or selector-based scraping.
- Use `npx tsx browse.ts ...`, `auth.ts`, or `flow.ts` when the task needs login handling, persistent sessions, clicks, typing, screenshots, or multi-step navigation.
- Use `npx tsx scan-local-app.ts` when you need a configurable local-app smoke pass driven by `SCAN_*` and `CLOAKBROWSER_*` environment variables.
## Quick Reference ## Quick Reference
- Install check: `node check-install.js`
- One-shot JSON extract: `node extract.js "https://example.com"`
- Browse page: `npx tsx browse.ts --url "https://example.com"` - Browse page: `npx tsx browse.ts --url "https://example.com"`
- Scrape markdown: `npx tsx scrape.ts --url "https://example.com" --mode main --output page.md` - Scrape markdown: `npx tsx scrape.ts --url "https://example.com" --mode main --output page.md`
- Authenticate: `npx tsx auth.ts --url "https://example.com/login"` - Authenticate: `npx tsx auth.ts --url "https://example.com/login"`
- Natural-language flow: `npx tsx flow.ts --instruction 'go to https://example.com then click on "Login" then type "user@example.com" in #email then press enter'`
- Local app smoke scan: `SCAN_BASE_URL=http://localhost:3000 SCAN_ROUTES=/,/dashboard npx tsx scan-local-app.ts`
## Local App Smoke Scan
`scan-local-app.ts` is intentionally generic. Configure it with environment variables instead of editing the file:
- `SCAN_BASE_URL`
- `SCAN_LOGIN_PATH`
- `SCAN_USERNAME`
- `SCAN_PASSWORD`
- `SCAN_USERNAME_SELECTOR`
- `SCAN_PASSWORD_SELECTOR`
- `SCAN_SUBMIT_SELECTOR`
- `SCAN_ROUTES`
- `SCAN_REPORT_PATH`
- `SCAN_HEADLESS`
If `SCAN_USERNAME` or `SCAN_PASSWORD` are omitted, the script falls back to `CLOAKBROWSER_USERNAME` and `CLOAKBROWSER_PASSWORD`.
## Notes ## Notes
- Sessions persist in Camoufox profile storage. - Sessions persist in CloakBrowser profile storage.
- Use `--wait` for dynamic pages. - Use `--wait` for dynamic pages.
- Use `--mode selector --selector "..."` for targeted extraction. - Use `--mode selector --selector "..."` for targeted extraction.
- `extract.js` keeps a bounded stealth/rendered fetch path without needing a long-lived automation session.

View File

@@ -41,8 +41,8 @@ function getCredentials(options?: {
username?: string; username?: string;
password?: string; password?: string;
}): { username: string; password: string } | null { }): { username: string; password: string } | null {
const username = options?.username || process.env.CAMOUFOX_USERNAME; const username = options?.username || process.env.CLOAKBROWSER_USERNAME;
const password = options?.password || process.env.CAMOUFOX_PASSWORD; const password = options?.password || process.env.CLOAKBROWSER_PASSWORD;
if (!username || !password) { if (!username || !password) {
return null; return null;
@@ -450,7 +450,7 @@ export async function navigateAuthenticated(
if (!credentials) { if (!credentials) {
throw new Error( throw new Error(
'Authentication required but no credentials provided. ' + 'Authentication required but no credentials provided. ' +
'Set CAMOUFOX_USERNAME and CAMOUFOX_PASSWORD environment variables.' 'Set CLOAKBROWSER_USERNAME and CLOAKBROWSER_PASSWORD environment variables.'
); );
} }
@@ -504,8 +504,8 @@ Usage:
Options: Options:
-u, --url <url> URL to authenticate (required) -u, --url <url> URL to authenticate (required)
-t, --type <type> Auth type: auto, form, or msal (default: auto) -t, --type <type> Auth type: auto, form, or msal (default: auto)
--username <user> Username/email (or set CAMOUFOX_USERNAME env var) --username <user> Username/email (or set CLOAKBROWSER_USERNAME env var)
--password <pass> Password (or set CAMOUFOX_PASSWORD env var) --password <pass> Password (or set CLOAKBROWSER_PASSWORD env var)
--headless <bool> Run in headless mode (default: false for auth) --headless <bool> Run in headless mode (default: false for auth)
-h, --help Show this help message -h, --help Show this help message
@@ -515,8 +515,8 @@ Auth Types:
msal Microsoft SSO (login.microsoftonline.com) msal Microsoft SSO (login.microsoftonline.com)
Environment Variables: Environment Variables:
CAMOUFOX_USERNAME Default username/email for authentication CLOAKBROWSER_USERNAME Default username/email for authentication
CAMOUFOX_PASSWORD Default password for authentication CLOAKBROWSER_PASSWORD Default password for authentication
Examples: Examples:
# Interactive login (no credentials, opens browser) # Interactive login (no credentials, opens browser)
@@ -527,11 +527,11 @@ Examples:
--username "user@example.com" --password "secret" --username "user@example.com" --password "secret"
# Microsoft SSO login # Microsoft SSO login
CAMOUFOX_USERNAME=user@company.com CAMOUFOX_PASSWORD=secret \\ CLOAKBROWSER_USERNAME=user@company.com CLOAKBROWSER_PASSWORD=secret \\
npx tsx auth.ts --url "https://internal.company.com" --type msal npx tsx auth.ts --url "https://internal.company.com" --type msal
Notes: Notes:
- Session is saved to ~/.camoufox-profile/ for persistence - Session is saved to ~/.cloakbrowser-profile/ for persistence
- After successful auth, subsequent browses will be authenticated - After successful auth, subsequent browses will be authenticated
- Use --headless false if you need to handle MFA manually - Use --headless false if you need to handle MFA manually
`); `);

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env npx tsx #!/usr/bin/env npx tsx
/** /**
* Browser launcher using Camoufox with persistent profile * Browser launcher using CloakBrowser with persistent profile
* *
* Usage: * Usage:
* npx tsx browse.ts --url "https://example.com" * npx tsx browse.ts --url "https://example.com"
@@ -9,14 +9,13 @@
* npx tsx browse.ts --url "https://example.com" --headless false --wait 5000 * npx tsx browse.ts --url "https://example.com" --headless false --wait 5000
*/ */
import { Camoufox } from 'camoufox-js'; import { launchPersistentContext } from 'cloakbrowser';
import { homedir } from 'os'; import { homedir } from 'os';
import { join } from 'path'; import { join } from 'path';
import { existsSync, mkdirSync } from 'fs'; import { existsSync, mkdirSync } from 'fs';
import parseArgs from 'minimist'; import parseArgs from 'minimist';
import type { Page, BrowserContext } from 'playwright-core'; import type { Page, BrowserContext } from 'playwright-core';
// Types
interface BrowseOptions { interface BrowseOptions {
url: string; url: string;
headless?: boolean; headless?: boolean;
@@ -33,55 +32,54 @@ interface BrowseResult {
screenshotPath?: string; screenshotPath?: string;
} }
// Get profile directory function sleep(ms: number): Promise<void> {
return new Promise((resolve) => setTimeout(resolve, ms));
}
const getProfilePath = (): string => { const getProfilePath = (): string => {
const customPath = process.env.CAMOUFOX_PROFILE_PATH; const customPath = process.env.CLOAKBROWSER_PROFILE_PATH;
if (customPath) return customPath; if (customPath) return customPath;
const profileDir = join(homedir(), '.camoufox-profile'); const profileDir = join(homedir(), '.cloakbrowser-profile');
if (!existsSync(profileDir)) { if (!existsSync(profileDir)) {
mkdirSync(profileDir, { recursive: true }); mkdirSync(profileDir, { recursive: true });
} }
return profileDir; return profileDir;
}; };
// Launch browser with persistent profile
export async function launchBrowser(options: { export async function launchBrowser(options: {
headless?: boolean; headless?: boolean;
}): Promise<BrowserContext> { }): Promise<BrowserContext> {
const profilePath = getProfilePath(); const profilePath = getProfilePath();
const headless = const envHeadless = process.env.CLOAKBROWSER_HEADLESS;
options.headless ?? const headless = options.headless ?? (envHeadless ? envHeadless === 'true' : true);
(process.env.CAMOUFOX_HEADLESS ? process.env.CAMOUFOX_HEADLESS === 'true' : true);
console.log(`Using profile: ${profilePath}`); console.log(`Using profile: ${profilePath}`);
console.log(`Headless mode: ${headless}`); console.log(`Headless mode: ${headless}`);
const browser = await Camoufox({ const context = await launchPersistentContext({
user_data_dir: profilePath, userDataDir: profilePath,
headless, headless,
humanize: true,
}); });
return browser; return context;
} }
// Browse to URL and optionally take screenshot
export async function browse(options: BrowseOptions): Promise<BrowseResult> { export async function browse(options: BrowseOptions): Promise<BrowseResult> {
const browser = await launchBrowser({ headless: options.headless }); const browser = await launchBrowser({ headless: options.headless });
const page = await browser.newPage(); const page = browser.pages()[0] || await browser.newPage();
try { try {
// Navigate to URL
console.log(`Navigating to: ${options.url}`); console.log(`Navigating to: ${options.url}`);
await page.goto(options.url, { await page.goto(options.url, {
timeout: options.timeout ?? 60000, timeout: options.timeout ?? 60000,
waitUntil: 'domcontentloaded', waitUntil: 'domcontentloaded',
}); });
// Wait if specified
if (options.wait) { if (options.wait) {
console.log(`Waiting ${options.wait}ms...`); console.log(`Waiting ${options.wait}ms...`);
await page.waitForTimeout(options.wait); await sleep(options.wait);
} }
const result: BrowseResult = { const result: BrowseResult = {
@@ -92,7 +90,6 @@ export async function browse(options: BrowseOptions): Promise<BrowseResult> {
console.log(`Page title: ${result.title}`); console.log(`Page title: ${result.title}`);
console.log(`Final URL: ${result.url}`); console.log(`Final URL: ${result.url}`);
// Take screenshot if requested
if (options.screenshot) { if (options.screenshot) {
const outputPath = options.output ?? 'screenshot.png'; const outputPath = options.output ?? 'screenshot.png';
await page.screenshot({ path: outputPath, fullPage: true }); await page.screenshot({ path: outputPath, fullPage: true });
@@ -100,11 +97,10 @@ export async function browse(options: BrowseOptions): Promise<BrowseResult> {
console.log(`Screenshot saved: ${outputPath}`); console.log(`Screenshot saved: ${outputPath}`);
} }
// If interactive mode, keep browser open
if (options.interactive) { if (options.interactive) {
console.log('\nInteractive mode - browser will stay open.'); console.log('\nInteractive mode - browser will stay open.');
console.log('Press Ctrl+C to close.'); console.log('Press Ctrl+C to close.');
await new Promise(() => {}); // Keep running await new Promise(() => {});
} }
return result; return result;
@@ -115,16 +111,14 @@ export async function browse(options: BrowseOptions): Promise<BrowseResult> {
} }
} }
// Export page for use in other scripts
export async function getPage(options?: { export async function getPage(options?: {
headless?: boolean; headless?: boolean;
}): Promise<{ page: Page; browser: BrowserContext }> { }): Promise<{ page: Page; browser: BrowserContext }> {
const browser = await launchBrowser({ headless: options?.headless }); const browser = await launchBrowser({ headless: options?.headless });
const page = await browser.newPage(); const page = browser.pages()[0] || await browser.newPage();
return { page, browser }; return { page, browser };
} }
// CLI entry point
async function main() { async function main() {
const args = parseArgs(process.argv.slice(2), { const args = parseArgs(process.argv.slice(2), {
string: ['url', 'output'], string: ['url', 'output'],
@@ -145,7 +139,7 @@ async function main() {
if (args.help || !args.url) { if (args.help || !args.url) {
console.log(` console.log(`
Web Browser with Camoufox Web Browser with CloakBrowser
Usage: Usage:
npx tsx browse.ts --url <url> [options] npx tsx browse.ts --url <url> [options]
@@ -166,8 +160,8 @@ Examples:
npx tsx browse.ts --url "https://example.com" --headless false --interactive npx tsx browse.ts --url "https://example.com" --headless false --interactive
Environment Variables: Environment Variables:
CAMOUFOX_PROFILE_PATH Custom profile directory (default: ~/.camoufox-profile/) CLOAKBROWSER_PROFILE_PATH Custom profile directory (default: ~/.cloakbrowser-profile/)
CAMOUFOX_HEADLESS Default headless mode (true/false) CLOAKBROWSER_HEADLESS Default headless mode (true/false)
`); `);
process.exit(args.help ? 0 : 1); process.exit(args.help ? 0 : 1);
} }
@@ -188,7 +182,6 @@ Environment Variables:
} }
} }
// Run if executed directly
const isMainModule = process.argv[1]?.includes('browse.ts'); const isMainModule = process.argv[1]?.includes('browse.ts');
if (isMainModule) { if (isMainModule) {
main(); main();

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env node
import fs from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Emit a one-line JSON error object on stderr and abort with a non-zero exit code.
function fail(message, details) {
  const report = { error: message };
  if (details) report.details = details;
  process.stderr.write(`${JSON.stringify(report)}\n`);
  process.exit(1);
}

// Verify the skill's runtime wiring: both packages must resolve, and browse.ts
// must actually import and use the CloakBrowser persistent-context launcher.
async function main() {
  try {
    await import("cloakbrowser");
    await import("playwright-core");
  } catch (error) {
    const detail = error instanceof Error ? error.message : String(error);
    fail("Missing dependency/config: web-automation requires cloakbrowser and playwright-core.", detail);
  }

  const browseSource = fs.readFileSync(path.join(__dirname, "browse.ts"), "utf8");
  const usesLauncher = /launchPersistentContext/.test(browseSource);
  const importsCloak = /from ['"]cloakbrowser['"]/.test(browseSource);
  if (!usesLauncher || !importsCloak) {
    fail("browse.ts is not configured for CloakBrowser.");
  }

  process.stdout.write("OK: cloakbrowser + playwright-core installed\n");
  process.stdout.write("OK: CloakBrowser integration detected in browse.ts\n");
}

main().catch((error) => {
  fail("Install check failed.", error instanceof Error ? error.message : String(error));
});

View File

@@ -0,0 +1,188 @@
#!/usr/bin/env node
import fs from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
// Tunable limits for the one-shot extraction run.
const DEFAULT_WAIT_MS = 5000;          // settle time after navigation when WAIT_TIME is unset
const MAX_WAIT_MS = 20000;             // upper bound applied to user-provided WAIT_TIME
const NAV_TIMEOUT_MS = 30000;          // page.goto timeout
const EXTRA_CHALLENGE_WAIT_MS = 8000;  // extra wait when an anti-bot challenge is detected
const CONTENT_LIMIT = 12000;           // max characters of body text included in the result
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Print a single-line JSON error object to stderr and terminate the process
// with exit code 1. `details` is included only when truthy.
function fail(message, details) {
  const body = details ? { error: message, details } : { error: message };
  process.stderr.write(`${JSON.stringify(body)}\n`);
  process.exit(1);
}
// Parse the WAIT_TIME (ms) environment string. Missing, non-numeric, or
// negative input falls back to the default; valid values are clamped to the
// allowed maximum.
function parseWaitTime(raw) {
  const parsed = Number.parseInt(raw || `${DEFAULT_WAIT_MS}`, 10);
  const invalid = !Number.isFinite(parsed) || parsed < 0;
  return invalid ? DEFAULT_WAIT_MS : Math.min(parsed, MAX_WAIT_MS);
}
// Validate the CLI-supplied URL: it must be present, parseable, and http(s).
// Exits the process via fail() on any violation; otherwise returns the
// canonical string form of the URL.
function parseTarget(rawUrl) {
  if (!rawUrl) {
    fail("Missing URL. Usage: node extract.js <URL>");
  }
  let parsed;
  try {
    parsed = new URL(rawUrl);
  } catch (error) {
    fail("Invalid URL.", error.message);
  }
  const scheme = parsed.protocol;
  if (scheme !== "http:" && scheme !== "https:") {
    fail("Only http and https URLs are allowed.");
  }
  return parsed.toString();
}
// Create the parent directory of filePath (recursively) so a later write
// succeeds. A falsy path is a no-op.
function ensureParentDir(filePath) {
  if (filePath) {
    fs.mkdirSync(path.dirname(filePath), { recursive: true });
  }
}
// Promise-based delay helper; resolves after ms milliseconds.
function sleep(ms) {
  return new Promise(function (resolve) {
    setTimeout(resolve, ms);
  });
}
// Best-effort detection of an anti-bot interstitial (Cloudflare-style) on the
// current page, based on well-known challenge phrases and challenge iframes.
// Returns false on any evaluation error (e.g. mid-navigation), so callers
// treat "unknown" as "no challenge".
async function detectChallenge(page) {
  try {
    return await page.evaluate(() => {
      const text = (document.body?.innerText || "").toLowerCase();
      return (
        text.includes("checking your browser") ||
        text.includes("just a moment") ||
        text.includes("verify you are human") ||
        text.includes("press and hold") ||
        document.querySelector('iframe[src*="challenge"]') !== null ||
        document.querySelector('iframe[src*="cloudflare"]') !== null
      );
    });
  } catch {
    return false;
  }
}
// Dynamically import the cloakbrowser package; abort with an install hint
// (via fail, which exits) when it is not present.
async function loadCloakBrowser() {
  try {
    const mod = await import("cloakbrowser");
    return mod;
  } catch (error) {
    fail("CloakBrowser is not installed for this skill. Run pnpm install in this skill's scripts directory first.", error.message);
  }
}
// Run fn while console.log/console.error are redirected to stderr, so stdout
// stays reserved for the JSON result. The originals are always restored.
async function runWithStderrLogs(fn) {
  const saved = { log: console.log, error: console.error };
  const toStderr = (...args) => process.stderr.write(`${args.join(" ")}\n`);
  console.log = toStderr;
  console.error = toStderr;
  try {
    return await fn();
  } finally {
    console.log = saved.log;
    console.error = saved.error;
  }
}
// Orchestrate one extraction run: parse config from argv/env, launch a
// CloakBrowser context, load the page, optionally wait out an anti-bot
// challenge, then print a JSON summary (text, title, meta description,
// optional screenshot/HTML paths) to stdout. All diagnostics go to stderr.
async function main() {
  const requestedUrl = parseTarget(process.argv[2]);
  const waitTime = parseWaitTime(process.env.WAIT_TIME);
  const screenshotPath = process.env.SCREENSHOT_PATH || "";
  const saveHtml = process.env.SAVE_HTML === "true";
  // Headless unless HEADLESS=false is set explicitly.
  const headless = process.env.HEADLESS !== "false";
  const userAgent = process.env.USER_AGENT || undefined;
  const startedAt = Date.now();
  const { ensureBinary, launchContext } = await loadCloakBrowser();
  let context;
  try {
    // Library logging is redirected to stderr so stdout stays pure JSON.
    await runWithStderrLogs(() => ensureBinary());
    context = await runWithStderrLogs(() => launchContext({
      headless,
      userAgent,
      locale: "en-US",
      viewport: { width: 1440, height: 900 },
      humanize: true,
    }));
    const page = await context.newPage();
    const response = await page.goto(requestedUrl, {
      waitUntil: "domcontentloaded",
      timeout: NAV_TIMEOUT_MS
    });
    await sleep(waitTime);
    // If a challenge page is showing, give it extra time to clear, then re-check.
    let challengeDetected = await detectChallenge(page);
    if (challengeDetected) {
      await sleep(EXTRA_CHALLENGE_WAIT_MS);
      challengeDetected = await detectChallenge(page);
    }
    const extracted = await page.evaluate((contentLimit) => {
      const bodyText = document.body?.innerText || "";
      return {
        finalUrl: window.location.href,
        title: document.title || "",
        content: bodyText.slice(0, contentLimit),
        metaDescription:
          document.querySelector('meta[name="description"]')?.content ||
          document.querySelector('meta[property="og:description"]')?.content ||
          ""
      };
    }, CONTENT_LIMIT);
    const result = {
      requestedUrl,
      finalUrl: extracted.finalUrl,
      title: extracted.title,
      content: extracted.content,
      metaDescription: extracted.metaDescription,
      status: response ? response.status() : null,
      challengeDetected,
      elapsedSeconds: ((Date.now() - startedAt) / 1000).toFixed(2)
    };
    if (screenshotPath) {
      ensureParentDir(screenshotPath);
      await page.screenshot({ path: screenshotPath, fullPage: false, timeout: 10000 });
      result.screenshot = screenshotPath;
    }
    if (saveHtml) {
      // HTML lands next to the screenshot when one was requested, otherwise in
      // this script's directory with a timestamped name.
      const htmlTarget = screenshotPath
        ? screenshotPath.replace(/\.[^.]+$/, ".html")
        : path.resolve(__dirname, `page-${Date.now()}.html`);
      ensureParentDir(htmlTarget);
      fs.writeFileSync(htmlTarget, await page.content());
      result.htmlFile = htmlTarget;
    }
    process.stdout.write(`${JSON.stringify(result, null, 2)}\n`);
    await context.close();
  } catch (error) {
    if (context) {
      try {
        await context.close();
      } catch {
        // Ignore close errors after the primary failure.
      }
    }
    fail("Scrape failed.", error.message);
  }
}
main();

View File

@@ -0,0 +1,329 @@
#!/usr/bin/env npx tsx
import parseArgs from 'minimist';
import type { Page } from 'playwright-core';
import { launchBrowser } from './browse';
// One executable unit of a flow. 'goto' URLs are validated to http/https by
// normalizeNavigationUrl; 'click' carries exactly one of selector / text /
// (role + name) depending on how the step was parsed.
type Step =
  | { action: 'goto'; url: string }
  | { action: 'click'; selector?: string; text?: string; role?: string; name?: string }
  | { action: 'type'; selector?: string; text: string }
  | { action: 'press'; key: string; selector?: string }
  | { action: 'wait'; ms: number }
  | { action: 'screenshot'; path: string }
  | { action: 'extract'; selector: string; count?: number };
/**
 * Validate a navigation target and return its canonical string form.
 * Throws for unparseable URLs and for any scheme other than http/https.
 */
function normalizeNavigationUrl(rawUrl: string): string {
  let target: URL;
  try {
    target = new URL(rawUrl);
  } catch {
    throw new Error(`Invalid navigation URL: ${rawUrl}`);
  }
  const allowed = target.protocol === 'http:' || target.protocol === 'https:';
  if (!allowed) {
    throw new Error(`Only http and https URLs are allowed in flow steps: ${rawUrl}`);
  }
  return target.toString();
}
/** Map common key aliases (case-insensitive) to Playwright key names; unknown keys pass through unchanged. */
function normalizeKey(k: string): string {
  if (!k) return 'Enter';
  const aliases: Record<string, string> = {
    enter: 'Enter',
    return: 'Enter',
    tab: 'Tab',
    escape: 'Escape',
    esc: 'Escape',
  };
  return aliases[k.toLowerCase()] ?? k;
}
/** Split a natural-language instruction into step phrases on "then" (any case) or ";" separators, dropping empties. */
function splitInstructions(instruction: string): string[] {
  const pieces = instruction.split(/\bthen\b|;/gi);
  const phrases: string[] = [];
  for (const piece of pieces) {
    const trimmed = piece.trim();
    if (trimmed) phrases.push(trimmed);
  }
  return phrases;
}
// Translate a natural-language instruction string into an ordered Step list.
// Each phrase (split on "then"/";") is matched against the patterns below in
// priority order; the first match wins and an unmatched phrase is a hard error.
function parseInstruction(instruction: string): Step[] {
  const parts = splitInstructions(instruction);
  const steps: Step[] = [];
  for (const p of parts) {
    // go to https://...
    const goto = p.match(/^(?:go to|open|navigate to)\s+(https?:\/\/\S+)/i);
    if (goto) {
      steps.push({ action: 'goto', url: normalizeNavigationUrl(goto[1]) });
      continue;
    }
    // click on "text" or click #selector or click button "name"
    const clickRole = p.match(/^click\s+(button|link|textbox|img|image|tab)\s+"([^"]+)"$/i);
    if (clickRole) {
      // "image" is accepted as an alias for the ARIA role "img".
      const role = clickRole[1].toLowerCase() === 'image' ? 'img' : clickRole[1].toLowerCase();
      steps.push({ action: 'click', role, name: clickRole[2] });
      continue;
    }
    const clickText = p.match(/^click(?: on)?\s+"([^"]+)"/i);
    if (clickText) {
      steps.push({ action: 'click', text: clickText[1] });
      continue;
    }
    // Bare-selector click: #id, .class, or tag[attr=...] forms.
    const clickSelector = p.match(/^click(?: on)?\s+(#[\w-]+|\.[\w-]+|[a-z]+\[[^\]]+\])/i);
    if (clickSelector) {
      steps.push({ action: 'click', selector: clickSelector[1] });
      continue;
    }
    // type "text" [in selector]
    const typeInto = p.match(/^type\s+"([^"]+)"\s+in\s+(.+)$/i);
    if (typeInto) {
      steps.push({ action: 'type', text: typeInto[1], selector: typeInto[2].trim() });
      continue;
    }
    const typeOnly = p.match(/^type\s+"([^"]+)"$/i);
    if (typeOnly) {
      steps.push({ action: 'type', text: typeOnly[1] });
      continue;
    }
    // press enter [in selector]
    const pressIn = p.match(/^press\s+(\w+)\s+in\s+(.+)$/i);
    if (pressIn) {
      steps.push({ action: 'press', key: normalizeKey(pressIn[1]), selector: pressIn[2].trim() });
      continue;
    }
    const pressOnly = p.match(/^press\s+(\w+)$/i);
    if (pressOnly) {
      steps.push({ action: 'press', key: normalizeKey(pressOnly[1]) });
      continue;
    }
    // wait 2s / wait 500ms
    const waitS = p.match(/^wait\s+(\d+)\s*s(?:ec(?:onds?)?)?$/i);
    if (waitS) {
      steps.push({ action: 'wait', ms: parseInt(waitS[1], 10) * 1000 });
      continue;
    }
    const waitMs = p.match(/^wait\s+(\d+)\s*ms$/i);
    if (waitMs) {
      steps.push({ action: 'wait', ms: parseInt(waitMs[1], 10) });
      continue;
    }
    // screenshot path
    const shot = p.match(/^screenshot(?: to)?\s+(.+)$/i);
    if (shot) {
      steps.push({ action: 'screenshot', path: shot[1].trim() });
      continue;
    }
    // NOTE(review): 'extract' steps have no natural-language form here; they are
    // only reachable via the JSON --steps input.
    throw new Error(`Could not parse step: "${p}"`);
  }
  return steps;
}
/** Re-validate every goto URL in a parsed step list; all other steps pass through untouched. */
function validateSteps(steps: Step[]): Step[] {
  return steps.map((step) => {
    if (step.action !== 'goto') {
      return step;
    }
    return { ...step, url: normalizeNavigationUrl(step.url) };
  });
}
/** Escape regex metacharacters so the value can be embedded literally inside a RegExp. */
function escapeRegExp(value: string): string {
  const metachars = /[.*+?^${}()|[\]\\]/g;
  return value.replace(metachars, '\\$&');
}
/** Heuristic: does the text look like a login affordance (English or Italian terms)? */
function isLikelyLoginText(text: string): boolean {
  const loginPattern = /(login|accedi|sign\s*in|entra)/i;
  return loginPattern.test(text);
}
/**
 * Try to click an element matching the given text: exact match first, then
 * substring match, preferring button/link roles over raw text nodes.
 * Returns true as soon as a click lands, false when every candidate fails.
 */
async function clickByText(page: Page, text: string): Promise<boolean> {
  const escaped = escapeRegExp(text);
  const exact = new RegExp(`^${escaped}$`, 'i');
  const loose = new RegExp(escaped, 'i');
  for (const pattern of [exact, loose]) {
    const candidates = [
      page.getByRole('button', { name: pattern }).first(),
      page.getByRole('link', { name: pattern }).first(),
      page.getByText(pattern).first(),
    ];
    for (const candidate of candidates) {
      if (!(await candidate.count())) continue;
      try {
        await candidate.click({ timeout: 8000 });
        return true;
      } catch {
        // Click failed (covered/detached); fall through to the next candidate.
      }
    }
  }
  return false;
}
// Recovery path when a login-looking click target could not be found: scan the
// page for anchor/button elements whose text mentions a login term and navigate
// directly to the best candidate href. Returns true when a navigation happened,
// false when no fallback applied.
async function fallbackLoginNavigation(page: Page, requestedText: string): Promise<boolean> {
  // Only attempt this when the requested click text actually looks login-related.
  if (!isLikelyLoginText(requestedText)) return false;
  const current = new URL(page.url());
  const candidateLinks = await page.evaluate(() => {
    const loginTerms = ['login', 'accedi', 'sign in', 'entra'];
    const anchors = Array.from(document.querySelectorAll('a[href], a[onclick], button[onclick]')) as Array<HTMLAnchorElement | HTMLButtonElement>;
    return anchors
      .map((el) => {
        const text = (el.textContent || '').trim().toLowerCase();
        const href = (el as HTMLAnchorElement).getAttribute('href') || '';
        return { text, href };
      })
      .filter((x) => x.text && loginTerms.some((t) => x.text.includes(t)))
      .map((x) => x.href)
      .filter(Boolean);
  });
  // Prefer real URLs (not javascript:)
  const realCandidate = candidateLinks.find((h) => /login|account\/login/i.test(h) && !h.startsWith('javascript:'));
  if (realCandidate) {
    // Resolve relative hrefs against the current page URL before navigating.
    const target = new URL(realCandidate, page.url()).toString();
    await page.goto(target, { waitUntil: 'domcontentloaded', timeout: 60000 });
    return true;
  }
  // Site-specific fallback for Corriere
  if (/corriere\.it$/i.test(current.hostname) || /\.corriere\.it$/i.test(current.hostname)) {
    await page.goto('https://www.corriere.it/account/login', {
      waitUntil: 'domcontentloaded',
      timeout: 60000,
    });
    return true;
  }
  return false;
}
/** Fill text into the explicit selector when given, otherwise into the first plausible text input on the page. */
async function typeInBestTarget(page: Page, text: string, selector?: string) {
  const target = selector
    ? page.locator(selector).first()
    : page.locator('input[name="q"], input[type="search"], input[type="text"], textarea').first();
  await target.click({ timeout: 10000 });
  await target.fill(text);
}
/** Press a key on the element matching selector, or globally via the keyboard when no selector is given. */
async function pressOnTarget(page: Page, key: string, selector?: string) {
  if (!selector) {
    await page.keyboard.press(key);
    return;
  }
  await page.locator(selector).first().press(key);
}
/**
 * Executes a validated step list sequentially on the given page.
 * Clicks that cannot resolve their text target fall back to login-aware
 * navigation before failing; post-click navigation waits are best-effort.
 */
async function runSteps(page: Page, steps: Step[]) {
  for (const current of steps) {
    switch (current.action) {
      case 'goto': {
        const destination = normalizeNavigationUrl(current.url);
        await page.goto(destination, {
          waitUntil: 'domcontentloaded',
          timeout: 60000,
        });
        break;
      }
      case 'click': {
        if (current.selector) {
          await page.locator(current.selector).first().click({ timeout: 15000 });
        } else if (current.role && current.name) {
          const namePattern = new RegExp(escapeRegExp(current.name), 'i');
          await page
            .getByRole(current.role as any, { name: namePattern })
            .first()
            .click({ timeout: 15000 });
        } else if (current.text) {
          const clicked = await clickByText(page, current.text);
          // Only attempt the login fallback when the normal click failed.
          if (!clicked && !(await fallbackLoginNavigation(page, current.text))) {
            throw new Error(`Could not click target text: ${current.text}`);
          }
        } else {
          throw new Error('click step missing selector/text/role');
        }
        try {
          await page.waitForLoadState('domcontentloaded', { timeout: 10000 });
        } catch {
          // no navigation is fine
        }
        break;
      }
      case 'type': {
        await typeInBestTarget(page, current.text, current.selector);
        break;
      }
      case 'press': {
        await pressOnTarget(page, current.key, current.selector);
        break;
      }
      case 'wait': {
        await page.waitForTimeout(current.ms);
        break;
      }
      case 'screenshot': {
        await page.screenshot({ path: current.path, fullPage: true });
        break;
      }
      case 'extract': {
        const texts = await page.locator(current.selector).allTextContents();
        const limit = current.count ?? texts.length;
        const cleaned = texts
          .slice(0, limit)
          .map((t) => t.trim())
          .filter(Boolean);
        console.log(JSON.stringify(cleaned, null, 2));
        break;
      }
      default:
        throw new Error('Unknown step');
    }
  }
}
/**
 * CLI entry point: parses flags, builds the step list (from a JSON --steps
 * payload or a natural-language --instruction), launches the browser, runs
 * the flow, and always closes the browser afterwards.
 */
async function main() {
  const argv = parseArgs(process.argv.slice(2), {
    string: ['instruction', 'steps'],
    boolean: ['headless', 'help'],
    default: { headless: true },
    alias: { i: 'instruction', s: 'steps', h: 'help' },
  });
  const noInputGiven = !argv.instruction && !argv.steps;
  if (argv.help || noInputGiven) {
    console.log(`
General Web Flow Runner (CloakBrowser)
Usage:
  npx tsx flow.ts --instruction "go to https://example.com then type \"hello\" then press enter"
  npx tsx flow.ts --steps '[{"action":"goto","url":"https://example.com"}]'
Supported natural steps:
  - go to/open/navigate to <url>
  - click on "Text"
  - click <css-selector>
  - type "text"
  - type "text" in <css-selector>
  - press <key>
  - press <key> in <css-selector>
  - wait <N>s | wait <N>ms
  - screenshot <path>
`);
    // Exit 0 when help was requested explicitly, 1 when input was missing.
    process.exit(argv.help ? 0 : 1);
  }
  const rawSteps = argv.steps ? JSON.parse(argv.steps) : parseInstruction(argv.instruction);
  const steps = validateSteps(rawSteps);
  const browser = await launchBrowser({ headless: argv.headless });
  const page = await browser.newPage();
  try {
    await runSteps(page, steps);
    console.log('Flow complete. Final URL:', page.url());
  } finally {
    await browser.close();
  }
}
// Run the CLI; surface a readable message and a non-zero exit on failure.
main().catch((err: unknown) => {
  console.error('Error:', err instanceof Error ? err.message : err);
  process.exit(1);
});

View File

@@ -1,19 +1,26 @@
{ {
"name": "web-automation-scripts", "name": "web-automation-scripts",
"version": "1.0.0", "version": "1.0.0",
"description": "Web browsing and scraping scripts using Camoufox", "description": "Web browsing and scraping scripts using CloakBrowser",
"type": "module", "type": "module",
"scripts": { "scripts": {
"check-install": "node check-install.js",
"extract": "node extract.js",
"browse": "tsx browse.ts", "browse": "tsx browse.ts",
"auth": "tsx auth.ts",
"flow": "tsx flow.ts",
"scrape": "tsx scrape.ts", "scrape": "tsx scrape.ts",
"fetch-browser": "npx camoufox-js fetch" "typecheck": "tsc --noEmit -p tsconfig.json",
"lint": "pnpm run typecheck && node --check check-install.js && node --check extract.js",
"fetch-browser": "npx cloakbrowser install"
}, },
"dependencies": { "dependencies": {
"@mozilla/readability": "^0.5.0", "@mozilla/readability": "^0.5.0",
"camoufox-js": "^0.8.5", "better-sqlite3": "^12.6.2",
"cloakbrowser": "^0.3.22",
"jsdom": "^24.0.0", "jsdom": "^24.0.0",
"minimist": "^1.2.8", "minimist": "^1.2.8",
"playwright-core": "^1.40.0", "playwright-core": "^1.59.1",
"turndown": "^7.1.2", "turndown": "^7.1.2",
"turndown-plugin-gfm": "^1.0.2" "turndown-plugin-gfm": "^1.0.2"
}, },

View File

@@ -11,9 +11,12 @@ importers:
'@mozilla/readability': '@mozilla/readability':
specifier: ^0.5.0 specifier: ^0.5.0
version: 0.5.0 version: 0.5.0
camoufox-js: better-sqlite3:
specifier: ^0.8.5 specifier: ^12.6.2
version: 0.8.5(playwright-core@1.57.0) version: 12.8.0
cloakbrowser:
specifier: ^0.3.22
version: 0.3.22(mmdb-lib@3.0.1)(playwright-core@1.59.1)
jsdom: jsdom:
specifier: ^24.0.0 specifier: ^24.0.0
version: 24.1.3 version: 24.1.3
@@ -21,8 +24,8 @@ importers:
specifier: ^1.2.8 specifier: ^1.2.8
version: 1.2.8 version: 1.2.8
playwright-core: playwright-core:
specifier: ^1.40.0 specifier: ^1.59.1
version: 1.57.0 version: 1.59.1
turndown: turndown:
specifier: ^7.1.2 specifier: ^7.1.2
version: 7.2.2 version: 7.2.2
@@ -238,13 +241,9 @@ packages:
cpu: [x64] cpu: [x64]
os: [win32] os: [win32]
'@isaacs/balanced-match@4.0.1': '@isaacs/fs-minipass@4.0.1':
resolution: {integrity: sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==} resolution: {integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==}
engines: {node: 20 || >=22} engines: {node: '>=18.0.0'}
'@isaacs/brace-expansion@5.0.0':
resolution: {integrity: sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==}
engines: {node: 20 || >=22}
'@mixmark-io/domino@2.2.0': '@mixmark-io/domino@2.2.0':
resolution: {integrity: sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw==} resolution: {integrity: sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw==}
@@ -253,10 +252,6 @@ packages:
resolution: {integrity: sha512-Z+CZ3QaosfFaTqvhQsIktyGrjFjSC0Fa4EMph4mqKnWhmyoGICsV/8QK+8HpXut6zV7zwfWwqDmEjtk1Qf6EgQ==} resolution: {integrity: sha512-Z+CZ3QaosfFaTqvhQsIktyGrjFjSC0Fa4EMph4mqKnWhmyoGICsV/8QK+8HpXut6zV7zwfWwqDmEjtk1Qf6EgQ==}
engines: {node: '>=14.0.0'} engines: {node: '>=14.0.0'}
'@sindresorhus/is@4.6.0':
resolution: {integrity: sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==}
engines: {node: '>=10'}
'@types/jsdom@21.1.7': '@types/jsdom@21.1.7':
resolution: {integrity: sha512-yOriVnggzrnQ3a9OKOCxaVuSug3w3/SbOj5i7VwXWZEyUNl3bLF9V3MfxGbZKuwqJOQyRfqXyROBB1CoZLFWzA==} resolution: {integrity: sha512-yOriVnggzrnQ3a9OKOCxaVuSug3w3/SbOj5i7VwXWZEyUNl3bLF9V3MfxGbZKuwqJOQyRfqXyROBB1CoZLFWzA==}
@@ -272,10 +267,6 @@ packages:
'@types/turndown@5.0.6': '@types/turndown@5.0.6':
resolution: {integrity: sha512-ru00MoyeeouE5BX4gRL+6m/BsDfbRayOskWqUvh7CLGW+UXxHQItqALa38kKnOiZPqJrtzJUgAC2+F0rL1S4Pg==} resolution: {integrity: sha512-ru00MoyeeouE5BX4gRL+6m/BsDfbRayOskWqUvh7CLGW+UXxHQItqALa38kKnOiZPqJrtzJUgAC2+F0rL1S4Pg==}
adm-zip@0.5.16:
resolution: {integrity: sha512-TGw5yVi4saajsSEgz25grObGHEUaDrniwvA2qwSC060KfqGPdglhvPMA2lPIoxs3PQIItj2iag35fONcQqgUaQ==}
engines: {node: '>=12.0'}
agent-base@7.1.4: agent-base@7.1.4:
resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==}
engines: {node: '>= 14'} engines: {node: '>= 14'}
@@ -286,12 +277,8 @@ packages:
base64-js@1.5.1: base64-js@1.5.1:
resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==}
baseline-browser-mapping@2.9.14: better-sqlite3@12.8.0:
resolution: {integrity: sha512-B0xUquLkiGLgHhpPBqvl7GWegWBUNuujQ6kXd/r1U38ElPT6Ok8KZ8e+FpUGEc2ZoRQUzq/aUnaKFc/svWUGSg==} resolution: {integrity: sha512-RxD2Vd96sQDjQr20kdP+F+dK/1OUNiVOl200vKBZY8u0vTwysfolF6Hq+3ZK2+h8My9YvZhHsF+RSGZW2VYrPQ==}
hasBin: true
better-sqlite3@12.6.0:
resolution: {integrity: sha512-FXI191x+D6UPWSze5IzZjhz+i9MK9nsuHsmTX9bXVl52k06AfZ2xql0lrgIUuzsMsJ7Vgl5kIptvDgBLIV3ZSQ==}
engines: {node: 20.x || 22.x || 23.x || 24.x || 25.x} engines: {node: 20.x || 22.x || 23.x || 24.x || 25.x}
bindings@1.5.0: bindings@1.5.0:
@@ -300,11 +287,6 @@ packages:
bl@4.1.0: bl@4.1.0:
resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==} resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==}
browserslist@4.28.1:
resolution: {integrity: sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==}
engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
hasBin: true
buffer@5.7.1: buffer@5.7.1:
resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==}
@@ -312,31 +294,33 @@ packages:
resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
callsites@3.1.0:
resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==}
engines: {node: '>=6'}
camoufox-js@0.8.5:
resolution: {integrity: sha512-20ihPbspAcOVSUTX9Drxxp0C116DON1n8OVA1eUDglWZiHwiHwFVFOMrIEBwAHMZpU11mIEH/kawJtstRIrDPA==}
engines: {node: '>= 20'}
hasBin: true
peerDependencies:
playwright-core: '*'
caniuse-lite@1.0.30001764:
resolution: {integrity: sha512-9JGuzl2M+vPL+pz70gtMF9sHdMFbY9FJaQBi186cHKH3pSzDvzoUJUPV6fqiKIMyXbud9ZLg4F3Yza1vJ1+93g==}
chownr@1.1.4: chownr@1.1.4:
resolution: {integrity: sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==} resolution: {integrity: sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==}
chownr@3.0.0:
resolution: {integrity: sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==}
engines: {node: '>=18'}
cloakbrowser@0.3.22:
resolution: {integrity: sha512-L2CWQiVdunhKslTli8HCe4INhaAt4npbvsM2Ox4/idqiRmT2BADndQ05eDS8TonNSWeWqbjsh04UhSZOD3B6mg==}
engines: {node: '>=18.0.0'}
hasBin: true
peerDependencies:
mmdb-lib: '>=2.0.0'
playwright-core: '>=1.40.0'
puppeteer-core: '>=21.0.0'
peerDependenciesMeta:
mmdb-lib:
optional: true
playwright-core:
optional: true
puppeteer-core:
optional: true
combined-stream@1.0.8: combined-stream@1.0.8:
resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==}
engines: {node: '>= 0.8'} engines: {node: '>= 0.8'}
commander@14.0.2:
resolution: {integrity: sha512-TywoWNNRbhoD0BXs1P3ZEScW8W5iKrnbithIl0YH+uCmBd0QpPOA8yc82DS3BIE5Ma6FnBVUsJ7wVUDz4dvOWQ==}
engines: {node: '>=20'}
cssstyle@4.6.0: cssstyle@4.6.0:
resolution: {integrity: sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==} resolution: {integrity: sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==}
engines: {node: '>=18'} engines: {node: '>=18'}
@@ -369,24 +353,14 @@ packages:
resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==}
engines: {node: '>=0.4.0'} engines: {node: '>=0.4.0'}
detect-europe-js@0.1.2:
resolution: {integrity: sha512-lgdERlL3u0aUdHocoouzT10d9I89VVhk0qNRmll7mXdGfJT1/wqZ2ZLA4oJAjeACPY5fT1wsbq2AT+GkuInsow==}
detect-libc@2.1.2: detect-libc@2.1.2:
resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==} resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==}
engines: {node: '>=8'} engines: {node: '>=8'}
dot-prop@6.0.1:
resolution: {integrity: sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==}
engines: {node: '>=10'}
dunder-proto@1.0.1: dunder-proto@1.0.1:
resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
electron-to-chromium@1.5.267:
resolution: {integrity: sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==}
end-of-stream@1.4.5: end-of-stream@1.4.5:
resolution: {integrity: sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==} resolution: {integrity: sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==}
@@ -415,10 +389,6 @@ packages:
engines: {node: '>=18'} engines: {node: '>=18'}
hasBin: true hasBin: true
escalade@3.2.0:
resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==}
engines: {node: '>=6'}
expand-template@2.0.3: expand-template@2.0.3:
resolution: {integrity: sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==} resolution: {integrity: sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==}
engines: {node: '>=6'} engines: {node: '>=6'}
@@ -426,10 +396,6 @@ packages:
file-uri-to-path@1.0.0: file-uri-to-path@1.0.0:
resolution: {integrity: sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==} resolution: {integrity: sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==}
fingerprint-generator@2.1.79:
resolution: {integrity: sha512-0dr3kTgvRYHleRPp6OBDcPb8amJmOyFr9aOuwnpN6ooWJ5XyT+/aL/SZ6CU4ZrEtzV26EyJ2Lg7PT32a0NdrRA==}
engines: {node: '>=16.0.0'}
form-data@4.0.5: form-data@4.0.5:
resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==}
engines: {node: '>= 6'} engines: {node: '>= 6'}
@@ -445,9 +411,6 @@ packages:
function-bind@1.1.2: function-bind@1.1.2:
resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==}
generative-bayesian-network@2.1.79:
resolution: {integrity: sha512-aPH+V2wO+HE0BUX1LbsM8Ak99gmV43lgh+D7GDteM0zgnPqiAwcK9JZPxMPZa3aJUleFtFaL1lAei8g9zNrDIA==}
get-intrinsic@1.3.0: get-intrinsic@1.3.0:
resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
@@ -462,10 +425,6 @@ packages:
github-from-package@0.0.0: github-from-package@0.0.0:
resolution: {integrity: sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==} resolution: {integrity: sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==}
glob@13.0.0:
resolution: {integrity: sha512-tvZgpqk6fz4BaNZ66ZsRaZnbHvP/jG3uKJvAZOwEVUL4RTA5nJeeLYfyN9/VA8NX/V3IBG+hkeuGpKjvELkVhA==}
engines: {node: 20 || >=22}
gopd@1.2.0: gopd@1.2.0:
resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
@@ -482,10 +441,6 @@ packages:
resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
header-generator@2.1.79:
resolution: {integrity: sha512-YvHx8teq4QmV5mz7wdPMsj9n1OZBPnZxA4QE+EOrtx7xbmGvd1gBvDNKCb5XqS4GR/TL75MU5hqMqqqANdILRg==}
engines: {node: '>=16.0.0'}
html-encoding-sniffer@4.0.0: html-encoding-sniffer@4.0.0:
resolution: {integrity: sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==} resolution: {integrity: sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==}
engines: {node: '>=18'} engines: {node: '>=18'}
@@ -505,74 +460,15 @@ packages:
ieee754@1.2.1: ieee754@1.2.1:
resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==}
impit-darwin-arm64@0.7.6:
resolution: {integrity: sha512-M7NQXkttyzqilWfzVkNCp7hApT69m0etyJkVpHze4bR5z1kJnHhdsb8BSdDv2dzvZL4u1JyqZNxq+qoMn84eUw==}
engines: {node: '>= 10'}
cpu: [arm64]
os: [darwin]
impit-darwin-x64@0.7.6:
resolution: {integrity: sha512-kikTesWirAwJp9JPxzGLoGVc+heBlEabWS5AhTkQedACU153vmuL90OBQikVr3ul2N0LPImvnuB+51wV0zDE6g==}
engines: {node: '>= 10'}
cpu: [x64]
os: [darwin]
impit-linux-arm64-gnu@0.7.6:
resolution: {integrity: sha512-H6GHjVr/0lG9VEJr6IHF8YLq+YkSIOF4k7Dfue2ygzUAj1+jZ5ZwnouhG/XrZHYW6EWsZmEAjjRfWE56Q0wDRQ==}
engines: {node: '>= 10'}
cpu: [arm64]
os: [linux]
impit-linux-arm64-musl@0.7.6:
resolution: {integrity: sha512-1sCB/UBVXLZTpGJsXRdNNSvhN9xmmQcYLMWAAB4Itb7w684RHX1pLoCb6ichv7bfAf6tgaupcFIFZNBp3ghmQA==}
engines: {node: '>= 10'}
cpu: [arm64]
os: [linux]
impit-linux-x64-gnu@0.7.6:
resolution: {integrity: sha512-yYhlRnZ4fhKt8kuGe0JK2WSHc8TkR6BEH0wn+guevmu8EOn9Xu43OuRvkeOyVAkRqvFnlZtMyySUo/GuSLz9Gw==}
engines: {node: '>= 10'}
cpu: [x64]
os: [linux]
impit-linux-x64-musl@0.7.6:
resolution: {integrity: sha512-sdGWyu+PCLmaOXy7Mzo4WP61ZLl5qpZ1L+VeXW+Ycazgu0e7ox0NZLdiLRunIrEzD+h0S+e4CyzNwaiP3yIolg==}
engines: {node: '>= 10'}
cpu: [x64]
os: [linux]
impit-win32-arm64-msvc@0.7.6:
resolution: {integrity: sha512-sM5deBqo0EuXg5GACBUMKEua9jIau/i34bwNlfrf/Amnw1n0GB4/RkuUh+sKiUcbNAntrRq+YhCq8qDP8IW19w==}
engines: {node: '>= 10'}
cpu: [arm64]
os: [win32]
impit-win32-x64-msvc@0.7.6:
resolution: {integrity: sha512-ry63ADGLCB/PU/vNB1VioRt2V+klDJ34frJUXUZBEv1kA96HEAg9AxUk+604o+UHS3ttGH2rkLmrbwHOdAct5Q==}
engines: {node: '>= 10'}
cpu: [x64]
os: [win32]
impit@0.7.6:
resolution: {integrity: sha512-AkS6Gv63+E6GMvBrcRhMmOREKpq5oJ0J5m3xwfkHiEs97UIsbpEqFmW3sFw/sdyOTDGRF5q4EjaLxtb922Ta8g==}
engines: {node: '>= 20'}
inherits@2.0.4: inherits@2.0.4:
resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==}
ini@1.3.8: ini@1.3.8:
resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==} resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==}
is-obj@2.0.0:
resolution: {integrity: sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==}
engines: {node: '>=8'}
is-potential-custom-element-name@1.0.1: is-potential-custom-element-name@1.0.1:
resolution: {integrity: sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==} resolution: {integrity: sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==}
is-standalone-pwa@0.1.1:
resolution: {integrity: sha512-9Cbovsa52vNQCjdXOzeQq5CnCbAcRk05aU62K20WO372NrTv0NxibLFCK6lQ4/iZEFdEA3p3t2VNOn8AJ53F5g==}
jsdom@24.1.3: jsdom@24.1.3:
resolution: {integrity: sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==} resolution: {integrity: sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==}
engines: {node: '>=18'} engines: {node: '>=18'}
@@ -582,32 +478,13 @@ packages:
canvas: canvas:
optional: true optional: true
language-subtag-registry@0.3.23:
resolution: {integrity: sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==}
language-tags@2.1.0:
resolution: {integrity: sha512-D4CgpyCt+61f6z2jHjJS1OmZPviAWM57iJ9OKdFFWSNgS7Udj9QVWqyGs/cveVNF57XpZmhSvMdVIV5mjLA7Vg==}
engines: {node: '>=22'}
lodash.isequal@4.5.0:
resolution: {integrity: sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ==}
deprecated: This package is deprecated. Use require('node:util').isDeepStrictEqual instead.
lru-cache@10.4.3: lru-cache@10.4.3:
resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==}
lru-cache@11.2.4:
resolution: {integrity: sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==}
engines: {node: 20 || >=22}
math-intrinsics@1.1.0: math-intrinsics@1.1.0:
resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
maxmind@5.0.3:
resolution: {integrity: sha512-oMtZwLrsp0LcZehfYKIirtwKMBycMMqMA1/Dc9/BlUqIEtXO75mIzMJ3PYCV1Ji+BpoUCk+lTzRfh9c+ptGdyQ==}
engines: {node: '>=12', npm: '>=6'}
mime-db@1.52.0: mime-db@1.52.0:
resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==}
engines: {node: '>= 0.6'} engines: {node: '>= 0.6'}
@@ -620,10 +497,6 @@ packages:
resolution: {integrity: sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==} resolution: {integrity: sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==}
engines: {node: '>=10'} engines: {node: '>=10'}
minimatch@10.1.1:
resolution: {integrity: sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==}
engines: {node: 20 || >=22}
minimist@1.2.8: minimist@1.2.8:
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
@@ -631,6 +504,10 @@ packages:
resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==}
engines: {node: '>=16 || 14 >=14.17'} engines: {node: '>=16 || 14 >=14.17'}
minizlib@3.1.0:
resolution: {integrity: sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==}
engines: {node: '>= 18'}
mkdirp-classic@0.5.3: mkdirp-classic@0.5.3:
resolution: {integrity: sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==} resolution: {integrity: sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==}
@@ -648,43 +525,26 @@ packages:
resolution: {integrity: sha512-zsFhmbkAzwhTft6nd3VxcG0cvJsT70rL+BIGHWVq5fi6MwGrHwzqKaxXE+Hl2GmnGItnDKPPkO5/LQqjVkIdFg==} resolution: {integrity: sha512-zsFhmbkAzwhTft6nd3VxcG0cvJsT70rL+BIGHWVq5fi6MwGrHwzqKaxXE+Hl2GmnGItnDKPPkO5/LQqjVkIdFg==}
engines: {node: '>=10'} engines: {node: '>=10'}
node-releases@2.0.27:
resolution: {integrity: sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==}
nwsapi@2.2.23: nwsapi@2.2.23:
resolution: {integrity: sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ==} resolution: {integrity: sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ==}
once@1.4.0: once@1.4.0:
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
ow@0.28.2:
resolution: {integrity: sha512-dD4UpyBh/9m4X2NVjA+73/ZPBRF+uF4zIMFvvQsabMiEK8x41L3rQ8EENOi35kyyoaJwNxEeJcP6Fj1H4U409Q==}
engines: {node: '>=12'}
parse5@7.3.0: parse5@7.3.0:
resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==} resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==}
path-scurry@2.0.1: playwright-core@1.59.1:
resolution: {integrity: sha512-oWyT4gICAu+kaA7QWk/jvCHWarMKNs6pXOGWKDTr7cw4IGcUbW+PeTfbaQiLGheFRpjo6O9J0PmyMfQPjH71oA==} resolution: {integrity: sha512-HBV/RJg81z5BiiZ9yPzIiClYV/QMsDCKUyogwH9p3MCP6IYjUFu/MActgYAvK0oWyV9NlwM3GLBjADyWgydVyg==}
engines: {node: 20 || >=22}
picocolors@1.1.1:
resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==}
playwright-core@1.57.0:
resolution: {integrity: sha512-agTcKlMw/mjBWOnD6kFZttAAGHgi/Nw0CZ2o6JqWSbMlI219lAFLZZCyqByTsvVAJq5XA5H8cA6PrvBRpBWEuQ==}
engines: {node: '>=18'} engines: {node: '>=18'}
hasBin: true hasBin: true
prebuild-install@7.1.3: prebuild-install@7.1.3:
resolution: {integrity: sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug==} resolution: {integrity: sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug==}
engines: {node: '>=10'} engines: {node: '>=10'}
deprecated: No longer maintained. Please contact the author of the relevant native addon; alternatives are available.
hasBin: true hasBin: true
progress@2.0.3:
resolution: {integrity: sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==}
engines: {node: '>=0.4.0'}
psl@1.15.0: psl@1.15.0:
resolution: {integrity: sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==} resolution: {integrity: sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==}
@@ -724,10 +584,6 @@ packages:
safer-buffer@2.1.2: safer-buffer@2.1.2:
resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==}
sax@1.4.4:
resolution: {integrity: sha512-1n3r/tGXO6b6VXMdFT54SHzT9ytu9yr7TaELowdYpMqY/Ao7EnlQGmAQ1+RatX7Tkkdm6hONI2owqNx2aZj5Sw==}
engines: {node: '>=11.0.0'}
saxes@6.0.0: saxes@6.0.0:
resolution: {integrity: sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==} resolution: {integrity: sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==}
engines: {node: '>=v12.22.7'} engines: {node: '>=v12.22.7'}
@@ -760,9 +616,9 @@ packages:
resolution: {integrity: sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==} resolution: {integrity: sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==}
engines: {node: '>=6'} engines: {node: '>=6'}
tiny-lru@11.4.5: tar@7.5.13:
resolution: {integrity: sha512-hkcz3FjNJfKXjV4mjQ1OrXSLAehg8Hw+cEZclOVT+5c/cWQWImQ9wolzTjth+dmmDe++p3bme3fTxz6Q4Etsqw==} resolution: {integrity: sha512-tOG/7GyXpFevhXVh8jOPJrmtRpOTsYqUIkVdVooZYJS/z8WhfQUX8RJILmeuJNinGAMSu1veBr4asSHFt5/hng==}
engines: {node: '>=12'} engines: {node: '>=18'}
tough-cookie@4.1.4: tough-cookie@4.1.4:
resolution: {integrity: sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==} resolution: {integrity: sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==}
@@ -772,9 +628,6 @@ packages:
resolution: {integrity: sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==} resolution: {integrity: sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==}
engines: {node: '>=18'} engines: {node: '>=18'}
tslib@2.8.1:
resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==}
tsx@4.21.0: tsx@4.21.0:
resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==} resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==}
engines: {node: '>=18.0.0'} engines: {node: '>=18.0.0'}
@@ -794,13 +647,6 @@ packages:
engines: {node: '>=14.17'} engines: {node: '>=14.17'}
hasBin: true hasBin: true
ua-is-frozen@0.1.2:
resolution: {integrity: sha512-RwKDW2p3iyWn4UbaxpP2+VxwqXh0jpvdxsYpZ5j/MLLiQOfbsV5shpgQiw93+KMYQPcteeMQ289MaAFzs3G9pw==}
ua-parser-js@2.0.7:
resolution: {integrity: sha512-CFdHVHr+6YfbktNZegH3qbYvYgC7nRNEUm2tk7nSFXSODUu4tDBpaFpP1jdXBUOKKwapVlWRfTtS8bCPzsQ47w==}
hasBin: true
undici-types@7.16.0: undici-types@7.16.0:
resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==} resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==}
@@ -808,22 +654,12 @@ packages:
resolution: {integrity: sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==} resolution: {integrity: sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==}
engines: {node: '>= 4.0.0'} engines: {node: '>= 4.0.0'}
update-browserslist-db@1.2.3:
resolution: {integrity: sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==}
hasBin: true
peerDependencies:
browserslist: '>= 4.21.0'
url-parse@1.5.10: url-parse@1.5.10:
resolution: {integrity: sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==} resolution: {integrity: sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==}
util-deprecate@1.0.2: util-deprecate@1.0.2:
resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
vali-date@1.0.0:
resolution: {integrity: sha512-sgECfZthyaCKW10N0fm27cg8HYTFK5qMWgypqkXMQ4Wbl/zZKx7xZICgcoxIIE+WFAP/MBL2EFwC/YvLxw3Zeg==}
engines: {node: '>=0.10.0'}
w3c-xmlserializer@5.0.0: w3c-xmlserializer@5.0.0:
resolution: {integrity: sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==} resolution: {integrity: sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==}
engines: {node: '>=18'} engines: {node: '>=18'}
@@ -864,17 +700,13 @@ packages:
resolution: {integrity: sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==} resolution: {integrity: sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==}
engines: {node: '>=18'} engines: {node: '>=18'}
xml2js@0.6.2:
resolution: {integrity: sha512-T4rieHaC1EXcES0Kxxj4JWgaUQHDk+qwHcYOCFHfiwKz7tOVPLq7Hjq9dM1WCMhylqMEfP7hMcOIChvotiZegA==}
engines: {node: '>=4.0.0'}
xmlbuilder@11.0.1:
resolution: {integrity: sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==}
engines: {node: '>=4.0'}
xmlchars@2.2.0: xmlchars@2.2.0:
resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==}
yallist@5.0.0:
resolution: {integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==}
engines: {node: '>=18'}
snapshots: snapshots:
'@asamuzakjp/css-color@3.2.0': '@asamuzakjp/css-color@3.2.0':
@@ -983,18 +815,14 @@ snapshots:
'@esbuild/win32-x64@0.27.0': '@esbuild/win32-x64@0.27.0':
optional: true optional: true
'@isaacs/balanced-match@4.0.1': {} '@isaacs/fs-minipass@4.0.1':
'@isaacs/brace-expansion@5.0.0':
dependencies: dependencies:
'@isaacs/balanced-match': 4.0.1 minipass: 7.1.2
'@mixmark-io/domino@2.2.0': {} '@mixmark-io/domino@2.2.0': {}
'@mozilla/readability@0.5.0': {} '@mozilla/readability@0.5.0': {}
'@sindresorhus/is@4.6.0': {}
'@types/jsdom@21.1.7': '@types/jsdom@21.1.7':
dependencies: dependencies:
'@types/node': 25.0.6 '@types/node': 25.0.6
@@ -1011,17 +839,13 @@ snapshots:
'@types/turndown@5.0.6': {} '@types/turndown@5.0.6': {}
adm-zip@0.5.16: {}
agent-base@7.1.4: {} agent-base@7.1.4: {}
asynckit@0.4.0: {} asynckit@0.4.0: {}
base64-js@1.5.1: {} base64-js@1.5.1: {}
baseline-browser-mapping@2.9.14: {} better-sqlite3@12.8.0:
better-sqlite3@12.6.0:
dependencies: dependencies:
bindings: 1.5.0 bindings: 1.5.0
prebuild-install: 7.1.3 prebuild-install: 7.1.3
@@ -1036,14 +860,6 @@ snapshots:
inherits: 2.0.4 inherits: 2.0.4
readable-stream: 3.6.2 readable-stream: 3.6.2
browserslist@4.28.1:
dependencies:
baseline-browser-mapping: 2.9.14
caniuse-lite: 1.0.30001764
electron-to-chromium: 1.5.267
node-releases: 2.0.27
update-browserslist-db: 1.2.3(browserslist@4.28.1)
buffer@5.7.1: buffer@5.7.1:
dependencies: dependencies:
base64-js: 1.5.1 base64-js: 1.5.1
@@ -1054,33 +870,21 @@ snapshots:
es-errors: 1.3.0 es-errors: 1.3.0
function-bind: 1.1.2 function-bind: 1.1.2
callsites@3.1.0: {}
camoufox-js@0.8.5(playwright-core@1.57.0):
dependencies:
adm-zip: 0.5.16
better-sqlite3: 12.6.0
commander: 14.0.2
fingerprint-generator: 2.1.79
glob: 13.0.0
impit: 0.7.6
language-tags: 2.1.0
maxmind: 5.0.3
playwright-core: 1.57.0
progress: 2.0.3
ua-parser-js: 2.0.7
xml2js: 0.6.2
caniuse-lite@1.0.30001764: {}
chownr@1.1.4: {} chownr@1.1.4: {}
chownr@3.0.0: {}
cloakbrowser@0.3.22(mmdb-lib@3.0.1)(playwright-core@1.59.1):
dependencies:
tar: 7.5.13
optionalDependencies:
mmdb-lib: 3.0.1
playwright-core: 1.59.1
combined-stream@1.0.8: combined-stream@1.0.8:
dependencies: dependencies:
delayed-stream: 1.0.0 delayed-stream: 1.0.0
commander@14.0.2: {}
cssstyle@4.6.0: cssstyle@4.6.0:
dependencies: dependencies:
'@asamuzakjp/css-color': 3.2.0 '@asamuzakjp/css-color': 3.2.0
@@ -1105,22 +909,14 @@ snapshots:
delayed-stream@1.0.0: {} delayed-stream@1.0.0: {}
detect-europe-js@0.1.2: {}
detect-libc@2.1.2: {} detect-libc@2.1.2: {}
dot-prop@6.0.1:
dependencies:
is-obj: 2.0.0
dunder-proto@1.0.1: dunder-proto@1.0.1:
dependencies: dependencies:
call-bind-apply-helpers: 1.0.2 call-bind-apply-helpers: 1.0.2
es-errors: 1.3.0 es-errors: 1.3.0
gopd: 1.2.0 gopd: 1.2.0
electron-to-chromium@1.5.267: {}
end-of-stream@1.4.5: end-of-stream@1.4.5:
dependencies: dependencies:
once: 1.4.0 once: 1.4.0
@@ -1171,18 +967,10 @@ snapshots:
'@esbuild/win32-ia32': 0.27.0 '@esbuild/win32-ia32': 0.27.0
'@esbuild/win32-x64': 0.27.0 '@esbuild/win32-x64': 0.27.0
escalade@3.2.0: {}
expand-template@2.0.3: {} expand-template@2.0.3: {}
file-uri-to-path@1.0.0: {} file-uri-to-path@1.0.0: {}
fingerprint-generator@2.1.79:
dependencies:
generative-bayesian-network: 2.1.79
header-generator: 2.1.79
tslib: 2.8.1
form-data@4.0.5: form-data@4.0.5:
dependencies: dependencies:
asynckit: 0.4.0 asynckit: 0.4.0
@@ -1198,11 +986,6 @@ snapshots:
function-bind@1.1.2: {} function-bind@1.1.2: {}
generative-bayesian-network@2.1.79:
dependencies:
adm-zip: 0.5.16
tslib: 2.8.1
get-intrinsic@1.3.0: get-intrinsic@1.3.0:
dependencies: dependencies:
call-bind-apply-helpers: 1.0.2 call-bind-apply-helpers: 1.0.2
@@ -1227,12 +1010,6 @@ snapshots:
github-from-package@0.0.0: {} github-from-package@0.0.0: {}
glob@13.0.0:
dependencies:
minimatch: 10.1.1
minipass: 7.1.2
path-scurry: 2.0.1
gopd@1.2.0: {} gopd@1.2.0: {}
has-symbols@1.1.0: {} has-symbols@1.1.0: {}
@@ -1245,13 +1022,6 @@ snapshots:
dependencies: dependencies:
function-bind: 1.1.2 function-bind: 1.1.2
header-generator@2.1.79:
dependencies:
browserslist: 4.28.1
generative-bayesian-network: 2.1.79
ow: 0.28.2
tslib: 2.8.1
html-encoding-sniffer@4.0.0: html-encoding-sniffer@4.0.0:
dependencies: dependencies:
whatwg-encoding: 3.1.1 whatwg-encoding: 3.1.1
@@ -1276,51 +1046,12 @@ snapshots:
ieee754@1.2.1: {} ieee754@1.2.1: {}
impit-darwin-arm64@0.7.6:
optional: true
impit-darwin-x64@0.7.6:
optional: true
impit-linux-arm64-gnu@0.7.6:
optional: true
impit-linux-arm64-musl@0.7.6:
optional: true
impit-linux-x64-gnu@0.7.6:
optional: true
impit-linux-x64-musl@0.7.6:
optional: true
impit-win32-arm64-msvc@0.7.6:
optional: true
impit-win32-x64-msvc@0.7.6:
optional: true
impit@0.7.6:
optionalDependencies:
impit-darwin-arm64: 0.7.6
impit-darwin-x64: 0.7.6
impit-linux-arm64-gnu: 0.7.6
impit-linux-arm64-musl: 0.7.6
impit-linux-x64-gnu: 0.7.6
impit-linux-x64-musl: 0.7.6
impit-win32-arm64-msvc: 0.7.6
impit-win32-x64-msvc: 0.7.6
inherits@2.0.4: {} inherits@2.0.4: {}
ini@1.3.8: {} ini@1.3.8: {}
is-obj@2.0.0: {}
is-potential-custom-element-name@1.0.1: {} is-potential-custom-element-name@1.0.1: {}
is-standalone-pwa@0.1.1: {}
jsdom@24.1.3: jsdom@24.1.3:
dependencies: dependencies:
cssstyle: 4.6.0 cssstyle: 4.6.0
@@ -1349,25 +1080,10 @@ snapshots:
- supports-color - supports-color
- utf-8-validate - utf-8-validate
language-subtag-registry@0.3.23: {}
language-tags@2.1.0:
dependencies:
language-subtag-registry: 0.3.23
lodash.isequal@4.5.0: {}
lru-cache@10.4.3: {} lru-cache@10.4.3: {}
lru-cache@11.2.4: {}
math-intrinsics@1.1.0: {} math-intrinsics@1.1.0: {}
maxmind@5.0.3:
dependencies:
mmdb-lib: 3.0.1
tiny-lru: 11.4.5
mime-db@1.52.0: {} mime-db@1.52.0: {}
mime-types@2.1.35: mime-types@2.1.35:
@@ -1376,17 +1092,18 @@ snapshots:
mimic-response@3.1.0: {} mimic-response@3.1.0: {}
minimatch@10.1.1:
dependencies:
'@isaacs/brace-expansion': 5.0.0
minimist@1.2.8: {} minimist@1.2.8: {}
minipass@7.1.2: {} minipass@7.1.2: {}
minizlib@3.1.0:
dependencies:
minipass: 7.1.2
mkdirp-classic@0.5.3: {} mkdirp-classic@0.5.3: {}
mmdb-lib@3.0.1: {} mmdb-lib@3.0.1:
optional: true
ms@2.1.3: {} ms@2.1.3: {}
@@ -1396,34 +1113,17 @@ snapshots:
dependencies: dependencies:
semver: 7.7.3 semver: 7.7.3
node-releases@2.0.27: {}
nwsapi@2.2.23: {} nwsapi@2.2.23: {}
once@1.4.0: once@1.4.0:
dependencies: dependencies:
wrappy: 1.0.2 wrappy: 1.0.2
ow@0.28.2:
dependencies:
'@sindresorhus/is': 4.6.0
callsites: 3.1.0
dot-prop: 6.0.1
lodash.isequal: 4.5.0
vali-date: 1.0.0
parse5@7.3.0: parse5@7.3.0:
dependencies: dependencies:
entities: 6.0.1 entities: 6.0.1
path-scurry@2.0.1: playwright-core@1.59.1: {}
dependencies:
lru-cache: 11.2.4
minipass: 7.1.2
picocolors@1.1.1: {}
playwright-core@1.57.0: {}
prebuild-install@7.1.3: prebuild-install@7.1.3:
dependencies: dependencies:
@@ -1440,8 +1140,6 @@ snapshots:
tar-fs: 2.1.4 tar-fs: 2.1.4
tunnel-agent: 0.6.0 tunnel-agent: 0.6.0
progress@2.0.3: {}
psl@1.15.0: psl@1.15.0:
dependencies: dependencies:
punycode: 2.3.1 punycode: 2.3.1
@@ -1480,8 +1178,6 @@ snapshots:
safer-buffer@2.1.2: {} safer-buffer@2.1.2: {}
sax@1.4.4: {}
saxes@6.0.0: saxes@6.0.0:
dependencies: dependencies:
xmlchars: 2.2.0 xmlchars: 2.2.0
@@ -1519,7 +1215,13 @@ snapshots:
inherits: 2.0.4 inherits: 2.0.4
readable-stream: 3.6.2 readable-stream: 3.6.2
tiny-lru@11.4.5: {} tar@7.5.13:
dependencies:
'@isaacs/fs-minipass': 4.0.1
chownr: 3.0.0
minipass: 7.1.2
minizlib: 3.1.0
yallist: 5.0.0
tough-cookie@4.1.4: tough-cookie@4.1.4:
dependencies: dependencies:
@@ -1532,8 +1234,6 @@ snapshots:
dependencies: dependencies:
punycode: 2.3.1 punycode: 2.3.1
tslib@2.8.1: {}
tsx@4.21.0: tsx@4.21.0:
dependencies: dependencies:
esbuild: 0.27.0 esbuild: 0.27.0
@@ -1553,24 +1253,10 @@ snapshots:
typescript@5.9.3: {} typescript@5.9.3: {}
ua-is-frozen@0.1.2: {}
ua-parser-js@2.0.7:
dependencies:
detect-europe-js: 0.1.2
is-standalone-pwa: 0.1.1
ua-is-frozen: 0.1.2
undici-types@7.16.0: {} undici-types@7.16.0: {}
universalify@0.2.0: {} universalify@0.2.0: {}
update-browserslist-db@1.2.3(browserslist@4.28.1):
dependencies:
browserslist: 4.28.1
escalade: 3.2.0
picocolors: 1.1.1
url-parse@1.5.10: url-parse@1.5.10:
dependencies: dependencies:
querystringify: 2.2.0 querystringify: 2.2.0
@@ -1578,8 +1264,6 @@ snapshots:
util-deprecate@1.0.2: {} util-deprecate@1.0.2: {}
vali-date@1.0.0: {}
w3c-xmlserializer@5.0.0: w3c-xmlserializer@5.0.0:
dependencies: dependencies:
xml-name-validator: 5.0.0 xml-name-validator: 5.0.0
@@ -1603,11 +1287,6 @@ snapshots:
xml-name-validator@5.0.0: {} xml-name-validator@5.0.0: {}
xml2js@0.6.2:
dependencies:
sax: 1.4.4
xmlbuilder: 11.0.1
xmlbuilder@11.0.1: {}
xmlchars@2.2.0: {} xmlchars@2.2.0: {}
yallist@5.0.0: {}

View File

@@ -1,12 +1,9 @@
import { writeFileSync } from 'fs'; #!/usr/bin/env npx tsx
import { mkdirSync, writeFileSync } from 'fs';
import { dirname, resolve } from 'path';
import { getPage } from './browse.js'; import { getPage } from './browse.js';
const baseUrl = 'http://localhost:3000';
const username = 'analyst@fhb.local';
const password = process.env.CAMOUFOX_PASSWORD ?? '';
const reportPath = '/Users/stefano.fiorini/Documents/projects/fhb-loan-spreading-pilot-a/docs/plans/2026-01-24-financials-analysis-redesign/web-automation-scan.md';
type NavResult = { type NavResult = {
requestedUrl: string; requestedUrl: string;
url: string; url: string;
@@ -15,198 +12,163 @@ type NavResult = {
error?: string; error?: string;
}; };
type RouteCheck = {
route: string;
result: NavResult;
heading: string | null;
};
const DEFAULT_BASE_URL = 'http://localhost:3000';
const DEFAULT_REPORT_PATH = resolve(process.cwd(), 'scan-local-app.md');
function env(name: string): string | undefined {
const value = process.env[name]?.trim();
return value ? value : undefined;
}
function getRoutes(baseUrl: string): string[] {
const routeList = env('SCAN_ROUTES');
if (routeList) {
return routeList
.split(',')
.map((route) => route.trim())
.filter(Boolean)
.map((route) => new URL(route, baseUrl).toString());
}
return [baseUrl];
}
async function gotoWithStatus(page: any, url: string): Promise<NavResult> { async function gotoWithStatus(page: any, url: string): Promise<NavResult> {
const resp = await page.goto(url, { waitUntil: 'domcontentloaded', timeout: 60000 }).catch((e: unknown) => ({ error: e })); const response = await page
if (resp?.error) { .goto(url, { waitUntil: 'domcontentloaded', timeout: 60000 })
.catch((error: unknown) => ({ error }));
if (response?.error) {
return { return {
requestedUrl: url, requestedUrl: url,
url: page.url(), url: page.url(),
status: null, status: null,
title: await page.title().catch(() => ''), title: await page.title().catch(() => ''),
error: String(resp.error), error: String(response.error),
}; };
} }
return { return {
requestedUrl: url, requestedUrl: url,
url: page.url(), url: page.url(),
status: resp ? resp.status() : null, status: response ? response.status() : null,
title: await page.title().catch(() => ''), title: await page.title().catch(() => ''),
}; };
} }
async function textOrNull(page: any, selector: string): Promise<string | null> { async function textOrNull(page: any, selector: string): Promise<string | null> {
const loc = page.locator(selector).first(); const locator = page.locator(selector).first();
try { try {
if ((await loc.count()) === 0) return null; if ((await locator.count()) === 0) return null;
const txt = await loc.textContent(); const value = await locator.textContent();
return txt ? txt.trim().replace(/\s+/g, ' ') : null; return value ? value.trim().replace(/\s+/g, ' ') : null;
} catch { } catch {
return null; return null;
} }
} }
async function loginIfConfigured(page: any, baseUrl: string, lines: string[]) {
const loginPath = env('SCAN_LOGIN_PATH');
const username = env('SCAN_USERNAME') ?? env('CLOAKBROWSER_USERNAME');
const password = env('SCAN_PASSWORD') ?? env('CLOAKBROWSER_PASSWORD');
const usernameSelector = env('SCAN_USERNAME_SELECTOR') ?? 'input[type="email"], input[name="email"]';
const passwordSelector = env('SCAN_PASSWORD_SELECTOR') ?? 'input[type="password"], input[name="password"]';
const submitSelector = env('SCAN_SUBMIT_SELECTOR') ?? 'button[type="submit"], input[type="submit"]';
if (!loginPath) {
lines.push('## Login');
lines.push('- Skipped: set `SCAN_LOGIN_PATH` to enable login smoke checks.');
lines.push('');
return;
}
const loginUrl = new URL(loginPath, baseUrl).toString();
lines.push('## Login');
lines.push(`- Login URL: ${loginUrl}`);
await gotoWithStatus(page, loginUrl);
if (!username || !password) {
lines.push('- Skipped: set `SCAN_USERNAME`/`SCAN_PASSWORD` or `CLOAKBROWSER_USERNAME`/`CLOAKBROWSER_PASSWORD`.');
lines.push('');
return;
}
await page.locator(usernameSelector).first().fill(username);
await page.locator(passwordSelector).first().fill(password);
await page.locator(submitSelector).first().click();
await page.waitForTimeout(2500);
lines.push(`- After submit URL: ${page.url()}`);
lines.push(`- Cookie count: ${(await page.context().cookies()).length}`);
lines.push('');
}
async function checkRoutes(page: any, baseUrl: string, lines: string[]) {
const routes = getRoutes(baseUrl);
const routeChecks: RouteCheck[] = [];
for (const url of routes) {
const result = await gotoWithStatus(page, url);
const heading = await textOrNull(page, 'h1');
routeChecks.push({
route: url,
result,
heading,
});
}
lines.push('## Route Checks');
for (const check of routeChecks) {
const relativeUrl = check.route.startsWith(baseUrl) ? check.route.slice(baseUrl.length) || '/' : check.route;
const finalPath = check.result.url.startsWith(baseUrl)
? check.result.url.slice(baseUrl.length) || '/'
: check.result.url;
const suffix = check.heading ? `, h1="${check.heading}"` : '';
const errorSuffix = check.result.error ? `, error="${check.result.error}"` : '';
lines.push(
`- ${relativeUrl} → status ${check.result.status ?? 'ERR'} (final ${finalPath})${suffix}${errorSuffix}`
);
}
lines.push('');
}
async function main() { async function main() {
const { page, browser } = await getPage({ headless: true }); const baseUrl = env('SCAN_BASE_URL') ?? DEFAULT_BASE_URL;
const reportPath = resolve(env('SCAN_REPORT_PATH') ?? DEFAULT_REPORT_PATH);
const headless = (env('SCAN_HEADLESS') ?? env('CLOAKBROWSER_HEADLESS') ?? 'true') === 'true';
const { page, browser } = await getPage({ headless });
const lines: string[] = []; const lines: string[] = [];
lines.push('# Web Automation Scan (local)'); lines.push('# Web Automation Scan (local)');
lines.push(''); lines.push('');
lines.push(`- Base URL: ${baseUrl}`); lines.push(`- Base URL: ${baseUrl}`);
lines.push(`- Timestamp: ${new Date().toISOString()}`); lines.push(`- Timestamp: ${new Date().toISOString()}`);
lines.push(`- Headless: ${headless}`);
lines.push(`- Report Path: ${reportPath}`);
lines.push(''); lines.push('');
try { try {
lines.push('## Login'); await loginIfConfigured(page, baseUrl, lines);
await gotoWithStatus(page, `${baseUrl}/login`); await checkRoutes(page, baseUrl, lines);
await page.locator('input[name="email"]').fill(username);
await page.locator('input[name="password"]').fill(password);
await page.locator('button[type="submit"]').click();
await page.waitForTimeout(2500);
const cookies = await page.context().cookies();
const sessionCookie = cookies.find((c: any) => c.name === 'fhb_session');
lines.push(`- After submit URL: ${page.url()}`);
lines.push(`- Has session cookie (fhb_session): ${Boolean(sessionCookie)}`);
lines.push('');
lines.push('## Demo Case');
const casesNav = await gotoWithStatus(page, `${baseUrl}/cases`);
lines.push(`- GET /cases → status ${casesNav.status ?? 'ERR'}, final ${casesNav.url}`);
const envCaseId = process.env.SCAN_CASE_ID?.trim() || null;
let selectedCaseId: string | null = envCaseId;
if (!selectedCaseId) {
const caseLinks = await page.$$eval('a[href^="/cases/"]', (as) =>
as
.map((a) => ({
href: (a as HTMLAnchorElement).getAttribute('href') || '',
text: (a.textContent || '').trim(),
}))
.filter((x) => x.href.includes('/cases/'))
);
const preferredTitles = ['Demo - Strong Borrower', 'Demo - Weak Borrower', 'Demo - Incomplete'];
for (const title of preferredTitles) {
const match = caseLinks.find((l) => l.text.includes(title) && l.href.includes('/cases/'));
const href = match?.href ?? '';
const m = href.match(/\/cases\/([0-9a-f-]{36})/i);
if (m) {
selectedCaseId = m[1];
break;
}
}
if (!selectedCaseId) {
const firstHref =
caseLinks.map((l) => l.href).find((h) => /\/cases\/[0-9a-f-]{36}/i.test(h)) ?? null;
const m = firstHref?.match(/\/cases\/([0-9a-f-]{36})/i) ?? null;
selectedCaseId = m?.[1] ?? null;
}
}
lines.push(`- Selected caseId: ${selectedCaseId ?? '(none found)'}`);
if (!selectedCaseId) {
lines.push('');
lines.push('⚠️ Could not find a demo case link on /cases.');
writeFileSync(reportPath, lines.join('\n') + '\n', 'utf-8');
return;
}
const caseBase = `${baseUrl}/cases/${selectedCaseId}/journey`;
lines.push('');
lines.push('## Route Checks');
const routesToCheck = [
`${caseBase}`,
`${caseBase}/financials`,
`${caseBase}/financials/income`,
`${caseBase}/analysis`,
`${caseBase}/analysis/configure`,
`${caseBase}/analysis/ai`,
`${caseBase}/analysis/ai/detail`,
`${caseBase}/spreads`,
];
for (const url of routesToCheck) {
const r = await gotoWithStatus(page, url);
const h1 = await textOrNull(page, 'h1');
const finalPath = r.url.startsWith(baseUrl) ? r.url.slice(baseUrl.length) : r.url;
lines.push(`- ${url.slice(baseUrl.length)} → status ${r.status ?? 'ERR'} (final ${finalPath})${h1 ? `, h1="${h1}"` : ''}`);
}
lines.push('');
lines.push('## Spreadsheet Analysis (UI)');
await gotoWithStatus(page, `${caseBase}/analysis/configure`);
const runButton = page.locator('button:has-text("Run Analysis")').first();
const disabled = await runButton.isDisabled().catch(() => true);
lines.push(`- Run button disabled: ${disabled}`);
if (!disabled) {
await runButton.click();
const resultsWait = page
.waitForURL('**/journey/analysis/results**', { timeout: 180000 })
.then(() => 'results' as const);
const errorWait = page
.locator('[role="alert"]')
.filter({ hasText: 'Error' })
.first()
.waitFor({ timeout: 180000 })
.then(() => 'error' as const);
const outcome = await Promise.race([resultsWait, errorWait]).catch(() => 'timeout' as const);
if (outcome === 'results') {
await page.waitForTimeout(1500);
lines.push(`- Results URL: ${page.url().replace(baseUrl, '')}`);
const downloadHref = await page
.locator('a[href*="/journey/analysis/download"]')
.first()
.getAttribute('href')
.catch(() => null);
if (downloadHref) {
const dlUrl = downloadHref.startsWith('http') ? downloadHref : `${baseUrl}${downloadHref}`;
const dlResp = await page.goto(dlUrl, { waitUntil: 'commit', timeout: 60000 }).catch(() => null);
lines.push(
`- Download route status: ${dlResp?.status() ?? 'ERR'} (Content-Type: ${dlResp?.headers()?.['content-type'] ?? 'n/a'})`
);
} else {
lines.push('- Download link not found on results page');
}
} else if (outcome === 'error') {
const errorText = await page
.locator('[role="alert"]')
.first()
.textContent()
.then((t: string | null) => (t ? t.trim().replace(/\\s+/g, ' ') : null))
.catch(() => null);
lines.push(`- Stayed on configure page; saw error callout: ${errorText ?? '(unable to read)'}`);
lines.push('- Skipping download check because analysis did not complete.');
} else {
lines.push('- Timed out waiting for results or error after clicking Run Analysis.');
}
} else {
lines.push('- Skipped running analysis because Run button was disabled.');
}
lines.push('');
lines.push('## Notes'); lines.push('## Notes');
lines.push('- This scan avoids scraping financial values; it records route availability and basic headings.'); lines.push('- This generic smoke helper records route availability and top-level headings for a local app.');
lines.push('- Configure login and route coverage with `SCAN_*` environment variables.');
writeFileSync(reportPath, lines.join('\n') + '\n', 'utf-8');
} finally { } finally {
await browser.close(); await browser.close();
} }
mkdirSync(dirname(reportPath), { recursive: true });
writeFileSync(reportPath, `${lines.join('\n')}\n`, 'utf-8');
console.log(`Report written to ${reportPath}`);
} }
main().catch((err) => { main().catch((error) => {
console.error(err); console.error(error);
process.exitCode = 1; process.exitCode = 1;
}); });

View File

@@ -1,28 +1,25 @@
import { Camoufox } from 'camoufox-js'; import { launchPersistentContext } from 'cloakbrowser';
import { homedir } from 'os'; import { homedir } from 'os';
import { join } from 'path'; import { join } from 'path';
import { mkdirSync, existsSync } from 'fs'; import { mkdirSync, existsSync } from 'fs';
async function test() { async function test() {
const profilePath = join(homedir(), '.camoufox-profile'); const profilePath = join(homedir(), '.cloakbrowser-profile');
if (!existsSync(profilePath)) { if (!existsSync(profilePath)) {
mkdirSync(profilePath, { recursive: true }); mkdirSync(profilePath, { recursive: true });
} }
console.log('Profile path:', profilePath); console.log('Profile path:', profilePath);
console.log('Launching with full options...'); console.log('Launching CloakBrowser with full options...');
const browser = await Camoufox({ const browser = await launchPersistentContext({
headless: true, headless: true,
user_data_dir: profilePath, userDataDir: profilePath,
// humanize: 1.5, // Test without this first humanize: true,
// geoip: true, // Test without this first
// enable_cache: true,
// block_webrtc: false,
}); });
console.log('Browser launched'); console.log('Browser launched');
const page = await browser.newPage(); const page = browser.pages()[0] || await browser.newPage();
console.log('Page created'); console.log('Page created');
await page.goto('https://github.com', { timeout: 30000 }); await page.goto('https://github.com', { timeout: 30000 });

View File

@@ -1,10 +1,11 @@
import { Camoufox } from 'camoufox-js'; import { launch } from 'cloakbrowser';
async function test() { async function test() {
console.log('Launching Camoufox with minimal config...'); console.log('Launching CloakBrowser with minimal config...');
const browser = await Camoufox({ const browser = await launch({
headless: true, headless: true,
humanize: true,
}); });
console.log('Browser launched'); console.log('Browser launched');

View File

@@ -1,24 +1,25 @@
import { Camoufox } from 'camoufox-js'; import { launchPersistentContext } from 'cloakbrowser';
import { homedir } from 'os'; import { homedir } from 'os';
import { join } from 'path'; import { join } from 'path';
import { mkdirSync, existsSync } from 'fs'; import { mkdirSync, existsSync } from 'fs';
async function test() { async function test() {
const profilePath = join(homedir(), '.camoufox-profile'); const profilePath = join(homedir(), '.cloakbrowser-profile');
if (!existsSync(profilePath)) { if (!existsSync(profilePath)) {
mkdirSync(profilePath, { recursive: true }); mkdirSync(profilePath, { recursive: true });
} }
console.log('Profile path:', profilePath); console.log('Profile path:', profilePath);
console.log('Launching with user_data_dir...'); console.log('Launching with persistent userDataDir...');
const browser = await Camoufox({ const browser = await launchPersistentContext({
headless: true, headless: true,
user_data_dir: profilePath, userDataDir: profilePath,
humanize: true,
}); });
console.log('Browser launched'); console.log('Browser launched');
const page = await browser.newPage(); const page = browser.pages()[0] || await browser.newPage();
console.log('Page created'); console.log('Page created');
await page.goto('https://example.com', { timeout: 30000 }); await page.goto('https://example.com', { timeout: 30000 });

View File

@@ -1,78 +0,0 @@
import { getPage } from './browse.js';
// Shape of the JSON report this script prints to stdout.
type Extracted = {
  // document.title of the scanned page.
  title: string;
  // Final URL after any redirects (location.href inside the page).
  url: string;
  // Color-valued CSS custom properties as [name, value] pairs, sorted by name.
  colorVars: Array<[string, string]>;
  // Computed colors for a few landmark selectors; null when the selector matched nothing.
  samples: Record<string, null | { background: string; color: string; border: string }>;
};
/**
 * Heuristic check for whether a CSS value string contains a color token:
 * a hex literal (#rgb, #rgba, #rrggbb, #rrggbbaa) or an rgb()/hsl() call.
 */
function isColorValue(value: string) {
  const hexColor = /#([0-9a-f]{3,4}|[0-9a-f]{6}|[0-9a-f]{8})\b/i;
  const functionalColor = /\brgb\(|\bhsl\(/i;
  return hexColor.test(value) || functionalColor.test(value);
}
/**
 * Scrapes a page's design tokens: navigates to the URL given as argv[2]
 * (defaulting to https://www.firsthorizon.com), collects every CSS custom
 * property declared on :root plus the computed colors of a few landmark
 * elements, keeps only color-valued variables, and prints the result as
 * pretty-printed JSON on stdout. The browser is always closed on exit.
 */
async function main() {
  const url = process.argv[2] ?? 'https://www.firsthorizon.com';
  // getPage is a project helper (./browse.js); presumably yields a
  // Playwright-style { page, browser } pair — confirm against browse.ts.
  const { page, browser } = await getPage({ headless: true });
  try {
    await page.goto(url, { waitUntil: 'domcontentloaded', timeout: 60000 });
    // Fixed 5s settle time so late-loading stylesheets/scripts can apply
    // before computed styles are read.
    await page.waitForTimeout(5000);
    // The extractor is passed as a string (not a function) so the in-browser
    // code is evaluated verbatim and never type-checked as Node-side TS.
    const data = await page.evaluate(`(() => {
      const rootStyles = getComputedStyle(document.documentElement);
      const vars = {};
      for (let i = 0; i < rootStyles.length; i++) {
        const prop = rootStyles[i];
        if (prop && prop.startsWith('--')) {
          vars[prop] = rootStyles.getPropertyValue(prop).trim();
        }
      }
      const pick = (selector) => {
        const el = document.querySelector(selector);
        if (!el) return null;
        const cs = getComputedStyle(el);
        return {
          background: cs.backgroundColor,
          color: cs.color,
          border: cs.borderColor,
        };
      };
      return {
        title: document.title,
        url: location.href,
        vars,
        samples: {
          body: pick('body'),
          header: pick('header'),
          nav: pick('nav'),
          primaryButton: pick('button, [role="button"], a[role="button"], a.button, .button'),
          link: pick('a'),
        },
      };
    })()`);
    // evaluate() returns untyped data; the cast documents the expected
    // [name, value] string pairs produced by the vars object built above.
    const entries = Object.entries(data.vars) as Array<[string, string]>;
    // Keep only variables whose value looks like a color, sorted by name
    // for a stable, diff-friendly report.
    const colorVars = entries
      .filter(([, v]) => v && isColorValue(v))
      .sort((a, b) => a[0].localeCompare(b[0]));
    const out: Extracted = {
      title: data.title,
      url: data.url,
      colorVars,
      samples: data.samples,
    };
    process.stdout.write(JSON.stringify(out, null, 2));
  } finally {
    // Always release the browser, even when navigation or extraction throws.
    await browser.close();
  }
}
// Top-level entry point: surface any failure from the scan and exit
// non-zero so calling scripts/CI can detect it.
main().catch((caught: unknown) => {
  console.error(caught);
  process.exit(1);
});