feat: add comprehensive CI/CD pipeline

Adds GitHub Actions workflows for CI, maintenance, and releases with multi-platform testing matrix.
This commit is contained in:
Roei Bar Aviv
2026-01-29 08:05:43 +01:00
committed by GitHub
parent 58a97c8a84
commit 7c0bc25982
21 changed files with 3679 additions and 0 deletions

17
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file
View File

@@ -0,0 +1,17 @@
## Description
<!-- Brief description of changes -->
## Type of Change
- [ ] `fix:` Bug fix
- [ ] `feat:` New feature
- [ ] `refactor:` Code refactoring
- [ ] `docs:` Documentation
- [ ] `test:` Tests
- [ ] `chore:` Maintenance/tooling
- [ ] `ci:` CI/CD changes
## Checklist
- [ ] Tests pass locally (`node tests/run-all.js`)
- [ ] Validation scripts pass
- [ ] Follows conventional commits format
- [ ] Updated relevant documentation

218
.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,218 @@
name: CI

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

# Prevent duplicate runs for the same ref
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

# Minimal permissions
permissions:
  contents: read

jobs:
  test:
    name: Test (${{ matrix.os }}, Node ${{ matrix.node }}, ${{ matrix.pm }})
    runs-on: ${{ matrix.os }}
    timeout-minutes: 10
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, windows-latest, macos-latest]
        node: ['18.x', '20.x', '22.x']
        pm: [npm, pnpm, yarn, bun]
        exclude:
          # Bun has limited Windows support
          - os: windows-latest
            pm: bun
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Node.js ${{ matrix.node }}
        uses: actions/setup-node@v4
        with:
          node-version: ${{ matrix.node }}

      # Package manager setup
      - name: Setup pnpm
        if: matrix.pm == 'pnpm'
        uses: pnpm/action-setup@v4
        with:
          version: latest

      - name: Setup Bun
        if: matrix.pm == 'bun'
        uses: oven-sh/setup-bun@v2

      # Cache configuration
      - name: Get npm cache directory
        if: matrix.pm == 'npm'
        id: npm-cache-dir
        shell: bash
        run: echo "dir=$(npm config get cache)" >> "$GITHUB_OUTPUT"

      - name: Cache npm
        if: matrix.pm == 'npm'
        uses: actions/cache@v4
        with:
          path: ${{ steps.npm-cache-dir.outputs.dir }}
          key: ${{ runner.os }}-node-${{ matrix.node }}-npm-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-${{ matrix.node }}-npm-

      - name: Get pnpm store directory
        if: matrix.pm == 'pnpm'
        id: pnpm-cache-dir
        shell: bash
        run: echo "dir=$(pnpm store path)" >> "$GITHUB_OUTPUT"

      - name: Cache pnpm
        if: matrix.pm == 'pnpm'
        uses: actions/cache@v4
        with:
          path: ${{ steps.pnpm-cache-dir.outputs.dir }}
          key: ${{ runner.os }}-node-${{ matrix.node }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
          restore-keys: |
            ${{ runner.os }}-node-${{ matrix.node }}-pnpm-

      - name: Get yarn cache directory
        if: matrix.pm == 'yarn'
        id: yarn-cache-dir
        shell: bash
        run: |
          # Try Yarn Berry first, fall back to Yarn v1
          if yarn config get cacheFolder >/dev/null 2>&1; then
            echo "dir=$(yarn config get cacheFolder)" >> "$GITHUB_OUTPUT"
          else
            echo "dir=$(yarn cache dir)" >> "$GITHUB_OUTPUT"
          fi

      - name: Cache yarn
        if: matrix.pm == 'yarn'
        uses: actions/cache@v4
        with:
          path: ${{ steps.yarn-cache-dir.outputs.dir }}
          key: ${{ runner.os }}-node-${{ matrix.node }}-yarn-${{ hashFiles('**/yarn.lock') }}
          restore-keys: |
            ${{ runner.os }}-node-${{ matrix.node }}-yarn-

      - name: Cache bun
        if: matrix.pm == 'bun'
        uses: actions/cache@v4
        with:
          path: ~/.bun/install/cache
          key: ${{ runner.os }}-bun-${{ hashFiles('**/bun.lockb') }}
          restore-keys: |
            ${{ runner.os }}-bun-

      # Install dependencies
      - name: Install dependencies
        shell: bash
        run: |
          case "${{ matrix.pm }}" in
            npm) npm ci ;;
            pnpm) pnpm install ;;
            # --ignore-engines required for Node 18 compat with some devDependencies (e.g., markdownlint-cli)
            yarn) yarn install --ignore-engines ;;
            bun) bun install ;;
            *) echo "Unsupported package manager: ${{ matrix.pm }}" && exit 1 ;;
          esac

      # Run tests
      - name: Run tests
        run: node tests/run-all.js
        env:
          CLAUDE_CODE_PACKAGE_MANAGER: ${{ matrix.pm }}

      # Upload test artifacts on failure
      - name: Upload test artifacts
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: test-results-${{ matrix.os }}-node${{ matrix.node }}-${{ matrix.pm }}
          path: |
            tests/
            !tests/node_modules/

  validate:
    name: Validate Components
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20.x'

      # Each validator exits non-zero on failure, which fails the step
      # (continue-on-error: false is the default and therefore omitted).
      - name: Validate agents
        run: node scripts/ci/validate-agents.js

      - name: Validate hooks
        run: node scripts/ci/validate-hooks.js

      - name: Validate commands
        run: node scripts/ci/validate-commands.js

      - name: Validate skills
        run: node scripts/ci/validate-skills.js

      - name: Validate rules
        run: node scripts/ci/validate-rules.js

  security:
    name: Security Scan
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20.x'

      - name: Run npm audit
        run: npm audit --audit-level=high
        # Non-blocking: with continue-on-error the step (and the job) is
        # reported as SUCCEEDED even when vulnerabilities are found; findings
        # only appear in the step log / annotations.
        continue-on-error: true

  lint:
    name: Lint
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20.x'

      - name: Install dependencies
        run: npm ci

      - name: Run ESLint
        # Quote the globs so ESLint expands '**' itself — bash on the runner
        # has globstar disabled by default, so unquoted 'scripts/**/*.js'
        # would only match one directory level.
        run: npx eslint "scripts/**/*.js" "tests/**/*.js"

      - name: Run markdownlint
        run: npx markdownlint "agents/**/*.md" "skills/**/*.md" "commands/**/*.md" "rules/**/*.md"

51
.github/workflows/maintenance.yml vendored Normal file
View File

@@ -0,0 +1,51 @@
name: Scheduled Maintenance

on:
  schedule:
    - cron: '0 9 * * 1' # Weekly Monday 9am UTC
  workflow_dispatch:

# issues/pull-requests write access is required by the stale job.
permissions:
  contents: read
  issues: write
  pull-requests: write

jobs:
  dependency-check:
    name: Check Dependencies
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '20.x'
      - name: Check for outdated packages
        # Informational only: npm outdated exits non-zero whenever anything
        # is outdated, so the exit code is deliberately swallowed.
        run: npm outdated || true

  security-audit:
    name: Security Audit
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '20.x'
      - name: Run security audit
        run: |
          if [ -f package-lock.json ]; then
            npm ci
            npm audit --audit-level=high
          else
            echo "No package-lock.json found; skipping npm audit"
          fi

  stale:
    name: Stale Issues/PRs
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v9
        with:
          stale-issue-message: 'This issue is stale due to inactivity.'
          stale-pr-message: 'This PR is stale due to inactivity.'
          days-before-stale: 30
          days-before-close: 7

47
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,47 @@
name: Release

on:
  push:
    tags: ['v*']

permissions:
  contents: write

jobs:
  release:
    name: Create Release
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # Full history so the previous tag and commit range can be resolved.
          fetch-depth: 0

      - name: Validate version tag
        env:
          # Pass the ref through an env var rather than interpolating
          # ${{ github.ref_name }} into the script body (script-injection
          # hardening recommended by GitHub).
          TAG: ${{ github.ref_name }}
        run: |
          if ! [[ "$TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            echo "Invalid version tag format. Expected vX.Y.Z"
            exit 1
          fi

      - name: Generate changelog
        id: changelog
        run: |
          PREV_TAG=$(git describe --tags --abbrev=0 HEAD^ 2>/dev/null || echo "")
          if [ -z "$PREV_TAG" ]; then
            COMMITS=$(git log --pretty=format:"- %s" HEAD)
          else
            COMMITS=$(git log --pretty=format:"- %s" "${PREV_TAG}..HEAD")
          fi
          # Unique delimiter so a commit message containing "EOF" cannot
          # terminate the heredoc early (matches reusable-release.yml).
          DELIMITER="COMMITS_END_$(date +%s)"
          echo "commits<<${DELIMITER}" >> "$GITHUB_OUTPUT"
          echo "$COMMITS" >> "$GITHUB_OUTPUT"
          echo "${DELIMITER}" >> "$GITHUB_OUTPUT"

      - name: Create GitHub Release
        uses: softprops/action-gh-release@v2
        with:
          body: |
            ## Changes
            ${{ steps.changelog.outputs.commits }}
          generate_release_notes: false

59
.github/workflows/reusable-release.yml vendored Normal file
View File

@@ -0,0 +1,59 @@
name: Reusable Release Workflow

on:
  workflow_call:
    inputs:
      tag:
        description: 'Version tag (e.g., v1.0.0)'
        required: true
        type: string
      generate-notes:
        description: 'Auto-generate release notes'
        required: false
        type: boolean
        default: true

permissions:
  contents: write

jobs:
  release:
    name: Create Release
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # Full history so the previous tag and commit range can be resolved.
          fetch-depth: 0

      - name: Validate version tag
        env:
          # Pass the caller-supplied tag through an env var rather than
          # interpolating ${{ inputs.tag }} into the script body
          # (script-injection hardening recommended by GitHub).
          TAG: ${{ inputs.tag }}
        run: |
          if ! [[ "$TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            echo "Invalid version tag format. Expected vX.Y.Z"
            exit 1
          fi

      - name: Generate changelog
        id: changelog
        run: |
          PREV_TAG=$(git describe --tags --abbrev=0 HEAD^ 2>/dev/null || echo "")
          if [ -z "$PREV_TAG" ]; then
            COMMITS=$(git log --pretty=format:"- %s" HEAD)
          else
            COMMITS=$(git log --pretty=format:"- %s" "${PREV_TAG}..HEAD")
          fi
          # Use unique delimiter to prevent truncation if commit messages contain EOF
          DELIMITER="COMMITS_END_$(date +%s)"
          echo "commits<<${DELIMITER}" >> "$GITHUB_OUTPUT"
          echo "$COMMITS" >> "$GITHUB_OUTPUT"
          echo "${DELIMITER}" >> "$GITHUB_OUTPUT"

      - name: Create GitHub Release
        uses: softprops/action-gh-release@v2
        with:
          tag_name: ${{ inputs.tag }}
          body: |
            ## Changes
            ${{ steps.changelog.outputs.commits }}
          generate_release_notes: ${{ inputs.generate-notes }}

130
.github/workflows/reusable-test.yml vendored Normal file
View File

@@ -0,0 +1,130 @@
name: Reusable Test Workflow

on:
  workflow_call:
    inputs:
      os:
        description: 'Operating system'
        required: false
        type: string
        default: 'ubuntu-latest'
      node-version:
        description: 'Node.js version'
        required: false
        type: string
        default: '20.x'
      package-manager:
        description: 'Package manager to use'
        required: false
        type: string
        default: 'npm'

jobs:
  test:
    name: Test
    runs-on: ${{ inputs.os }}
    timeout-minutes: 10
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ inputs.node-version }}

      - name: Setup pnpm
        if: inputs.package-manager == 'pnpm'
        uses: pnpm/action-setup@v4
        with:
          version: latest

      - name: Setup Bun
        if: inputs.package-manager == 'bun'
        uses: oven-sh/setup-bun@v2

      - name: Get npm cache directory
        if: inputs.package-manager == 'npm'
        id: npm-cache-dir
        shell: bash
        run: echo "dir=$(npm config get cache)" >> "$GITHUB_OUTPUT"

      - name: Cache npm
        if: inputs.package-manager == 'npm'
        uses: actions/cache@v4
        with:
          path: ${{ steps.npm-cache-dir.outputs.dir }}
          key: ${{ runner.os }}-node-${{ inputs.node-version }}-npm-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-${{ inputs.node-version }}-npm-

      - name: Get pnpm store directory
        if: inputs.package-manager == 'pnpm'
        id: pnpm-cache-dir
        shell: bash
        run: echo "dir=$(pnpm store path)" >> "$GITHUB_OUTPUT"

      - name: Cache pnpm
        if: inputs.package-manager == 'pnpm'
        uses: actions/cache@v4
        with:
          path: ${{ steps.pnpm-cache-dir.outputs.dir }}
          key: ${{ runner.os }}-node-${{ inputs.node-version }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
          restore-keys: |
            ${{ runner.os }}-node-${{ inputs.node-version }}-pnpm-

      - name: Get yarn cache directory
        if: inputs.package-manager == 'yarn'
        id: yarn-cache-dir
        shell: bash
        run: |
          # Try Yarn Berry first, fall back to Yarn v1
          if yarn config get cacheFolder >/dev/null 2>&1; then
            echo "dir=$(yarn config get cacheFolder)" >> "$GITHUB_OUTPUT"
          else
            echo "dir=$(yarn cache dir)" >> "$GITHUB_OUTPUT"
          fi

      - name: Cache yarn
        if: inputs.package-manager == 'yarn'
        uses: actions/cache@v4
        with:
          path: ${{ steps.yarn-cache-dir.outputs.dir }}
          key: ${{ runner.os }}-node-${{ inputs.node-version }}-yarn-${{ hashFiles('**/yarn.lock') }}
          restore-keys: |
            ${{ runner.os }}-node-${{ inputs.node-version }}-yarn-

      - name: Cache bun
        if: inputs.package-manager == 'bun'
        uses: actions/cache@v4
        with:
          path: ~/.bun/install/cache
          key: ${{ runner.os }}-bun-${{ hashFiles('**/bun.lockb') }}
          restore-keys: |
            ${{ runner.os }}-bun-

      - name: Install dependencies
        shell: bash
        run: |
          case "${{ inputs.package-manager }}" in
            npm) npm ci ;;
            pnpm) pnpm install ;;
            # --ignore-engines required for Node 18 compat with some devDependencies
            yarn) yarn install --ignore-engines ;;
            bun) bun install ;;
            *) echo "Unsupported package manager: ${{ inputs.package-manager }}" && exit 1 ;;
          esac

      - name: Run tests
        run: node tests/run-all.js
        env:
          CLAUDE_CODE_PACKAGE_MANAGER: ${{ inputs.package-manager }}

      - name: Upload test artifacts
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: test-results-${{ inputs.os }}-node${{ inputs.node-version }}-${{ inputs.package-manager }}
          path: |
            tests/
            !tests/node_modules/

40
.github/workflows/reusable-validate.yml vendored Normal file
View File

@@ -0,0 +1,40 @@
name: Reusable Validation Workflow

on:
  workflow_call:
    inputs:
      node-version:
        description: 'Node.js version'
        required: false
        type: string
        default: '20.x'

jobs:
  validate:
    name: Validate Components
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ inputs.node-version }}

      # Each validator exits non-zero on failure, failing the step.
      - name: Validate agents
        run: node scripts/ci/validate-agents.js

      - name: Validate hooks
        run: node scripts/ci/validate-hooks.js

      - name: Validate commands
        run: node scripts/ci/validate-commands.js

      - name: Validate skills
        run: node scripts/ci/validate-skills.js

      - name: Validate rules
        run: node scripts/ci/validate-rules.js

17
.markdownlint.json Normal file
View File

@@ -0,0 +1,17 @@
{
"default": true,
"MD013": false,
"MD033": false,
"MD041": false,
"MD022": false,
"MD031": false,
"MD032": false,
"MD040": false,
"MD036": false,
"MD026": false,
"MD029": false,
"MD060": false,
"MD024": {
"siblings_only": true
}
}

11
commitlint.config.js Normal file
View File

@@ -0,0 +1,11 @@
// Commitlint configuration: enforces Conventional Commits on commit messages.
// Level 2 = error, level 1 = warning (commitlint severity convention).
module.exports = {
  extends: ['@commitlint/config-conventional'],
  rules: {
    // Only these commit types are accepted (e.g. "feat: ...", "ci: ...").
    'type-enum': [2, 'always', [
      'feat', 'fix', 'docs', 'style', 'refactor',
      'perf', 'test', 'chore', 'ci', 'build', 'revert'
    ]],
    // Subject must NOT be sentence/start/pascal/upper case (lowercase subjects).
    'subject-case': [2, 'never', ['sentence-case', 'start-case', 'pascal-case', 'upper-case']],
    // Entire header line (type + scope + subject) capped at 100 characters.
    'header-max-length': [2, 'always', 100]
  }
};

25
eslint.config.js Normal file
View File

@@ -0,0 +1,25 @@
// Flat ESLint config (ESLint 9+) for this repo's CommonJS Node scripts.
const js = require('@eslint/js');
const globals = require('globals');

module.exports = [
  // Start from ESLint's recommended rule set.
  js.configs.recommended,
  {
    languageOptions: {
      ecmaVersion: 2022,
      sourceType: 'commonjs',
      globals: {
        // Node built-ins (process, __dirname, ...) plus ES2022 globals.
        ...globals.node,
        ...globals.es2022
      }
    },
    rules: {
      // Unused names are errors unless prefixed with '_' (convention for
      // intentionally-unused args/vars/caught errors).
      'no-unused-vars': ['error', {
        argsIgnorePattern: '^_',
        varsIgnorePattern: '^_',
        caughtErrorsIgnorePattern: '^_'
      }],
      'no-undef': 'error',
      // Prefer === / !== ; warning only so it doesn't block CI.
      'eqeqeq': 'warn'
    }
  }
];

2178
package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

8
package.json Normal file
View File

@@ -0,0 +1,8 @@
{
"devDependencies": {
"@eslint/js": "^9.39.2",
"eslint": "^9.39.2",
"globals": "^17.1.0",
"markdownlint-cli": "^0.47.0"
}
}

81
schemas/hooks.schema.json Normal file
View File

@@ -0,0 +1,81 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Claude Code Hooks Configuration",
"description": "Configuration for Claude Code hooks. Event types are validated at runtime and must be one of: PreToolUse, PostToolUse, PreCompact, SessionStart, SessionEnd, Stop, Notification, SubagentStop",
"$defs": {
"hookItem": {
"type": "object",
"required": [
"type",
"command"
],
"properties": {
"type": {
"type": "string"
},
"command": {
"oneOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
]
}
}
},
"matcherEntry": {
"type": "object",
"required": [
"matcher",
"hooks"
],
"properties": {
"matcher": {
"type": "string"
},
"hooks": {
"type": "array",
"items": {
"$ref": "#/$defs/hookItem"
}
},
"description": {
"type": "string"
}
}
}
},
"oneOf": [
{
"type": "object",
"properties": {
"$schema": {
"type": "string"
},
"hooks": {
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"$ref": "#/$defs/matcherEntry"
}
}
}
},
"required": [
"hooks"
]
},
{
"type": "array",
"items": {
"$ref": "#/$defs/matcherEntry"
}
}
]
}

View File

@@ -0,0 +1,17 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Package Manager Configuration",
"type": "object",
"properties": {
"packageManager": {
"type": "string",
"enum": [
"npm",
"pnpm",
"yarn",
"bun"
]
}
},
"additionalProperties": false
}

View File

@@ -0,0 +1,13 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Claude Plugin Configuration",
"type": "object",
"required": ["name"],
"properties": {
"name": { "type": "string" },
"description": { "type": "string" },
"author": { "type": "string" },
"repository": { "type": "string" },
"license": { "type": "string" }
}
}

View File

@@ -0,0 +1,67 @@
#!/usr/bin/env node
/**
* Validate agent markdown files have required frontmatter
*/
const fs = require('fs');
const path = require('path');
const AGENTS_DIR = path.join(__dirname, '../../agents');
const REQUIRED_FIELDS = ['model', 'tools'];
/**
 * Parse simple key/value YAML frontmatter from a markdown document.
 *
 * Returns a plain object of trimmed string values, or null when the
 * document does not begin with a `---` fenced block. Handles a leading
 * UTF-8 BOM and both LF and CRLF line endings. Lines without a
 * 'key: value' separator are skipped.
 */
function extractFrontmatter(content) {
  // Strip BOM if present (UTF-8 BOM: \uFEFF)
  const normalized = content.replace(/^\uFEFF/, '');
  // Frontmatter is the text between the opening and closing '---' fences.
  const fence = normalized.match(/^---\r?\n([\s\S]*?)\r?\n---/);
  if (!fence) return null;
  const result = {};
  for (const rawLine of fence[1].split('\n')) {
    const sep = rawLine.indexOf(':');
    // Skip lines with no separator or an empty key.
    if (sep <= 0) continue;
    result[rawLine.slice(0, sep).trim()] = rawLine.slice(sep + 1).trim();
  }
  return result;
}
/**
 * Validate that every *.md file in agents/ has frontmatter containing all
 * REQUIRED_FIELDS. Exits 0 when all files pass (or the directory is absent),
 * exits 1 when any file fails; error details go to stderr.
 */
function validateAgents() {
  // Absent agents/ directory is not an error for this repo.
  if (!fs.existsSync(AGENTS_DIR)) {
    console.log('No agents directory found, skipping validation');
    process.exit(0);
  }
  // Non-recursive: only top-level *.md files are checked.
  const files = fs.readdirSync(AGENTS_DIR).filter(f => f.endsWith('.md'));
  let hasErrors = false;
  for (const file of files) {
    const filePath = path.join(AGENTS_DIR, file);
    const content = fs.readFileSync(filePath, 'utf-8');
    const frontmatter = extractFrontmatter(content);
    if (!frontmatter) {
      console.error(`ERROR: ${file} - Missing frontmatter`);
      hasErrors = true;
      // No frontmatter: field checks would be meaningless.
      continue;
    }
    for (const field of REQUIRED_FIELDS) {
      // Falsy check: an empty-string value counts as missing.
      if (!frontmatter[field]) {
        console.error(`ERROR: ${file} - Missing required field: ${field}`);
        hasErrors = true;
      }
    }
  }
  if (hasErrors) {
    process.exit(1);
  }
  console.log(`Validated ${files.length} agent files`);
}
validateAgents();

View File

@@ -0,0 +1,38 @@
#!/usr/bin/env node
/**
* Validate command markdown files are non-empty and readable
*/
const fs = require('fs');
const path = require('path');
const COMMANDS_DIR = path.join(__dirname, '../../commands');
/**
 * Validate that every *.md file in commands/ is non-empty (contains at
 * least some non-whitespace content). Exits 0 when all pass (or the
 * directory is absent), exits 1 otherwise.
 */
function validateCommands() {
  if (!fs.existsSync(COMMANDS_DIR)) {
    console.log('No commands directory found, skipping validation');
    process.exit(0);
  }
  const files = fs.readdirSync(COMMANDS_DIR).filter(f => f.endsWith('.md'));
  let hasErrors = false;
  files.forEach(file => {
    const content = fs.readFileSync(path.join(COMMANDS_DIR, file), 'utf-8');
    // Whitespace-only files count as empty.
    if (!content.trim()) {
      console.error(`ERROR: ${file} - Empty command file`);
      hasErrors = true;
    }
  });
  if (hasErrors) {
    process.exit(1);
  }
  console.log(`Validated ${files.length} command files`);
}
validateCommands();

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env node
/**
* Validate hooks.json schema
*/
const fs = require('fs');
const path = require('path');
const HOOKS_FILE = path.join(__dirname, '../../hooks/hooks.json');
const VALID_EVENTS = ['PreToolUse', 'PostToolUse', 'PreCompact', 'SessionStart', 'SessionEnd', 'Stop', 'Notification', 'SubagentStop'];
/**
 * Validate hooks/hooks.json structure.
 *
 * Two layouts are accepted:
 *   - object format: { hooks: { EventType: [matcherEntry, ...] } }
 *     (or the event map directly at the top level, via `data.hooks || data`)
 *   - array format (legacy): [matcherEntry, ...]
 *
 * Each matcher entry must have a 'matcher' string and a 'hooks' array whose
 * items carry a string 'type' and a string-or-array 'command'.
 * Exits 0 when valid (or when hooks.json is absent), 1 on any error.
 */
function validateHooks() {
  // Absent hooks.json is not an error for this repo.
  if (!fs.existsSync(HOOKS_FILE)) {
    console.log('No hooks.json found, skipping validation');
    process.exit(0);
  }
  let data;
  try {
    data = JSON.parse(fs.readFileSync(HOOKS_FILE, 'utf-8'));
  } catch (e) {
    console.error(`ERROR: Invalid JSON in hooks.json: ${e.message}`);
    process.exit(1);
  }
  // Support both object format { hooks: {...} } and array format
  // NOTE(review): if a 'hooks' key exists but is falsy (e.g. null), this
  // falls through to validating the whole document — including keys like
  // "$schema", which would then be flagged as invalid event types. Confirm
  // that is intended.
  const hooks = data.hooks || data;
  let hasErrors = false;
  let totalMatchers = 0;
  if (typeof hooks === 'object' && !Array.isArray(hooks)) {
    // Object format: { EventType: [matchers] }
    for (const [eventType, matchers] of Object.entries(hooks)) {
      if (!VALID_EVENTS.includes(eventType)) {
        console.error(`ERROR: Invalid event type: ${eventType}`);
        hasErrors = true;
        continue;
      }
      if (!Array.isArray(matchers)) {
        console.error(`ERROR: ${eventType} must be an array`);
        hasErrors = true;
        continue;
      }
      for (let i = 0; i < matchers.length; i++) {
        const matcher = matchers[i];
        if (typeof matcher !== 'object' || matcher === null) {
          console.error(`ERROR: ${eventType}[${i}] is not an object`);
          hasErrors = true;
          continue;
        }
        // NOTE(review): an empty-string matcher ('') fails this falsy check
        // and is reported as missing — confirm '' is not a legal
        // "match everything" value for this schema.
        if (!matcher.matcher) {
          console.error(`ERROR: ${eventType}[${i}] missing 'matcher' field`);
          hasErrors = true;
        }
        if (!matcher.hooks || !Array.isArray(matcher.hooks)) {
          console.error(`ERROR: ${eventType}[${i}] missing 'hooks' array`);
          hasErrors = true;
        } else {
          // Validate each hook entry
          for (let j = 0; j < matcher.hooks.length; j++) {
            const hook = matcher.hooks[j];
            if (!hook.type || typeof hook.type !== 'string') {
              console.error(`ERROR: ${eventType}[${i}].hooks[${j}] missing or invalid 'type' field`);
              hasErrors = true;
            }
            // 'command' may be a single string or an array of strings.
            if (!hook.command || (typeof hook.command !== 'string' && !Array.isArray(hook.command))) {
              console.error(`ERROR: ${eventType}[${i}].hooks[${j}] missing or invalid 'command' field`);
              hasErrors = true;
            }
          }
        }
        // Counted once per matcher entry (across all event types).
        totalMatchers++;
      }
    }
  } else if (Array.isArray(hooks)) {
    // Array format (legacy)
    for (let i = 0; i < hooks.length; i++) {
      const hook = hooks[i];
      if (!hook.matcher) {
        console.error(`ERROR: Hook ${i} missing 'matcher' field`);
        hasErrors = true;
      }
      if (!hook.hooks || !Array.isArray(hook.hooks)) {
        console.error(`ERROR: Hook ${i} missing 'hooks' array`);
        hasErrors = true;
      } else {
        // Validate each hook entry
        for (let j = 0; j < hook.hooks.length; j++) {
          const h = hook.hooks[j];
          if (!h.type || typeof h.type !== 'string') {
            console.error(`ERROR: Hook ${i}.hooks[${j}] missing or invalid 'type' field`);
            hasErrors = true;
          }
          if (!h.command || (typeof h.command !== 'string' && !Array.isArray(h.command))) {
            console.error(`ERROR: Hook ${i}.hooks[${j}] missing or invalid 'command' field`);
            hasErrors = true;
          }
        }
      }
      totalMatchers++;
    }
  } else {
    // Scalars/null at the top level are rejected outright.
    console.error('ERROR: hooks.json must be an object or array');
    process.exit(1);
  }
  if (hasErrors) {
    process.exit(1);
  }
  console.log(`Validated ${totalMatchers} hook matchers`);
}
validateHooks();

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env node
/**
* Validate rule markdown files
*/
const fs = require('fs');
const path = require('path');
const RULES_DIR = path.join(__dirname, '../../rules');
/**
 * Validate that every *.md file under rules/ (searched recursively) is
 * non-empty. Exits 0 when all pass (or rules/ is absent), 1 otherwise.
 *
 * NOTE(review): fs.readdirSync's `recursive` option requires Node >= 18.17;
 * on older runtimes it is ignored and only the top level is scanned —
 * verify the CI's '18.x' matrix entry resolves to a recent enough minor.
 */
function validateRules() {
  if (!fs.existsSync(RULES_DIR)) {
    console.log('No rules directory found, skipping validation');
    process.exit(0);
  }
  // Recursive listing returns paths relative to RULES_DIR.
  const files = fs.readdirSync(RULES_DIR, { recursive: true })
    .filter(f => f.endsWith('.md'));
  let hasErrors = false;
  let validatedCount = 0;
  for (const file of files) {
    const filePath = path.join(RULES_DIR, file);
    try {
      const stat = fs.statSync(filePath);
      // Skip anything that isn't a regular file (e.g. a dir ending in '.md').
      if (!stat.isFile()) continue;
      const content = fs.readFileSync(filePath, 'utf-8');
      if (content.trim().length === 0) {
        console.error(`ERROR: ${file} - Empty rule file`);
        hasErrors = true;
        continue;
      }
      validatedCount++;
    } catch (err) {
      // Unreadable or vanished files are reported but don't abort the scan.
      console.error(`ERROR: ${file} - ${err.message}`);
      hasErrors = true;
    }
  }
  if (hasErrors) {
    process.exit(1);
  }
  console.log(`Validated ${validatedCount} rule files`);
}
validateRules();

View File

@@ -0,0 +1,47 @@
#!/usr/bin/env node
/**
* Validate skill directories have SKILL.md with required structure
*/
const fs = require('fs');
const path = require('path');
const SKILLS_DIR = path.join(__dirname, '../../skills');
/**
 * Validate that every directory directly under skills/ contains a
 * non-empty SKILL.md. Exits 0 when all pass (or skills/ is absent),
 * exits 1 otherwise.
 */
function validateSkills() {
  if (!fs.existsSync(SKILLS_DIR)) {
    console.log('No skills directory found, skipping validation');
    process.exit(0);
  }
  const dirs = fs.readdirSync(SKILLS_DIR, { withFileTypes: true })
    .filter(entry => entry.isDirectory())
    .map(entry => entry.name);
  let hasErrors = false;
  let validCount = 0;
  for (const dir of dirs) {
    const skillMd = path.join(SKILLS_DIR, dir, 'SKILL.md');
    if (!fs.existsSync(skillMd)) {
      console.error(`ERROR: ${dir}/ - Missing SKILL.md`);
      hasErrors = true;
    } else if (!fs.readFileSync(skillMd, 'utf-8').trim()) {
      // Whitespace-only SKILL.md counts as empty.
      console.error(`ERROR: ${dir}/SKILL.md - Empty file`);
      hasErrors = true;
    } else {
      validCount++;
    }
  }
  if (hasErrors) {
    process.exit(1);
  }
  console.log(`Validated ${validCount} skill directories`);
}
validateSkills();

View File

@@ -0,0 +1,451 @@
/**
* Integration tests for hook scripts
*
* Tests hook behavior in realistic scenarios with proper input/output handling.
*
* Run with: node tests/integration/hooks.test.js
*/
const assert = require('assert');
const path = require('path');
const fs = require('fs');
const os = require('os');
const { spawn } = require('child_process');
// Synchronous test helper: runs `fn`, logs the test name (plus the error
// message on failure), and returns true/false for pass/fail.
function _test(name, fn) {
  let ok;
  let failure;
  try {
    fn();
    ok = true;
  } catch (err) {
    ok = false;
    failure = err;
  }
  console.log(`${name}`);
  if (!ok) {
    console.log(` Error: ${failure.message}`);
  }
  return ok;
}
// Async test helper: awaits `fn`, logs the test name (plus the error
// message on failure), and resolves to true/false for pass/fail.
// Rejections from `fn` are caught here, so callers never see a throw.
async function asyncTest(name, fn) {
  try {
    await fn();
    console.log(`${name}`);
    return true;
  } catch (err) {
    console.log(`${name}`);
    console.log(` Error: ${err.message}`);
    return false;
  }
}
/**
 * Run a hook script with simulated Claude Code input
 * @param {string} scriptPath - Path to the hook script
 * @param {object} input - Hook input object (will be JSON stringified)
 * @param {object} env - Environment variables (merged over process.env)
 * @param {number} [timeoutMs=10000] - Kill the hook (SIGKILL) and reject after this many ms
 * @returns {Promise<{code: number, stdout: string, stderr: string}>}
 */
function runHookWithInput(scriptPath, input = {}, env = {}, timeoutMs = 10000) {
  return new Promise((resolve, reject) => {
    const proc = spawn('node', [scriptPath], {
      env: { ...process.env, ...env },
      stdio: ['pipe', 'pipe', 'pipe']
    });
    let stdout = '';
    let stderr = '';
    proc.stdout.on('data', data => stdout += data);
    proc.stderr.on('data', data => stderr += data);
    // Ignore EPIPE errors (process may exit before we finish writing)
    proc.stdin.on('error', (err) => {
      if (err.code !== 'EPIPE') {
        reject(err);
      }
    });
    // Send JSON input on stdin (simulating Claude Code hook invocation)
    if (input && Object.keys(input).length > 0) {
      proc.stdin.write(JSON.stringify(input));
    }
    proc.stdin.end();
    // Watchdog: SIGKILL guarantees the child dies even if it ignores signals.
    const timer = setTimeout(() => {
      proc.kill('SIGKILL');
      reject(new Error(`Hook timed out after ${timeoutMs}ms`));
    }, timeoutMs);
    proc.on('close', code => {
      clearTimeout(timer);
      resolve({ code, stdout, stderr });
    });
    proc.on('error', err => {
      clearTimeout(timer);
      reject(err);
    });
  });
}
/**
 * Run an inline hook command (like those in hooks.json)
 * @param {string} command - The node -e "..." command
 * @param {object} input - Hook input object
 * @param {object} env - Environment variables (merged over process.env)
 * @param {number} [timeoutMs=10000] - Kill the hook (SIGKILL) and reject after this many ms
 * @returns {Promise<{code: number, stdout: string, stderr: string}>}
 */
function _runInlineHook(command, input = {}, env = {}, timeoutMs = 10000) {
  return new Promise((resolve, reject) => {
    // Extract the code from node -e "..."
    const match = command.match(/^node -e "(.+)"$/s);
    if (!match) {
      reject(new Error('Invalid inline hook command format'));
      return;
    }
    const proc = spawn('node', ['-e', match[1]], {
      env: { ...process.env, ...env },
      stdio: ['pipe', 'pipe', 'pipe']
    });
    let stdout = '';
    let stderr = '';
    // Declared before the stdin error handler: that handler can fire before
    // the timeout is armed, hence the `if (timer)` guard below.
    let timer;
    proc.stdout.on('data', data => stdout += data);
    proc.stderr.on('data', data => stderr += data);
    // Ignore EPIPE errors (process may exit before we finish writing)
    proc.stdin.on('error', (err) => {
      if (err.code !== 'EPIPE') {
        if (timer) clearTimeout(timer);
        reject(err);
      }
    });
    if (input && Object.keys(input).length > 0) {
      proc.stdin.write(JSON.stringify(input));
    }
    proc.stdin.end();
    timer = setTimeout(() => {
      proc.kill('SIGKILL');
      reject(new Error(`Inline hook timed out after ${timeoutMs}ms`));
    }, timeoutMs);
    proc.on('close', code => {
      clearTimeout(timer);
      resolve({ code, stdout, stderr });
    });
    proc.on('error', err => {
      clearTimeout(timer);
      reject(err);
    });
  });
}
// Create a temporary test directory
function createTestDir() {
return fs.mkdtempSync(path.join(os.tmpdir(), 'hook-integration-test-'));
}
// Clean up test directory
function cleanupTestDir(testDir) {
fs.rmSync(testDir, { recursive: true, force: true });
}
// Test suite
async function runTests() {
console.log('\n=== Hook Integration Tests ===\n');
let passed = 0;
let failed = 0;
const scriptsDir = path.join(__dirname, '..', '..', 'scripts', 'hooks');
const hooksJsonPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
const hooks = JSON.parse(fs.readFileSync(hooksJsonPath, 'utf8'));
// ==========================================
// Input Format Tests
// ==========================================
console.log('Hook Input Format Handling:');
if (await asyncTest('hooks handle empty stdin gracefully', async () => {
const result = await runHookWithInput(path.join(scriptsDir, 'session-start.js'), {});
assert.strictEqual(result.code, 0, `Should exit 0, got ${result.code}`);
})) passed++; else failed++;
if (await asyncTest('hooks handle malformed JSON input', async () => {
const proc = spawn('node', [path.join(scriptsDir, 'session-start.js')], {
stdio: ['pipe', 'pipe', 'pipe']
});
let code = null;
proc.stdin.write('{ invalid json }');
proc.stdin.end();
await new Promise((resolve) => {
proc.on('close', (c) => {
code = c;
resolve();
});
});
// Hook should not crash on malformed input (exit 0)
assert.strictEqual(code, 0, 'Should handle malformed JSON gracefully');
})) passed++; else failed++;
if (await asyncTest('hooks parse valid tool_input correctly', async () => {
// Test the console.log warning hook with valid input
const command = 'node -e "const fs=require(\'fs\');let d=\'\';process.stdin.on(\'data\',c=>d+=c);process.stdin.on(\'end\',()=>{const i=JSON.parse(d);const p=i.tool_input?.file_path||\'\';console.log(\'Path:\',p)})"';
const match = command.match(/^node -e "(.+)"$/s);
const proc = spawn('node', ['-e', match[1]], {
stdio: ['pipe', 'pipe', 'pipe']
});
let stdout = '';
proc.stdout.on('data', data => stdout += data);
proc.stdin.write(JSON.stringify({
tool_input: { file_path: '/test/path.js' }
}));
proc.stdin.end();
await new Promise(resolve => proc.on('close', resolve));
assert.ok(stdout.includes('/test/path.js'), 'Should extract file_path from input');
})) passed++; else failed++;
// ==========================================
// Output Format Tests
// ==========================================
console.log('\nHook Output Format:');
if (await asyncTest('hooks output messages to stderr (not stdout)', async () => {
const result = await runHookWithInput(path.join(scriptsDir, 'session-start.js'), {});
// Session-start should write info to stderr
assert.ok(result.stderr.length > 0, 'Should have stderr output');
assert.ok(result.stderr.includes('[SessionStart]'), 'Should have [SessionStart] prefix');
})) passed++; else failed++;
if (await asyncTest('PreCompact hook logs to stderr', async () => {
const result = await runHookWithInput(path.join(scriptsDir, 'pre-compact.js'), {});
assert.ok(result.stderr.includes('[PreCompact]'), 'Should output to stderr with prefix');
})) passed++; else failed++;
if (await asyncTest('blocking hooks output BLOCKED message', async () => {
// Test the dev server blocking hook
const blockingCommand = hooks.hooks.PreToolUse[0].hooks[0].command;
const match = blockingCommand.match(/^node -e "(.+)"$/s);
const proc = spawn('node', ['-e', match[1]], {
stdio: ['pipe', 'pipe', 'pipe']
});
let stderr = '';
let code = null;
proc.stderr.on('data', data => stderr += data);
proc.stdin.end();
await new Promise(resolve => {
proc.on('close', (c) => {
code = c;
resolve();
});
});
assert.ok(stderr.includes('BLOCKED'), 'Blocking hook should output BLOCKED');
assert.strictEqual(code, 1, 'Blocking hook should exit with code 1');
})) passed++; else failed++;
// ==========================================
// Exit Code Tests
// ==========================================
console.log('\nHook Exit Codes:');
if (await asyncTest('non-blocking hooks exit with code 0', async () => {
const result = await runHookWithInput(path.join(scriptsDir, 'session-end.js'), {});
assert.strictEqual(result.code, 0, 'Non-blocking hook should exit 0');
})) passed++; else failed++;
if (await asyncTest('blocking hooks exit with code 1', async () => {
// The dev server blocker always blocks
const blockingCommand = hooks.hooks.PreToolUse[0].hooks[0].command;
const match = blockingCommand.match(/^node -e "(.+)"$/s);
const proc = spawn('node', ['-e', match[1]], {
stdio: ['pipe', 'pipe', 'pipe']
});
let code = null;
proc.stdin.end();
await new Promise(resolve => {
proc.on('close', (c) => {
code = c;
resolve();
});
});
assert.strictEqual(code, 1, 'Blocking hook should exit 1');
})) passed++; else failed++;
// A missing transcript file must be tolerated: the hook skips processing
// and still reports success.
if (await asyncTest('hooks handle missing files gracefully', async () => {
  const testDir = createTestDir();
  const missingTranscript = path.join(testDir, 'nonexistent.jsonl');
  try {
    const outcome = await runHookWithInput(
      path.join(scriptsDir, 'evaluate-session.js'),
      {},
      { CLAUDE_TRANSCRIPT_PATH: missingTranscript }
    );
    // Should not crash, just skip processing
    assert.strictEqual(outcome.code, 0, 'Should exit 0 for missing file');
  } finally {
    cleanupTestDir(testDir);
  }
})) passed++; else failed++;
// ==========================================
// Realistic Scenario Tests
// ==========================================
console.log('\nRealistic Scenarios:');
// With the persisted counter one call below the threshold, the next tool
// call should trip the compact suggestion.
if (await asyncTest('suggest-compact increments and triggers at threshold', async () => {
  const sessionId = 'integration-test-' + Date.now();
  const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`);
  try {
    // Seed the on-disk counter just below the configured threshold of 50.
    fs.writeFileSync(counterFile, '49');
    const outcome = await runHookWithInput(
      path.join(scriptsDir, 'suggest-compact.js'),
      {},
      { CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '50' }
    );
    assert.ok(
      outcome.stderr.includes('50 tool calls'),
      'Should suggest compact at threshold'
    );
  } finally {
    // Always remove the counter so reruns start clean.
    if (fs.existsSync(counterFile)) fs.unlinkSync(counterFile);
  }
})) passed++; else failed++;
// evaluate-session should process a transcript once it holds enough user
// messages (15 here) and mention the message count on stderr.
if (await asyncTest('evaluate-session processes transcript with sufficient messages', async () => {
  const testDir = createTestDir();
  const transcriptPath = path.join(testDir, 'transcript.jsonl');
  // Build a JSONL transcript containing 15 user messages.
  const lines = [];
  for (let i = 1; i <= 15; i++) {
    lines.push(JSON.stringify({ type: 'user', content: `Test message ${i}` }));
  }
  fs.writeFileSync(transcriptPath, lines.join('\n'));
  try {
    const outcome = await runHookWithInput(
      path.join(scriptsDir, 'evaluate-session.js'),
      {},
      { CLAUDE_TRANSCRIPT_PATH: transcriptPath }
    );
    assert.ok(outcome.stderr.includes('15 messages'), 'Should process session');
  } finally {
    cleanupTestDir(testDir);
  }
})) passed++; else failed++;
// The PostToolUse PR-logging hook should spot a `gh pr create` invocation
// and surface the resulting PR URL on stderr.
if (await asyncTest('PostToolUse PR hook extracts PR URL', async () => {
  // Find the PR logging hook by its description.
  const prHook = hooks.hooks.PostToolUse.find(h =>
    h.description && h.description.includes('PR URL')
  );
  assert.ok(prHook, 'PR hook should exist');
  const match = prHook.hooks[0].command.match(/^node -e "(.+)"$/s);
  // Guard the regex result — the original checked prHook but still
  // dereferenced `match[1]` unguarded, which would throw a TypeError
  // instead of a clear assertion failure on a format change.
  assert.ok(match, 'PR hook command should match `node -e "..."` format');
  const proc = spawn('node', ['-e', match[1]], {
    stdio: ['pipe', 'pipe', 'pipe']
  });
  let stderr = '';
  proc.stderr.on('data', data => stderr += data);
  // Simulate gh pr create output
  proc.stdin.write(JSON.stringify({
    tool_input: { command: 'gh pr create --title "Test"' },
    tool_output: { output: 'Creating pull request...\nhttps://github.com/owner/repo/pull/123' }
  }));
  proc.stdin.end();
  await new Promise(resolve => proc.on('close', resolve));
  assert.ok(
    stderr.includes('PR created') || stderr.includes('github.com'),
    'Should extract and log PR URL'
  );
})) passed++; else failed++;
// ==========================================
// Error Handling Tests
// ==========================================
console.log('\nError Handling:');
// Deeply nested, unexpected input shapes must not crash the hook.
if (await asyncTest('hooks do not crash on unexpected input structure', async () => {
  const weirdInput = { unexpected: { nested: { deeply: 'value' } } };
  const outcome = await runHookWithInput(
    path.join(scriptsDir, 'suggest-compact.js'),
    weirdInput
  );
  assert.strictEqual(outcome.code, 0, 'Should handle unexpected input structure');
})) passed++; else failed++;
// Null fields in otherwise well-formed input must be tolerated.
if (await asyncTest('hooks handle null and missing values in input', async () => {
  const outcome = await runHookWithInput(
    path.join(scriptsDir, 'session-start.js'),
    { tool_input: null }
  );
  assert.strictEqual(outcome.code, 0, 'Should handle null/missing values gracefully');
})) passed++; else failed++;
// A ~100KB payload must be consumed promptly — guards against stdin
// back-pressure stalls in the hook's input handling.
if (await asyncTest('hooks handle very large input without hanging', async () => {
  const largeInput = {
    tool_input: { file_path: '/test.js' },
    tool_output: { output: 'x'.repeat(100000) }
  };
  const startedAt = Date.now();
  const outcome = await runHookWithInput(
    path.join(scriptsDir, 'session-start.js'),
    largeInput
  );
  const elapsed = Date.now() - startedAt;
  assert.strictEqual(outcome.code, 0, 'Should complete successfully');
  assert.ok(elapsed < 5000, `Should complete in <5s, took ${elapsed}ms`);
})) passed++; else failed++;
// Summary: print the final tally and propagate failures to CI via the
// process exit status.
const totalRun = passed + failed;
console.log('\n=== Test Results ===');
console.log(`Passed: ${passed}`);
console.log(`Failed: ${failed}`);
console.log(`Total: ${totalRun}\n`);
process.exit(failed === 0 ? 0 : 1);
}
runTests();