Browse Source

refactor: Streamline skills with reference extraction and add test suite

- Extract verbose content from SKILL.md files into references/ subdirs
- Add skill template for consistent structure
- Add functional test suite with fixtures (config.yaml, docker-compose.yml, example.js)
- Add validation scripts for trigger patterns
- Include pulse news digest and state tracking

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
0xDarkMatter 3 months ago
parent
commit
4fe9e03927
50 changed files with 4759 additions and 591 deletions
  1. 32 1
      .claude/settings.local.json
  2. 142 0
      news/2025-12-20_pulse.md
  3. 28 0
      news/state.json
  4. 2 0
      skills/code-stats/SKILL.md
  5. 18 276
      skills/data-processing/SKILL.md
  6. 42 0
      skills/data-processing/references/config-files.md
  7. 160 0
      skills/data-processing/references/jq-patterns.md
  8. 99 0
      skills/data-processing/references/yq-patterns.md
  9. 1 0
      skills/doc-scanner/SKILL.md
  10. 8 5
      skills/file-search/SKILL.md
  11. 8 5
      skills/find-replace/SKILL.md
  12. 2 0
      skills/git-workflow/SKILL.md
  13. 38 273
      skills/mcp-patterns/SKILL.md
  14. 50 0
      skills/mcp-patterns/references/auth-patterns.md
  15. 48 0
      skills/mcp-patterns/references/resource-patterns.md
  16. 57 0
      skills/mcp-patterns/references/state-patterns.md
  17. 39 0
      skills/mcp-patterns/references/testing-patterns.md
  18. 88 0
      skills/mcp-patterns/references/tool-patterns.md
  19. 2 1
      skills/project-planner/SKILL.md
  20. 8 5
      skills/python-env/SKILL.md
  21. 7 5
      skills/rest-patterns/SKILL.md
  22. 7 5
      skills/sql-patterns/SKILL.md
  23. 8 5
      skills/sqlite-ops/SKILL.md
  24. 2 0
      skills/structural-search/SKILL.md
  25. 8 5
      skills/tailwind-patterns/SKILL.md
  26. 2 0
      skills/task-runner/SKILL.md
  27. 7 5
      skills/tool-discovery/SKILL.md
  28. 243 0
      templates/SKILL.template.md
  29. 251 0
      tests/skills/README.md
  30. 18 0
      tests/skills/fixtures/config.yaml
  31. 27 0
      tests/skills/fixtures/docker-compose.yml
  32. 31 0
      tests/skills/fixtures/example.js
  33. 20 0
      tests/skills/fixtures/package.json
  34. 154 0
      tests/skills/functional/code-stats.md
  35. 209 0
      tests/skills/functional/code-stats.sh
  36. 150 0
      tests/skills/functional/data-processing.md
  37. 224 0
      tests/skills/functional/data-processing.sh
  38. 123 0
      tests/skills/functional/git-workflow.md
  39. 223 0
      tests/skills/functional/git-workflow.sh
  40. 168 0
      tests/skills/functional/structural-search.md
  41. 233 0
      tests/skills/functional/structural-search.sh
  42. 63 0
      tests/skills/manual-trigger-test.md
  43. 203 0
      tests/skills/reports/report_2025-12-20_222644.md
  44. 203 0
      tests/skills/reports/report_2025-12-20_222717.md
  45. 167 0
      tests/skills/reports/report_2025-12-20_222830.md
  46. 187 0
      tests/skills/reports/report_2025-12-20_222919.md
  47. 231 0
      tests/skills/reports/skill-analysis.md
  48. 241 0
      tests/skills/run-tests.sh
  49. 267 0
      tests/skills/trigger-tests.md
  50. 210 0
      tests/skills/validate-triggers.sh

+ 32 - 1
.claude/settings.local.json

@@ -56,7 +56,38 @@
       "Bash(claude:*)",
       "Bash(nc:*)",
       "Bash(curl:*)",
-      "Bash(npm update:*)"
+      "Bash(npm update:*)",
+      "Bash(xargs:*)",
+      "SlashCommand(/pulse:*)",
+      "Bash(gh api:*)",
+      "Bash(gh search:*)",
+      "WebFetch(domain:www.anthropic.com)",
+      "WebFetch(domain:simonwillison.net)",
+      "WebFetch(domain:blog.sshh.io)",
+      "WebFetch(domain:blog.gitbutler.com)",
+      "WebFetch(domain:github.com)",
+      "WebFetch(domain:nx.dev)",
+      "WebFetch(domain:every.to)",
+      "Bash(brew install:*)",
+      "WebFetch(domain:raw.githubusercontent.com)",
+      "WebFetch(domain:agentskills.io)",
+      "Bash(eza --tree:*)",
+      "Bash(chmod:*)",
+      "Bash(/Users/mack/projects/claude-mods/tests/skills/run-tests.sh:*)",
+      "Bash(/Users/mack/projects/claude-mods/tests/skills/validate-triggers.sh:*)",
+      "Bash(bash:*)",
+      "Bash(tests/skills/run-tests.sh)",
+      "Bash(command -v rg:*)",
+      "Bash(command -v:*)",
+      "Bash(tests/skills/run-tests.sh:*)",
+      "Skill(data-processing)",
+      "Skill(code-stats)",
+      "Bash(tokei:*)",
+      "Bash(difft:*)",
+      "Skill(structural-search)",
+      "Bash(sg:*)",
+      "Bash(fd:*)",
+      "Bash(sd:*)"
     ],
     "deny": [],
     "ask": []

File diff suppressed because it is too large
+ 142 - 0
news/2025-12-20_pulse.md


+ 28 - 0
news/state.json

@@ -0,0 +1,28 @@
+{
+  "version": "1.0",
+  "last_run": "2025-12-20T12:00:00Z",
+  "seen_urls": [
+    "https://www.anthropic.com/news/donating-the-model-context-protocol-and-establishing-of-the-agentic-ai-foundation",
+    "https://www.anthropic.com/news/compliance-framework-SB53",
+    "https://www.anthropic.com/news/genesis-mission-partnership",
+    "https://www.anthropic.com/news/protecting-well-being-of-users",
+    "https://www.anthropic.com/news/anthropic-accenture-partnership",
+    "https://simonwillison.net/2025/Dec/19/agent-skills/",
+    "https://simonwillison.net/2025/Dec/19/andrej-karpathy/",
+    "https://simonwillison.net/2025/Dec/19/introducing-gpt-52-codex/",
+    "https://simonwillison.net/2025/Dec/19/sam-rose-llms/",
+    "https://simonwillison.net/2025/Dec/18/code-proven-to-work/",
+    "https://simonwillison.net/2025/Dec/18/swift-justhtml/",
+    "https://www.docker.com/blog/develop-deploy-voice-ai-apps/",
+    "https://www.docker.com/blog/add-mcp-server-to-chatgpt/",
+    "https://www.docker.com/blog/docker-model-runner-universal-blue/",
+    "https://www.docker.com/blog/docker-hardened-images-security-independently-validated-by-srlabs/",
+    "https://www.docker.com/blog/from-the-captains-chair-igor-aleksandrov/",
+    "https://every.to/source-code/openai-gave-us-a-glimpse-into-their-ai-coding-playbook",
+    "https://every.to/p/how-ai-can-cut-your-planning-cycle-from-two-weeks-to-two-days",
+    "https://blog.gitbutler.com/gitbutler-with-multiple-accounts/",
+    "https://github.com/anthropics/skills",
+    "https://github.com/obra/superpowers"
+  ],
+  "seen_commits": {}
+}

+ 2 - 0
skills/code-stats/SKILL.md

@@ -1,6 +1,8 @@
 ---
 name: code-stats
 description: "Analyze codebase with tokei (fast line counts by language) and difft (semantic AST-aware diffs). Get quick project overview without manual counting. Triggers on: how big is codebase, count lines of code, what languages, show semantic diff, compare files, code statistics."
+compatibility: "Requires tokei and difft CLI tools. Install: brew install tokei difft (macOS) or cargo install tokei difftastic (cross-platform)."
+allowed-tools: "Bash"
 ---
 
 # Code Statistics

+ 18 - 276
skills/data-processing/SKILL.md

@@ -1,11 +1,12 @@
 ---
 name: data-processing
 description: "Process JSON with jq and YAML/TOML with yq. Filter, transform, query structured data efficiently. Triggers on: parse JSON, extract from YAML, query config, Docker Compose, K8s manifests, GitHub Actions workflows, package.json, filter data."
+compatibility: "Requires jq and yq CLI tools. Install: brew install jq yq (macOS)."
+allowed-tools: "Bash Read"
 ---
 
 # Data Processing
 
-## Purpose
 Query, filter, and transform structured data (JSON, YAML, TOML) efficiently from the command line.
 
 ## Tools
@@ -15,9 +16,7 @@ Query, filter, and transform structured data (JSON, YAML, TOML) efficiently from
 | jq | `jq '.key' file.json` | JSON processing |
 | yq | `yq '.key' file.yaml` | YAML/TOML processing |
 
-## jq Basics
-
-### Selection and Navigation
+## jq Essentials
 
 ```bash
 # Extract single field
@@ -34,140 +33,21 @@ jq '{name, version}' package.json
 
 # Navigate deeply nested
 jq '.data.users[0].profile.email' response.json
-```
-
-### Array Operations
-
-```bash
-# Get all array elements
-jq '.users[]' data.json
-
-# Get specific index
-jq '.users[0]' data.json
-
-# Slice array
-jq '.users[0:3]' data.json           # First 3 elements
-jq '.users[-2:]' data.json           # Last 2 elements
-
-# Array length
-jq '.users | length' data.json
-
-# Get array of specific field
-jq '.users[].name' data.json
-
-# Wrap results in array
-jq '[.users[].name]' data.json
-```
 
-### Filtering with select
-
-```bash
 # Filter by condition
 jq '.users[] | select(.active == true)' data.json
 
-# Multiple conditions
-jq '.users[] | select(.age > 21 and .status == "active")' data.json
-
-# String contains
-jq '.users[] | select(.email | contains("@gmail"))' data.json
-
-# Regex match
-jq '.users[] | select(.email | test("@(gmail|yahoo)"))' data.json
-
-# Not null check
-jq '.users[] | select(.profile != null)' data.json
-```
-
-### Transformation with map
-
-```bash
 # Transform each element
 jq '.users | map({id, name})' data.json
 
-# Add computed field
-jq '.users | map(. + {full_name: (.first + " " + .last)})' data.json
-
-# Filter and transform
-jq '.users | map(select(.active)) | map(.email)' data.json
-
-# map_values for objects
-jq '.config | map_values(. * 2)' data.json
-```
-
-### Object Manipulation
-
-```bash
-# Add/update field
-jq '.version = "2.0.0"' package.json
-
-# Delete field
-jq 'del(.devDependencies)' package.json
-
-# Rename key
-jq '.dependencies | to_entries | map(.key |= gsub("@"; ""))' package.json
-
-# Merge objects
-jq '. + {newField: "value"}' data.json
-
-# Update nested field
-jq '.scripts.test = "jest --coverage"' package.json
-
-# Conditional update
-jq 'if .version == "1.0.0" then .version = "1.0.1" else . end' package.json
-```
-
-### Aggregation
-
-```bash
-# Count
+# Count elements
 jq '.users | length' data.json
 
-# Sum
-jq '[.items[].price] | add' data.json
-
-# Min/Max
-jq '[.scores[]] | min' data.json
-jq '[.scores[]] | max' data.json
-
-# Average
-jq '[.scores[]] | add / length' data.json
-
-# Group by
-jq 'group_by(.category) | map({category: .[0].category, count: length})' data.json
-
-# Unique values
-jq '[.users[].role] | unique' data.json
-
-# Sort
-jq '.users | sort_by(.created_at)' data.json
-jq '.users | sort_by(.name) | reverse' data.json
-```
-
-### Output Formatting
-
-```bash
-# Pretty print
-jq '.' response.json
-
-# Compact output (single line)
-jq -c '.results[]' data.json
-
-# Raw strings (no quotes)
+# Raw string output
 jq -r '.name' package.json
-
-# Tab-separated output
-jq -r '.users[] | [.id, .name, .email] | @tsv' data.json
-
-# CSV output
-jq -r '.users[] | [.id, .name, .email] | @csv' data.json
-
-# URI encoding
-jq -r '.query | @uri' data.json
 ```
 
-## yq for YAML/TOML
-
-### Basic YAML Operations
+## yq Essentials
 
 ```bash
 # Extract field
@@ -179,163 +59,17 @@ yq '.services.web.image' docker-compose.yml
 # List all keys
 yq 'keys' config.yaml
 
-# Get array element
-yq '.volumes[0]' docker-compose.yml
-```
-
-### Docker Compose Queries
-
-```bash
-# List all service names
+# List all service names (Docker Compose)
 yq '.services | keys' docker-compose.yml
 
-# Get all images
-yq '.services[].image' docker-compose.yml
-
-# Get environment variables for a service
-yq '.services.web.environment' docker-compose.yml
-
-# Find services with specific image
-yq '.services | to_entries | map(select(.value.image | contains("nginx")))' docker-compose.yml
-```
-
-### Kubernetes Manifests
-
-```bash
-# Get resource name
-yq '.metadata.name' deployment.yaml
-
-# Get container images
+# Get container images (K8s)
 yq '.spec.template.spec.containers[].image' deployment.yaml
 
-# Get all labels
-yq '.metadata.labels' deployment.yaml
-
-# Multi-document YAML (---)
-yq eval-all '.metadata.name' manifests.yaml
-```
-
-### GitHub Actions Workflows
-
-```bash
-# List all jobs
-yq '.jobs | keys' .github/workflows/ci.yml
-
-# Get steps for a job
-yq '.jobs.build.steps[].name' .github/workflows/ci.yml
-
-# Find jobs using specific action
-yq '.jobs[].steps[] | select(.uses | contains("actions/checkout"))' .github/workflows/ci.yml
-
-# Get all environment variables
-yq '.env' .github/workflows/ci.yml
-```
-
-### TOML Processing
-
-```bash
-# Read TOML file
-yq -p toml '.dependencies' Cargo.toml
-
-# Convert TOML to JSON
-yq -p toml -o json '.' config.toml
-
-# Extract pyproject.toml dependencies
-yq -p toml '.project.dependencies[]' pyproject.toml
-```
-
-### YAML Modification
-
-```bash
 # Update value (in-place)
 yq -i '.version = "2.0.0"' config.yaml
 
-# Add new field
-yq -i '.new_field = "value"' config.yaml
-
-# Delete field
-yq -i 'del(.old_field)' config.yaml
-
-# Add to array
-yq -i '.tags += ["new-tag"]' config.yaml
-
-# Merge YAML files
-yq eval-all 'select(fileIndex == 0) * select(fileIndex == 1)' base.yaml override.yaml
-```
-
-## Common Config Files
-
-### package.json
-
-```bash
-# List all dependencies
-jq '.dependencies | keys' package.json
-
-# Get all scripts
-jq '.scripts' package.json
-
-# Find outdated patterns
-jq '.dependencies | to_entries | map(select(.value | startswith("^")))' package.json
-
-# Extract dev dependencies
-jq '.devDependencies | keys | .[]' package.json
-```
-
-### tsconfig.json
-
-```bash
-# Get compiler options
-jq '.compilerOptions' tsconfig.json
-
-# Check strict mode
-jq '.compilerOptions.strict' tsconfig.json
-
-# List paths aliases
-jq '.compilerOptions.paths' tsconfig.json
-```
-
-### ESLint/Prettier
-
-```bash
-# Get enabled rules
-jq '.rules | to_entries | map(select(.value != "off"))' .eslintrc.json
-
-# Check prettier options
-jq '.' .prettierrc.json
-```
-
-## Advanced Patterns
-
-### Combining jq with Shell
-
-```bash
-# Process multiple files
-for f in *.json; do jq '.name' "$f"; done
-
-# Pipeline with other tools
-curl -s https://api.github.com/users/octocat | jq '.login'
-
-# Assign to variable
-VERSION=$(jq -r '.version' package.json)
-
-# Conditional logic
-jq -e '.errors | length == 0' response.json && echo "Success"
-```
-
-### Complex Transformations
-
-```bash
-# Flatten nested structure
-jq '[.categories[].items[]] | flatten' data.json
-
-# Reshape data
-jq '.users | map({(.id | tostring): .name}) | add' data.json
-
-# Pivot data
-jq 'group_by(.date) | map({date: .[0].date, values: map(.value)})' data.json
-
-# Join arrays
-jq -s '.[0] + .[1]' file1.json file2.json
+# TOML to JSON
+yq -p toml -o json '.' config.toml
 ```
 
 ## Quick Reference
@@ -363,3 +97,11 @@ jq -s '.[0] + .[1]' file1.json file2.json
 - Filtering large JSON datasets
 - Config file manipulation
 - Data format conversion
+
+## Additional Resources
+
+For complete pattern libraries, load:
+
+- `./references/jq-patterns.md` - Arrays, filtering, transformation, aggregation, output formatting
+- `./references/yq-patterns.md` - Docker Compose, K8s, GitHub Actions, TOML, YAML modification
+- `./references/config-files.md` - package.json, tsconfig, eslint/prettier patterns

+ 42 - 0
skills/data-processing/references/config-files.md

@@ -0,0 +1,42 @@
+# Config File Patterns
+
+Common patterns for processing configuration files.
+
+## package.json
+
+```bash
+# List all dependencies
+jq '.dependencies | keys' package.json
+
+# Get all scripts
+jq '.scripts' package.json
+
+# Find outdated patterns
+jq '.dependencies | to_entries | map(select(.value | startswith("^")))' package.json
+
+# Extract dev dependencies
+jq '.devDependencies | keys | .[]' package.json
+```
+
+## tsconfig.json
+
+```bash
+# Get compiler options
+jq '.compilerOptions' tsconfig.json
+
+# Check strict mode
+jq '.compilerOptions.strict' tsconfig.json
+
+# List paths aliases
+jq '.compilerOptions.paths' tsconfig.json
+```
+
+## ESLint/Prettier
+
+```bash
+# Get enabled rules
+jq '.rules | to_entries | map(select(.value != "off"))' .eslintrc.json
+
+# Check prettier options
+jq '.' .prettierrc.json
+```

+ 160 - 0
skills/data-processing/references/jq-patterns.md

@@ -0,0 +1,160 @@
+# jq Patterns Reference
+
+Complete jq patterns for JSON processing.
+
+## Array Operations
+
+```bash
+# Get all array elements
+jq '.users[]' data.json
+
+# Get specific index
+jq '.users[0]' data.json
+
+# Slice array
+jq '.users[0:3]' data.json           # First 3 elements
+jq '.users[-2:]' data.json           # Last 2 elements
+
+# Array length
+jq '.users | length' data.json
+
+# Get array of specific field
+jq '.users[].name' data.json
+
+# Wrap results in array
+jq '[.users[].name]' data.json
+```
+
+## Filtering with select
+
+```bash
+# Filter by condition
+jq '.users[] | select(.active == true)' data.json
+
+# Multiple conditions
+jq '.users[] | select(.age > 21 and .status == "active")' data.json
+
+# String contains
+jq '.users[] | select(.email | contains("@gmail"))' data.json
+
+# Regex match
+jq '.users[] | select(.email | test("@(gmail|yahoo)"))' data.json
+
+# Not null check
+jq '.users[] | select(.profile != null)' data.json
+```
+
+## Transformation with map
+
+```bash
+# Transform each element
+jq '.users | map({id, name})' data.json
+
+# Add computed field
+jq '.users | map(. + {full_name: (.first + " " + .last)})' data.json
+
+# Filter and transform
+jq '.users | map(select(.active)) | map(.email)' data.json
+
+# map_values for objects
+jq '.config | map_values(. * 2)' data.json
+```
+
+## Object Manipulation
+
+```bash
+# Add/update field
+jq '.version = "2.0.0"' package.json
+
+# Delete field
+jq 'del(.devDependencies)' package.json
+
+# Rename key
+jq '.dependencies | to_entries | map(.key |= gsub("@"; ""))' package.json
+
+# Merge objects
+jq '. + {newField: "value"}' data.json
+
+# Update nested field
+jq '.scripts.test = "jest --coverage"' package.json
+
+# Conditional update
+jq 'if .version == "1.0.0" then .version = "1.0.1" else . end' package.json
+```
+
+## Aggregation
+
+```bash
+# Count
+jq '.users | length' data.json
+
+# Sum
+jq '[.items[].price] | add' data.json
+
+# Min/Max
+jq '[.scores[]] | min' data.json
+jq '[.scores[]] | max' data.json
+
+# Average
+jq '[.scores[]] | add / length' data.json
+
+# Group by
+jq 'group_by(.category) | map({category: .[0].category, count: length})' data.json
+
+# Unique values
+jq '[.users[].role] | unique' data.json
+
+# Sort
+jq '.users | sort_by(.created_at)' data.json
+jq '.users | sort_by(.name) | reverse' data.json
+```
+
+## Output Formatting
+
+```bash
+# Pretty print
+jq '.' response.json
+
+# Compact output (single line)
+jq -c '.results[]' data.json
+
+# Raw strings (no quotes)
+jq -r '.name' package.json
+
+# Tab-separated output
+jq -r '.users[] | [.id, .name, .email] | @tsv' data.json
+
+# CSV output
+jq -r '.users[] | [.id, .name, .email] | @csv' data.json
+
+# URI encoding
+jq -r '.query | @uri' data.json
+```
+
+## Advanced Patterns
+
+```bash
+# Process multiple files
+for f in *.json; do jq '.name' "$f"; done
+
+# Pipeline with other tools
+curl -s https://api.github.com/users/octocat | jq '.login'
+
+# Assign to variable
+VERSION=$(jq -r '.version' package.json)
+
+# Conditional logic
+jq -e '.errors | length == 0' response.json && echo "Success"
+
+# Flatten nested structure
+jq '[.categories[].items[]] | flatten' data.json
+
+# Reshape data
+jq '.users | map({(.id | tostring): .name}) | add' data.json
+
+# Pivot data
+jq 'group_by(.date) | map({date: .[0].date, values: map(.value)})' data.json
+
+# Join arrays
+jq -s '.[0] + .[1]' file1.json file2.json
+```

+ 99 - 0
skills/data-processing/references/yq-patterns.md

@@ -0,0 +1,99 @@
+# yq Patterns Reference
+
+Complete yq patterns for YAML/TOML processing.
+
+## Basic YAML Operations
+
+```bash
+# Extract field
+yq '.name' config.yaml
+
+# Extract nested
+yq '.services.web.image' docker-compose.yml
+
+# List all keys
+yq 'keys' config.yaml
+
+# Get array element
+yq '.volumes[0]' docker-compose.yml
+```
+
+## Docker Compose Queries
+
+```bash
+# List all service names
+yq '.services | keys' docker-compose.yml
+
+# Get all images
+yq '.services[].image' docker-compose.yml
+
+# Get environment variables for a service
+yq '.services.web.environment' docker-compose.yml
+
+# Find services with specific image
+yq '.services | to_entries | map(select(.value.image | contains("nginx")))' docker-compose.yml
+```
+
+## Kubernetes Manifests
+
+```bash
+# Get resource name
+yq '.metadata.name' deployment.yaml
+
+# Get container images
+yq '.spec.template.spec.containers[].image' deployment.yaml
+
+# Get all labels
+yq '.metadata.labels' deployment.yaml
+
+# Multi-document YAML (---)
+yq eval-all '.metadata.name' manifests.yaml
+```
+
+## GitHub Actions Workflows
+
+```bash
+# List all jobs
+yq '.jobs | keys' .github/workflows/ci.yml
+
+# Get steps for a job
+yq '.jobs.build.steps[].name' .github/workflows/ci.yml
+
+# Find jobs using specific action
+yq '.jobs[].steps[] | select(.uses | contains("actions/checkout"))' .github/workflows/ci.yml
+
+# Get all environment variables
+yq '.env' .github/workflows/ci.yml
+```
+
+## TOML Processing
+
+```bash
+# Read TOML file
+yq -p toml '.dependencies' Cargo.toml
+
+# Convert TOML to JSON
+yq -p toml -o json '.' config.toml
+
+# Extract pyproject.toml dependencies
+yq -p toml '.project.dependencies[]' pyproject.toml
+```
+
+## YAML Modification
+
+```bash
+# Update value (in-place)
+yq -i '.version = "2.0.0"' config.yaml
+
+# Add new field
+yq -i '.new_field = "value"' config.yaml
+
+# Delete field
+yq -i 'del(.old_field)' config.yaml
+
+# Add to array
+yq -i '.tags += ["new-tag"]' config.yaml
+
+# Merge YAML files
+yq eval-all 'select(fileIndex == 0) * select(fileIndex == 1)' base.yaml override.yaml
+```

+ 1 - 0
skills/doc-scanner/SKILL.md

@@ -1,6 +1,7 @@
 ---
 name: doc-scanner
 description: "Scans for project documentation files (AGENTS.md, CLAUDE.md, GEMINI.md, COPILOT.md, CURSOR.md, WARP.md, and 15+ other formats) and synthesizes guidance. Auto-activates when user asks to review, understand, or explore a codebase, when starting work in a new project, when asking about conventions or agents, or when documentation context would help. Can consolidate multiple platform docs into unified AGENTS.md."
+allowed-tools: "Glob Read Write Bash"
 ---
 
 # Documentation Scanner

+ 8 - 5
skills/file-search/SKILL.md

@@ -1,10 +1,13 @@
-# File Search Skill
+---
+name: file-search
+description: "Modern file and content search using fd, ripgrep (rg), and fzf. Triggers on: fd, ripgrep, rg, find files, search code, fzf, fuzzy find, search codebase."
+compatibility: "Requires fd, ripgrep (rg), and optionally fzf. Install: brew install fd ripgrep fzf (macOS)."
+allowed-tools: "Bash"
+---
 
-Modern file and content search using fd, ripgrep (rg), and fzf for interactive selection.
-
-## Triggers
+# File Search
 
-fd, ripgrep, rg, find files, search code, fzf, fuzzy find, search codebase
+Modern file and content search using fd, ripgrep (rg), and fzf for interactive selection.
 
 ## fd - Find Files (Better than find)
 

+ 8 - 5
skills/find-replace/SKILL.md

@@ -1,10 +1,13 @@
-# Find Replace Skill
+---
+name: find-replace
+description: "Modern find-and-replace using sd (simpler than sed) and batch replacement patterns. Triggers on: sd, find replace, batch replace, sed replacement, string replacement, rename."
+compatibility: "Requires sd CLI tool. Install: brew install sd (macOS) or cargo install sd (cross-platform)."
+allowed-tools: "Bash"
+---
 
-Modern find-and-replace using sd (simpler than sed) and batch replacement patterns.
-
-## Triggers
+# Find Replace
 
-sd, find replace, batch replace, sed replacement, string replacement, rename
+Modern find-and-replace using sd (simpler than sed) and batch replacement patterns.
 
 ## sd Basics
 

+ 2 - 0
skills/git-workflow/SKILL.md

@@ -1,6 +1,8 @@
 ---
 name: git-workflow
 description: "Enhanced git operations using lazygit, gh (GitHub CLI), and delta. Triggers on stage changes, create PR, review PR, check issues, git diff, commit interactively, GitHub operations, rebase, stash, bisect."
+compatibility: "Requires git, gh (GitHub CLI), lazygit, and delta. Network access needed for GitHub operations."
+allowed-tools: "Bash"
 ---
 
 # Git Workflow

+ 38 - 273
skills/mcp-patterns/SKILL.md

@@ -1,14 +1,16 @@
-# MCP Patterns Skill
+---
+name: mcp-patterns
+description: "Model Context Protocol (MCP) server patterns for building integrations with Claude Code. Triggers on: mcp server, model context protocol, tool handler, mcp resource, mcp tool."
+compatibility: "Requires Python 3.10+ or Node.js 18+ for MCP server development."
+allowed-tools: "Read Write Bash"
+---
 
-Model Context Protocol (MCP) server patterns for building integrations with Claude Code.
-
-## Triggers
+# MCP Patterns
 
-mcp server, model context protocol, tool handler, mcp resource, mcp tool
+Model Context Protocol (MCP) server patterns for building integrations with Claude Code.
 
-## Server Structure
+## Basic MCP Server (Python)
 
-### Basic MCP Server (Python)
 ```python
 from mcp.server import Server
 from mcp.server.stdio import stdio_server
@@ -47,7 +49,8 @@ if __name__ == "__main__":
     asyncio.run(main())
 ```
 
-### Project Layout
+## Project Layout
+
 ```
 my-mcp-server/
 ├── src/
@@ -60,239 +63,10 @@ my-mcp-server/
 └── README.md
 ```
 
-## Tool Patterns
-
-### Tool with Validation
-```python
-from pydantic import BaseModel, Field
-
-class SearchInput(BaseModel):
-    query: str = Field(..., min_length=1, max_length=500)
-    limit: int = Field(default=10, ge=1, le=100)
-
-@app.call_tool()
-async def call_tool(name: str, arguments: dict):
-    if name == "search":
-        # Pydantic validates and parses
-        params = SearchInput(**arguments)
-        results = await search(params.query, params.limit)
-        return {"content": [{"type": "text", "text": json.dumps(results)}]}
-```
-
-### Tool with Error Handling
-```python
-@app.call_tool()
-async def call_tool(name: str, arguments: dict):
-    try:
-        if name == "fetch_data":
-            data = await fetch_data(arguments["url"])
-            return {"content": [{"type": "text", "text": data}]}
-    except httpx.HTTPStatusError as e:
-        return {
-            "content": [{"type": "text", "text": f"HTTP error: {e.response.status_code}"}],
-            "isError": True
-        }
-    except Exception as e:
-        return {
-            "content": [{"type": "text", "text": f"Error: {str(e)}"}],
-            "isError": True
-        }
-```
-
-### Multiple Tool Registration
-```python
-TOOLS = {
-    "list_items": {
-        "description": "List all items",
-        "schema": {"type": "object", "properties": {}},
-        "handler": handle_list_items
-    },
-    "get_item": {
-        "description": "Get specific item",
-        "schema": {
-            "type": "object",
-            "properties": {"id": {"type": "string"}},
-            "required": ["id"]
-        },
-        "handler": handle_get_item
-    },
-    "create_item": {
-        "description": "Create new item",
-        "schema": {
-            "type": "object",
-            "properties": {
-                "name": {"type": "string"},
-                "data": {"type": "object"}
-            },
-            "required": ["name"]
-        },
-        "handler": handle_create_item
-    }
-}
-
-@app.list_tools()
-async def list_tools():
-    return [
-        {"name": name, "description": t["description"], "inputSchema": t["schema"]}
-        for name, t in TOOLS.items()
-    ]
-
-@app.call_tool()
-async def call_tool(name: str, arguments: dict):
-    if name not in TOOLS:
-        raise ValueError(f"Unknown tool: {name}")
-    return await TOOLS[name]["handler"](arguments)
-```
-
-## Resource Patterns
-
-### Static Resource
-```python
-@app.list_resources()
-async def list_resources():
-    return [
-        {
-            "uri": "config://settings",
-            "name": "Application Settings",
-            "mimeType": "application/json"
-        }
-    ]
-
-@app.read_resource()
-async def read_resource(uri: str):
-    if uri == "config://settings":
-        return json.dumps({"theme": "dark", "lang": "en"})
-    raise ValueError(f"Unknown resource: {uri}")
-```
-
-### Dynamic Resources
-```python
-@app.list_resources()
-async def list_resources():
-    # List available resources dynamically
-    items = await get_all_items()
-    return [
-        {
-            "uri": f"item://{item.id}",
-            "name": item.name,
-            "mimeType": "application/json"
-        }
-        for item in items
-    ]
-
-@app.read_resource()
-async def read_resource(uri: str):
-    if uri.startswith("item://"):
-        item_id = uri.replace("item://", "")
-        item = await get_item(item_id)
-        return json.dumps(item.to_dict())
-    raise ValueError(f"Unknown resource: {uri}")
-```
-
-## Authentication Patterns
-
-### Environment Variables
-```python
-import os
-
-API_KEY = os.environ.get("MY_API_KEY")
-if not API_KEY:
-    raise ValueError("MY_API_KEY environment variable required")
-
-async def make_api_call(endpoint: str):
-    async with httpx.AsyncClient() as client:
-        response = await client.get(
-            f"https://api.example.com/{endpoint}",
-            headers={"Authorization": f"Bearer {API_KEY}"}
-        )
-        response.raise_for_status()
-        return response.json()
-```
-
-### OAuth Token Refresh
-```python
-from datetime import datetime, timedelta
-
-class TokenManager:
-    def __init__(self):
-        self.token = None
-        self.expires_at = None
-
-    async def get_token(self) -> str:
-        if self.token and self.expires_at > datetime.now():
-            return self.token
-
-        # Refresh token
-        async with httpx.AsyncClient() as client:
-            response = await client.post(
-                "https://auth.example.com/token",
-                data={"grant_type": "client_credentials", ...}
-            )
-            data = response.json()
-            self.token = data["access_token"]
-            self.expires_at = datetime.now() + timedelta(seconds=data["expires_in"] - 60)
-            return self.token
-
-token_manager = TokenManager()
-```
-
-## State Management
-
-### SQLite for Persistence
-```python
-import aiosqlite
-
-DB_PATH = Path.home() / ".my-mcp-server" / "state.db"
-
-async def init_db():
-    DB_PATH.parent.mkdir(parents=True, exist_ok=True)
-    async with aiosqlite.connect(DB_PATH) as db:
-        await db.execute("""
-            CREATE TABLE IF NOT EXISTS cache (
-                key TEXT PRIMARY KEY,
-                value TEXT,
-                expires_at TEXT
-            )
-        """)
-        await db.commit()
-
-async def get_cached(key: str) -> str | None:
-    async with aiosqlite.connect(DB_PATH) as db:
-        cursor = await db.execute(
-            "SELECT value FROM cache WHERE key = ? AND expires_at > datetime('now')",
-            (key,)
-        )
-        row = await cursor.fetchone()
-        return row[0] if row else None
-
-async def set_cached(key: str, value: str, ttl_seconds: int = 3600):
-    async with aiosqlite.connect(DB_PATH) as db:
-        await db.execute(
-            "INSERT OR REPLACE INTO cache (key, value, expires_at) VALUES (?, ?, datetime('now', '+' || ? || ' seconds'))",
-            (key, value, ttl_seconds)
-        )
-        await db.commit()
-```
-
-### In-Memory Cache
-```python
-from functools import lru_cache
-from cachetools import TTLCache
-
-# Simple TTL cache
-cache = TTLCache(maxsize=100, ttl=300)  # 5 minute TTL
-
-async def get_data(key: str):
-    if key in cache:
-        return cache[key]
-    data = await fetch_from_api(key)
-    cache[key] = data
-    return data
-```
-
 ## Claude Desktop Configuration
 
-### claude_desktop_config.json
+### Basic Configuration
+
 ```json
 {
   "mcpServers": {
@@ -308,6 +82,7 @@ async def get_data(key: str):
 ```
 
 ### With uv (Recommended)
+
 ```json
 {
   "mcpServers": {
@@ -322,41 +97,21 @@ async def get_data(key: str):
 }
 ```
 
-## Testing Patterns
-
-### Manual Test Script
-```python
-# test_server.py
-import asyncio
-from my_server.server import app
-
-async def test_tools():
-    tools = await app.list_tools()
-    print(f"Available tools: {[t['name'] for t in tools]}")
-
-    result = await app.call_tool("my_tool", {"query": "test"})
-    print(f"Result: {result}")
-
-if __name__ == "__main__":
-    asyncio.run(test_tools())
-```
-
-### pytest with Async
-```python
-import pytest
-from my_server.tools import handle_search
+## Quick Reference
 
-@pytest.mark.asyncio
-async def test_search_returns_results():
-    result = await handle_search({"query": "test", "limit": 5})
-    assert "content" in result
-    assert len(result["content"]) > 0
-
-@pytest.mark.asyncio
-async def test_search_handles_empty():
-    result = await handle_search({"query": "xyznonexistent123"})
-    assert result["content"][0]["text"] == "No results found"
-```
+| Pattern | Use Case | Reference |
+|---------|----------|-----------|
+| Tool validation | Input sanitization with Pydantic | `./references/tool-patterns.md` |
+| Error handling | Graceful failure responses | `./references/tool-patterns.md` |
+| Multiple tools | CRUD-style tool registration | `./references/tool-patterns.md` |
+| Static resources | Config/settings exposure | `./references/resource-patterns.md` |
+| Dynamic resources | Database-backed resources | `./references/resource-patterns.md` |
+| Environment auth | API key from env vars | `./references/auth-patterns.md` |
+| OAuth tokens | Token refresh with TTL | `./references/auth-patterns.md` |
+| SQLite cache | Persistent state storage | `./references/state-patterns.md` |
+| In-memory cache | TTL-based caching | `./references/state-patterns.md` |
+| Manual testing | Quick validation script | `./references/testing-patterns.md` |
+| pytest async | Unit tests for tools | `./references/testing-patterns.md` |
 
 ## Common Issues
 
@@ -367,3 +122,13 @@ async def test_search_handles_empty():
 | Auth failures | Check env vars are set in config, not shell |
 | Timeout errors | Add timeout to httpx calls, use async properly |
 | JSON parse errors | Ensure `call_tool` returns proper content structure |
+
+## Additional Resources
+
+For detailed patterns, load:
+
+- `./references/tool-patterns.md` - Validation, error handling, multi-tool registration
+- `./references/resource-patterns.md` - Static and dynamic resource exposure
+- `./references/auth-patterns.md` - Environment variables, OAuth token refresh
+- `./references/state-patterns.md` - SQLite persistence, in-memory caching
+- `./references/testing-patterns.md` - Manual test scripts, pytest async patterns

+ 50 - 0
skills/mcp-patterns/references/auth-patterns.md

@@ -0,0 +1,50 @@
+# MCP Authentication Patterns
+
+Patterns for handling authentication in MCP servers.
+
+## Environment Variables
+
+```python
+import os
+
+API_KEY = os.environ.get("MY_API_KEY")
+if not API_KEY:
+    raise ValueError("MY_API_KEY environment variable required")
+
+async def make_api_call(endpoint: str):
+    async with httpx.AsyncClient() as client:
+        response = await client.get(
+            f"https://api.example.com/{endpoint}",
+            headers={"Authorization": f"Bearer {API_KEY}"}
+        )
+        response.raise_for_status()
+        return response.json()
+```
+
+## OAuth Token Refresh
+
+```python
+from datetime import datetime, timedelta
+
+class TokenManager:
+    def __init__(self):
+        self.token = None
+        self.expires_at = None
+
+    async def get_token(self) -> str:
+        if self.token and self.expires_at > datetime.now():
+            return self.token
+
+        # Refresh token
+        async with httpx.AsyncClient() as client:
+            response = await client.post(
+                "https://auth.example.com/token",
+                data={"grant_type": "client_credentials", ...}
+            )
+            data = response.json()
+            self.token = data["access_token"]
+            self.expires_at = datetime.now() + timedelta(seconds=data["expires_in"] - 60)
+            return self.token
+
+token_manager = TokenManager()
+```

+ 48 - 0
skills/mcp-patterns/references/resource-patterns.md

@@ -0,0 +1,48 @@
+# MCP Resource Patterns
+
+Patterns for exposing resources via MCP.
+
+## Static Resource
+
+```python
+@app.list_resources()
+async def list_resources():
+    return [
+        {
+            "uri": "config://settings",
+            "name": "Application Settings",
+            "mimeType": "application/json"
+        }
+    ]
+
+@app.read_resource()
+async def read_resource(uri: str):
+    if uri == "config://settings":
+        return json.dumps({"theme": "dark", "lang": "en"})
+    raise ValueError(f"Unknown resource: {uri}")
+```
+
+## Dynamic Resources
+
+```python
+@app.list_resources()
+async def list_resources():
+    # List available resources dynamically
+    items = await get_all_items()
+    return [
+        {
+            "uri": f"item://{item.id}",
+            "name": item.name,
+            "mimeType": "application/json"
+        }
+        for item in items
+    ]
+
+@app.read_resource()
+async def read_resource(uri: str):
+    if uri.startswith("item://"):
+        item_id = uri.replace("item://", "")
+        item = await get_item(item_id)
+        return json.dumps(item.to_dict())
+    raise ValueError(f"Unknown resource: {uri}")
+```

+ 57 - 0
skills/mcp-patterns/references/state-patterns.md

@@ -0,0 +1,57 @@
+# MCP State Management Patterns
+
+Patterns for persisting and caching state in MCP servers.
+
+## SQLite for Persistence
+
+```python
+import aiosqlite
+
+DB_PATH = Path.home() / ".my-mcp-server" / "state.db"
+
+async def init_db():
+    DB_PATH.parent.mkdir(parents=True, exist_ok=True)
+    async with aiosqlite.connect(DB_PATH) as db:
+        await db.execute("""
+            CREATE TABLE IF NOT EXISTS cache (
+                key TEXT PRIMARY KEY,
+                value TEXT,
+                expires_at TEXT
+            )
+        """)
+        await db.commit()
+
+async def get_cached(key: str) -> str | None:
+    async with aiosqlite.connect(DB_PATH) as db:
+        cursor = await db.execute(
+            "SELECT value FROM cache WHERE key = ? AND expires_at > datetime('now')",
+            (key,)
+        )
+        row = await cursor.fetchone()
+        return row[0] if row else None
+
+async def set_cached(key: str, value: str, ttl_seconds: int = 3600):
+    async with aiosqlite.connect(DB_PATH) as db:
+        await db.execute(
+            "INSERT OR REPLACE INTO cache (key, value, expires_at) VALUES (?, ?, datetime('now', '+' || ? || ' seconds'))",
+            (key, value, ttl_seconds)
+        )
+        await db.commit()
+```
+
+## In-Memory Cache
+
+```python
+from functools import lru_cache
+from cachetools import TTLCache
+
+# Simple TTL cache
+cache = TTLCache(maxsize=100, ttl=300)  # 5 minute TTL
+
+async def get_data(key: str):
+    if key in cache:
+        return cache[key]
+    data = await fetch_from_api(key)
+    cache[key] = data
+    return data
+```

+ 39 - 0
skills/mcp-patterns/references/testing-patterns.md

@@ -0,0 +1,39 @@
+# MCP Testing Patterns
+
+Patterns for testing MCP servers.
+
+## Manual Test Script
+
+```python
+# test_server.py
+import asyncio
+from my_server.server import app
+
+async def test_tools():
+    tools = await app.list_tools()
+    print(f"Available tools: {[t['name'] for t in tools]}")
+
+    result = await app.call_tool("my_tool", {"query": "test"})
+    print(f"Result: {result}")
+
+if __name__ == "__main__":
+    asyncio.run(test_tools())
+```
+
+## pytest with Async
+
+```python
+import pytest
+from my_server.tools import handle_search
+
+@pytest.mark.asyncio
+async def test_search_returns_results():
+    result = await handle_search({"query": "test", "limit": 5})
+    assert "content" in result
+    assert len(result["content"]) > 0
+
+@pytest.mark.asyncio
+async def test_search_handles_empty():
+    result = await handle_search({"query": "xyznonexistent123"})
+    assert result["content"][0]["text"] == "No results found"
+```

+ 88 - 0
skills/mcp-patterns/references/tool-patterns.md

@@ -0,0 +1,88 @@
+# MCP Tool Patterns
+
+Extended patterns for MCP tool implementation.
+
+## Tool with Validation
+
+```python
+from pydantic import BaseModel, Field
+
+class SearchInput(BaseModel):
+    query: str = Field(..., min_length=1, max_length=500)
+    limit: int = Field(default=10, ge=1, le=100)
+
+@app.call_tool()
+async def call_tool(name: str, arguments: dict):
+    if name == "search":
+        # Pydantic validates and parses
+        params = SearchInput(**arguments)
+        results = await search(params.query, params.limit)
+        return {"content": [{"type": "text", "text": json.dumps(results)}]}
+```
+
+## Tool with Error Handling
+
+```python
+@app.call_tool()
+async def call_tool(name: str, arguments: dict):
+    try:
+        if name == "fetch_data":
+            data = await fetch_data(arguments["url"])
+            return {"content": [{"type": "text", "text": data}]}
+    except httpx.HTTPStatusError as e:
+        return {
+            "content": [{"type": "text", "text": f"HTTP error: {e.response.status_code}"}],
+            "isError": True
+        }
+    except Exception as e:
+        return {
+            "content": [{"type": "text", "text": f"Error: {str(e)}"}],
+            "isError": True
+        }
+```
+
+## Multiple Tool Registration
+
+```python
+TOOLS = {
+    "list_items": {
+        "description": "List all items",
+        "schema": {"type": "object", "properties": {}},
+        "handler": handle_list_items
+    },
+    "get_item": {
+        "description": "Get specific item",
+        "schema": {
+            "type": "object",
+            "properties": {"id": {"type": "string"}},
+            "required": ["id"]
+        },
+        "handler": handle_get_item
+    },
+    "create_item": {
+        "description": "Create new item",
+        "schema": {
+            "type": "object",
+            "properties": {
+                "name": {"type": "string"},
+                "data": {"type": "object"}
+            },
+            "required": ["name"]
+        },
+        "handler": handle_create_item
+    }
+}
+
+@app.list_tools()
+async def list_tools():
+    return [
+        {"name": name, "description": t["description"], "inputSchema": t["schema"]}
+        for name, t in TOOLS.items()
+    ]
+
+@app.call_tool()
+async def call_tool(name: str, arguments: dict):
+    if name not in TOOLS:
+        raise ValueError(f"Unknown tool: {name}")
+    return await TOOLS[name]["handler"](arguments)
+```

+ 2 - 1
skills/project-planner/SKILL.md

@@ -1,6 +1,7 @@
 ---
 name: project-planner
-description: Detects stale project plans and suggests /plan command usage. Triggers on: sync plan, update plan, check status, plan is stale, track progress, project planning.
+description: "Detects stale project plans and suggests /plan command usage. Triggers on: sync plan, update plan, check status, plan is stale, track progress, project planning."
+allowed-tools: "Read Glob TodoWrite"
 ---
 
 # Project Planner Skill

+ 8 - 5
skills/python-env/SKILL.md

@@ -1,10 +1,13 @@
-# Python Environment Skill
+---
+name: python-env
+description: "Fast Python environment management with uv (10-100x faster than pip). Triggers on: uv, venv, pip, pyproject, python environment, install package, dependencies."
+compatibility: "Requires uv CLI tool. Install: curl -LsSf https://astral.sh/uv/install.sh | sh"
+allowed-tools: "Bash"
+---
 
-Fast Python environment management with uv (10-100x faster than pip).
-
-## Triggers
+# Python Environment
 
-uv, venv, pip, pyproject, python environment, install package, dependencies
+Fast Python environment management with uv (10-100x faster than pip).
 
 ## Quick Commands
 

+ 7 - 5
skills/rest-patterns/SKILL.md

@@ -1,10 +1,12 @@
-# REST Patterns Skill
+---
+name: rest-patterns
+description: "Quick reference for RESTful API design patterns, HTTP semantics, caching, and rate limiting. Triggers on: rest api, http methods, status codes, api design, endpoint design, api versioning, rate limiting, caching."
+allowed-tools: "Read Write"
+---
 
-Quick reference for RESTful API design patterns, HTTP semantics, caching, and rate limiting.
-
-## Triggers
+# REST Patterns
 
-rest api, http methods, status codes, api design, endpoint design, rest patterns, api versioning, rate limiting, caching
+Quick reference for RESTful API design patterns, HTTP semantics, caching, and rate limiting.
 
 ## HTTP Methods
 

+ 7 - 5
skills/sql-patterns/SKILL.md

@@ -1,10 +1,12 @@
-# SQL Patterns Skill
+---
+name: sql-patterns
+description: "Quick reference for common SQL patterns, CTEs, window functions, and indexing strategies. Triggers on: sql patterns, cte example, window functions, sql join, index strategy, pagination sql."
+allowed-tools: "Read Write"
+---
 
-Quick reference for common SQL patterns, CTEs, window functions, and indexing strategies.
-
-## Triggers
+# SQL Patterns
 
-sql patterns, cte example, window functions, sql join, index strategy, pagination sql
+Quick reference for common SQL patterns, CTEs, window functions, and indexing strategies.
 
 ## CTE (Common Table Expressions)
 

+ 8 - 5
skills/sqlite-ops/SKILL.md

@@ -1,10 +1,13 @@
-# SQLite Operations Skill
+---
+name: sqlite-ops
+description: "Patterns for SQLite databases in Python projects - state management, caching, and async operations. Triggers on: sqlite, sqlite3, aiosqlite, local database, database schema, migration, wal mode."
+compatibility: "Requires Python 3.8+ with sqlite3 (standard library) or aiosqlite for async."
+allowed-tools: "Read Write Bash"
+---
 
-Patterns for SQLite databases in Python projects - state management, caching, and async operations.
-
-## Triggers
+# SQLite Operations
 
-sqlite, sqlite3, aiosqlite, local database, database schema, migration, wal mode
+Patterns for SQLite databases in Python projects - state management, caching, and async operations.
 
 ## Schema Design Patterns
 

+ 2 - 0
skills/structural-search/SKILL.md

@@ -1,6 +1,8 @@
 ---
 name: structural-search
 description: "Search code by AST structure using ast-grep. Find semantic patterns like function calls, imports, class definitions instead of text patterns. Triggers on: find all calls to X, search for pattern, refactor usages, find where function is used, structural search."
+compatibility: "Requires ast-grep (sg) CLI tool. Install: brew install ast-grep (macOS) or cargo install ast-grep (cross-platform)."
+allowed-tools: "Bash"
 ---
 
 # Structural Search

+ 8 - 5
skills/tailwind-patterns/SKILL.md

@@ -1,10 +1,13 @@
-# Tailwind Patterns Skill
+---
+name: tailwind-patterns
+description: "Quick reference for Tailwind CSS utility patterns, responsive design, and configuration. Triggers on: tailwind, utility classes, responsive design, tailwind config, dark mode css, tw classes."
+compatibility: "For projects using Tailwind CSS v3+."
+allowed-tools: "Read Write"
+---
 
-Quick reference for Tailwind CSS utility patterns, responsive design, and configuration.
-
-## Triggers
+# Tailwind Patterns
 
-tailwind, utility classes, responsive design, tailwind config, dark mode css, tw classes
+Quick reference for Tailwind CSS utility patterns, responsive design, and configuration.
 
 ## Responsive Breakpoints
 

+ 2 - 0
skills/task-runner/SKILL.md

@@ -1,6 +1,8 @@
 ---
 name: task-runner
 description: "Run project commands with just. Check for justfile in project root, list available tasks, execute common operations like test, build, lint. Triggers on: run tests, build project, list tasks, check available commands, run script, project commands."
+compatibility: "Requires just CLI tool. Install: brew install just (macOS) or cargo install just (cross-platform)."
+allowed-tools: "Bash Glob"
 ---
 
 # Task Runner

+ 7 - 5
skills/tool-discovery/SKILL.md

@@ -1,10 +1,12 @@
-# Tool Discovery Skill
-
-Recommend the right agents and skills for any task. Covers both heavyweight agents (Task tool) and lightweight skills (Skill tool).
+---
+name: tool-discovery
+description: "Recommend the right agents and skills for any task. Covers both heavyweight agents (Task tool) and lightweight skills (Skill tool). Triggers on: which agent, which skill, what tool should I use, help me choose, recommend agent, find the right tool."
+allowed-tools: "Read Glob"
+---
 
-## Triggers
+# Tool Discovery
 
-which agent, which skill, what tool should I use, help me choose, recommend agent, recommend skill, find the right tool, what's available
+Recommend the right agents and skills for any task. Covers both heavyweight agents (Task tool) and lightweight skills (Skill tool).
 
 ## Decision Flowchart
 

+ 243 - 0
templates/SKILL.template.md

@@ -0,0 +1,243 @@
+# Skill Template
+
+> Official specification: https://agentskills.io/specification
+
+## Frontmatter Reference
+
+```yaml
+---
+# ┌─────────────────────────────────────────────────────────────────────────────┐
+# │ REQUIRED FIELDS                                                              │
+# └─────────────────────────────────────────────────────────────────────────────┘
+
+name: my-skill-name
+# - 1-64 characters
+# - Lowercase letters, numbers, and hyphens only
+# - Must NOT start or end with a hyphen
+# - Examples: "code-review", "git-workflow", "api-client"
+
+description: "Clear description of what this skill does and when Claude should use it."
+# - Max 1024 characters
+# - Must be non-empty
+# - Should answer: What does it do? When should it activate?
+# - Include trigger phrases for discoverability
+
+# ┌─────────────────────────────────────────────────────────────────────────────┐
+# │ OPTIONAL FIELDS                                                              │
+# └─────────────────────────────────────────────────────────────────────────────┘
+
+license: MIT
+# - Licensing terms for the skill
+# - Keep brief (e.g., "MIT", "Apache-2.0", "Proprietary")
+
+compatibility: "Requires Node.js 18+, macOS/Linux only"
+# - Max 500 characters
+# - Only include if skill has specific environment requirements
+# - Omit if skill works everywhere
+
+metadata:
+  author: "Your Name"
+  version: "1.0.0"
+  tags: ["git", "workflow", "automation"]
+# - Key-value mapping for custom properties
+# - Not defined by spec - use for your own tracking
+
+allowed-tools: "Bash Read Write Glob Grep"
+# - Space-delimited list of pre-approved tools
+# - EXPERIMENTAL: May change in future spec versions
+# - Use when skill requires specific tool access
+---
+```
+
+## Progressive Disclosure
+
+Skills load content in stages to minimize context consumption:
+
+| Level | Content | When Loaded | Token Budget |
+|-------|---------|-------------|--------------|
+| **Metadata** | `name` + `description` | Startup (all skills) | ~100 tokens |
+| **Instructions** | SKILL.md body | Skill activated | <5000 tokens |
+| **Resources** | references/, scripts/, assets/ | On-demand | As needed |
+
+### When to Split Content
+
+| SKILL.md Size | Action |
+|---------------|--------|
+| < 150 lines | Keep as single file |
+| 150-300 lines | Consider extracting reference tables |
+| 300+ lines | **Must split** - extract to references/ |
+
+## Directory Structure
+
+### Simple Skill (<150 lines)
+
+```
+my-skill/
+└── SKILL.md
+```
+
+### Medium Skill (150-300 lines)
+
+```
+my-skill/
+├── SKILL.md
+└── references/
+    └── REFERENCE.md         # Extended patterns
+```
+
+### Complex Skill (300+ lines)
+
+```
+my-skill/
+├── SKILL.md                    # Core only (<300 lines)
+├── references/
+│   ├── REFERENCE.md            # Primary reference
+│   ├── {lang}-patterns.md      # Language-specific
+│   └── {domain}-examples.md    # Domain-specific
+├── scripts/                    # Optional
+│   └── helper.{sh,py,js}
+└── assets/                     # Optional
+    └── template.{json,yaml}
+```
+
+## Content Migration Rules
+
+### What Stays in SKILL.md
+
+- Frontmatter (name, description, compatibility, allowed-tools)
+- Purpose/Overview (1-3 sentences)
+- Activation triggers (when to use)
+- **Top 10 most-common patterns** (essential use cases)
+- Quick reference table
+- "Additional Resources" section linking to references/
+
+### What Moves to references/
+
+| Content Type | Destination |
+|--------------|-------------|
+| Complete pattern lookup tables | `references/REFERENCE.md` |
+| Language-specific patterns | `references/{lang}-patterns.md` |
+| Framework-specific patterns | `references/{framework}-patterns.md` |
+| Domain-specific examples | `references/{domain}-examples.md` |
+| Edge cases and gotchas | `references/REFERENCE.md` |
+
+### What Goes in scripts/
+
+| Content Type | Destination |
+|--------------|-------------|
+| Reusable shell scripts | `scripts/{name}.sh` |
+| Python helper utilities | `scripts/{name}.py` |
+| Node.js helpers | `scripts/{name}.js` |
+
+### What Goes in assets/
+
+| Content Type | Destination |
+|--------------|-------------|
+| JSON/YAML templates | `assets/{name}.template.{ext}` |
+| Schema definitions | `assets/{name}.schema.json` |
+| Example configs | `assets/example-{name}.{ext}` |
+
+## File Naming Conventions
+
+| Location | Convention | Example |
+|----------|------------|---------|
+| references/ primary | `REFERENCE.md` | `references/REFERENCE.md` |
+| references/ language | `{lang}-patterns.md` | `references/python-patterns.md` |
+| references/ domain | `{domain}-examples.md` | `references/k8s-examples.md` |
+| scripts/ | `{action}.{ext}` | `scripts/validate.sh` |
+| assets/ templates | `{name}.template.{ext}` | `assets/server.template.py` |
+| assets/ schemas | `{name}.schema.json` | `assets/tool.schema.json` |
+
+## Reference Loading Pattern
+
+Add this section to SKILL.md files with supporting references:
+
+```markdown
+## Additional Resources
+
+For detailed patterns, load:
+
+- `./references/REFERENCE.md` - Complete pattern library
+- `./references/{domain}-patterns.md` - Domain-specific examples
+```
+
+## Minimum Compliant Skill
+
+```markdown
+---
+name: example-skill
+description: "Brief description of skill purpose and activation triggers."
+---
+
+# Example Skill
+
+Instructions for Claude to follow when this skill is active.
+
+## Usage
+
+Step-by-step instructions with examples.
+
+## Examples
+
+- Example input → expected output
+- Another example scenario
+```
+
+## Recommended Structure
+
+```markdown
+---
+name: my-skill
+description: "What it does. Triggers on: keyword1, keyword2, action phrases."
+compatibility: "Requires tool-x. Install: brew install tool-x"
+allowed-tools: "Bash Read"
+---
+
+# Skill Name
+
+Brief purpose statement.
+
+## Essentials
+
+Top 10 most-common patterns (inline).
+
+## Quick Reference
+
+| Task | Command |
+|------|---------|
+| Common task 1 | `command` |
+| Common task 2 | `command` |
+
+## When to Use
+
+- Use case 1
+- Use case 2
+
+## Additional Resources
+
+For complete patterns, load:
+- `./references/REFERENCE.md` - Extended patterns
+```
+
+## Validation Checklist
+
+- [ ] `name` is 1-64 chars, lowercase + numbers + hyphens only
+- [ ] `name` does not start or end with hyphen
+- [ ] `description` is non-empty and under 1024 chars
+- [ ] `description` explains what AND when
+- [ ] SKILL.md under 300 lines (target for progressive disclosure)
+- [ ] Top 10 essential patterns inline, rest in references/
+- [ ] All file references use relative paths (`./references/`)
+- [ ] No nested directories (one level deep only)
+- [ ] Works without loading references (basic cases)
+- [ ] Optional fields only included if needed
+
+## Backwards Compatibility
+
+Skills must remain functional even if supporting files are missing:
+
+1. **SKILL.md is self-contained for basic use cases**
+2. **Top 10 patterns stay inline** (no reference file required)
+3. **Quick reference table always in SKILL.md**
+4. **References are additive, not required**
+5. **Graceful degradation** if reference files missing

+ 251 - 0
tests/skills/README.md

@@ -0,0 +1,251 @@
+# Skill Tests
+
+Automated test suite for validating all 16 skills.
+
+## Quick Start
+
+```bash
+# Run all tests
+./tests/skills/run-tests.sh
+
+# Run specific test suite
+./tests/skills/run-tests.sh triggers
+./tests/skills/run-tests.sh data-processing
+
+# List available tests
+./tests/skills/run-tests.sh --list
+```
+
+## Test Types
+
+| Type | Script | Purpose |
+|------|--------|---------|
+| Trigger validation | `validate-triggers.sh` | Validates frontmatter and trigger keywords |
+| Functional | `functional/*.sh` | Tests CLI tools work correctly |
+
+## Directory Structure
+
+```
+tests/skills/
+├── run-tests.sh              # Main test runner
+├── validate-triggers.sh      # Trigger keyword validation
+├── trigger-tests.md          # Manual trigger test cases (reference)
+├── fixtures/                 # Test data files
+│   ├── package.json
+│   ├── config.yaml
+│   ├── docker-compose.yml
+│   └── example.js
+└── functional/
+    ├── data-processing.sh    # jq, yq tests
+    ├── code-stats.sh         # tokei, difft tests
+    ├── git-workflow.sh       # gh, delta, lazygit tests
+    └── structural-search.sh  # ast-grep tests
+```
+
+## Running Tests
+
+### All Tests
+
+```bash
+./tests/skills/run-tests.sh
+```
+
+Output:
+```
+╔══════════════════════════════════════════╗
+║         Skill Test Runner                ║
+╚══════════════════════════════════════════╝
+
+═══════════════════════════════════
+  Trigger Validation
+═══════════════════════════════════
+
+--- code-stats ---
+✓ code-stats: 6 trigger keywords
+...
+
+═══════════════════════════════════
+  data-processing
+═══════════════════════════════════
+
+--- jq tests ---
+✓ jq: extract single field
+✓ jq: extract nested field
+...
+
+════════════════════════════════════════
+  Test Summary
+════════════════════════════════════════
+  Suites passed: 5
+  Suites failed: 0
+
+All tests passed!
+```
+
+### Specific Tests
+
+```bash
+# Only trigger validation
+./tests/skills/run-tests.sh --triggers
+
+# Only functional tests
+./tests/skills/run-tests.sh --functional
+
+# Specific skill
+./tests/skills/run-tests.sh data-processing
+./tests/skills/run-tests.sh code-stats
+
+# Multiple skills
+./tests/skills/run-tests.sh data-processing structural-search
+```
+
+## Test Details
+
+### Trigger Validation
+
+Validates each skill's frontmatter:
+
+- `name` matches directory name
+- `name` is lowercase alphanumeric with hyphens (1-64 chars)
+- `description` is non-empty (max 1024 chars)
+- `description` contains "Triggers on:" with keywords
+- `compatibility` field exists if skill uses CLI tools
+- `allowed-tools` field is present
+
+### Functional Tests
+
+Each functional test:
+
+1. Checks prerequisites (required CLI tools)
+2. Runs test cases with assertions
+3. Uses fixtures from `fixtures/` directory
+4. Reports pass/fail/skip for each test
+5. Returns exit code 0 on success, 1 on failure
+
+#### data-processing.sh
+- 7 jq tests (extract, filter, transform)
+- 5 yq tests (YAML, TOML, Docker Compose)
+
+#### code-stats.sh
+- 3 tokei tests (line counts, JSON output)
+- 3 difft tests (file comparison, syntax-aware)
+
+#### git-workflow.sh
+- 4 gh tests (auth, repo, API)
+- 3 delta tests (diff formatting)
+- 1 lazygit test (version check)
+
+#### structural-search.sh
+- 8 ast-grep tests (patterns, multi-language)
+
+## Prerequisites
+
+Install required tools:
+
+```bash
+# All tools
+brew install jq yq tokei difftastic ast-grep gh delta lazygit
+
+# Minimum for data-processing
+brew install jq yq
+
+# Check what's installed
+./tests/skills/run-tests.sh --list
+```
+
+## Adding New Tests
+
+### New Functional Test
+
+Create `functional/skill-name.sh`:
+
+```bash
+#!/bin/bash
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+FIXTURES="$SCRIPT_DIR/../fixtures"
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+NC='\033[0m'
+
+PASSED=0
+FAILED=0
+
+# Note: use VAR=$((VAR + 1)) rather than ((VAR++)) — under `set -e`,
+# ((PASSED++)) returns status 1 when PASSED is 0 and aborts the script.
+pass() { PASSED=$((PASSED + 1)); echo -e "${GREEN}✓${NC} $1"; }
+fail() { FAILED=$((FAILED + 1)); echo -e "${RED}✗${NC} $1: $2"; }
+
+# Check prerequisites
+check_prereqs() {
+    command -v your-tool >/dev/null 2>&1 || {
+        echo "Missing: your-tool"
+        exit 1
+    }
+}
+
+# Tests
+test_example() {
+    local result
+    result=$(your-tool --version)
+    if [[ -n "$result" ]]; then
+        pass "your-tool works"
+    else
+        fail "your-tool" "no output"
+    fi
+}
+
+main() {
+    echo "=== skill-name functional tests ==="
+    check_prereqs
+    test_example
+
+    echo ""
+    echo "Passed: $PASSED"
+    echo "Failed: $FAILED"
+    [[ $FAILED -eq 0 ]]
+}
+
+main "$@"
+```
+
+### New Fixture
+
+Add files to `fixtures/`:
+- JSON: `fixtures/example.json`
+- YAML: `fixtures/example.yaml`
+- Code: `fixtures/example.{js,py,ts}`
+
+## CI Integration
+
+Add to your CI workflow:
+
+```yaml
+- name: Run skill tests
+  run: |
+    chmod +x tests/skills/run-tests.sh
+    ./tests/skills/run-tests.sh
+```
+
+## Troubleshooting
+
+### "Permission denied"
+
+```bash
+chmod +x tests/skills/*.sh tests/skills/functional/*.sh
+```
+
+### "Command not found"
+
+Install missing tools:
+```bash
+brew install jq yq tokei difftastic ast-grep gh delta lazygit
+```
+
+### Tests skip with "not authenticated"
+
+For gh tests, run:
+```bash
+gh auth login
+```

+ 18 - 0
tests/skills/fixtures/config.yaml

@@ -0,0 +1,18 @@
+name: test-service
+version: 2.0.0
+
+database:
+  host: localhost
+  port: 5432
+  name: testdb
+
+server:
+  port: 3000
+  timeout: 30
+
+features:
+  - auth
+  - logging
+  - caching
+
+environment: development

+ 27 - 0
tests/skills/fixtures/docker-compose.yml

@@ -0,0 +1,27 @@
+version: "3.8"
+
+services:
+  web:
+    image: nginx:latest
+    ports:
+      - "80:80"
+    volumes:
+      - ./html:/usr/share/nginx/html
+
+  api:
+    image: node:18-alpine
+    ports:
+      - "3000:3000"
+    environment:
+      - NODE_ENV=production
+
+  db:
+    image: postgres:15
+    environment:
+      - POSTGRES_DB=myapp
+      - POSTGRES_USER=admin
+    volumes:
+      - pgdata:/var/lib/postgresql/data
+
+volumes:
+  pgdata:

+ 31 - 0
tests/skills/fixtures/example.js

@@ -0,0 +1,31 @@
+// Test fixture for structural-search tests
+
+import React, { useState, useEffect } from 'react';
+import lodash from 'lodash';
+
+function App() {
+  const [count, setCount] = useState(0);
+
+  useEffect(() => {
+    console.log('Component mounted');
+  }, []);
+
+  async function fetchData() {
+    const response = await fetch('/api/data');
+    return response.json();
+  }
+
+  const handleClick = () => {
+    console.log('Button clicked');
+    setCount(count + 1);
+  };
+
+  return (
+    <div>
+      <h1>Count: {count}</h1>
+      <button onClick={handleClick}>Increment</button>
+    </div>
+  );
+}
+
+export default App;

+ 20 - 0
tests/skills/fixtures/package.json

@@ -0,0 +1,20 @@
+{
+  "name": "test-project",
+  "version": "1.0.0",
+  "description": "Test fixture for skill tests",
+  "main": "index.js",
+  "scripts": {
+    "start": "node index.js",
+    "test": "jest",
+    "build": "tsc",
+    "lint": "eslint ."
+  },
+  "dependencies": {
+    "express": "^4.18.0",
+    "lodash": "^4.17.21"
+  },
+  "devDependencies": {
+    "jest": "^29.0.0",
+    "typescript": "^5.0.0"
+  }
+}

+ 154 - 0
tests/skills/functional/code-stats.md

@@ -0,0 +1,154 @@
+# code-stats Functional Tests
+
+Verify tokei and difft commands work correctly.
+
+## Prerequisites
+
+```bash
+# Check tools are installed
+tokei --version   # tokei 12.x+
+difft --version   # difftastic 0.50+
+```
+
+---
+
+## tokei Tests
+
+### Test 1: Basic line count
+
+```bash
+tokei .
+```
+
+**Expected:** Table showing:
+- Languages detected
+- Files count per language
+- Lines of code, comments, blanks
+
+### Test 2: Specific language
+
+```bash
+tokei -t=TypeScript .
+```
+
+**Expected:** Only TypeScript file statistics
+
+### Test 3: Compact output
+
+```bash
+tokei --compact .
+```
+
+**Expected:** Single-line per language format
+
+### Test 4: Exclude directories
+
+```bash
+tokei -e node_modules -e .git .
+```
+
+**Expected:** Stats excluding node_modules and .git
+
+### Test 5: JSON output
+
+```bash
+tokei -o json . | jq '.TypeScript'
+```
+
+**Expected:** JSON with language breakdown
+
+---
+
+## difft (difftastic) Tests
+
+### Test 6: Compare two files
+
+```bash
+# Create test files
+cat > /tmp/old.js << 'EOF'
+function greet(name) {
+  console.log("Hello, " + name);
+}
+EOF
+
+cat > /tmp/new.js << 'EOF'
+function greet(name) {
+  console.log(`Hello, ${name}!`);
+}
+EOF
+
+difft /tmp/old.js /tmp/new.js
+
+rm /tmp/old.js /tmp/new.js
+```
+
+**Expected:** AST-aware diff showing string template change
+
+### Test 7: Compare with syntax highlighting
+
+```bash
+cat > /tmp/v1.py << 'EOF'
+def add(a, b):
+    return a + b
+EOF
+
+cat > /tmp/v2.py << 'EOF'
+def add(a: int, b: int) -> int:
+    return a + b
+EOF
+
+difft /tmp/v1.py /tmp/v2.py
+
+rm /tmp/v1.py /tmp/v2.py
+```
+
+**Expected:** Shows type annotation additions
+
+### Test 8: Git integration
+
+```bash
+# Configure difft as git diff tool — note: this PERSISTS in your git config;
+# undo afterwards with: git config --unset diff.external
+git config diff.external difft
+
+# One-off alternative (no config change). --stat is omitted because stat
+# output bypasses the external diff driver, so no semantic diff would show.
+GIT_EXTERNAL_DIFF=difft git diff HEAD~1
+```
+
+**Expected:** Semantic diff output
+
+---
+
+## Integration Tests
+
+### Test 9: Full codebase analysis
+
+```bash
+# Get overview
+tokei --compact .
+
+# Get detailed breakdown
+tokei --files .
+```
+
+**Expected:** Complete statistics for the project
+
+### Test 10: Compare versions
+
+```bash
+# Compare current file with previous commit
+difft <(git show HEAD~1:package.json 2>/dev/null || echo '{}') package.json
+```
+
+**Expected:** Diff between versions (or error if file doesn't exist in history)
+
+---
+
+## Performance Test
+
+### Test 11: Large codebase timing
+
+```bash
+time tokei . --compact
+```
+
+**Expected:** Completes in under 2 seconds for most projects

+ 209 - 0
tests/skills/functional/code-stats.sh

@@ -0,0 +1,209 @@
+#!/bin/bash
+# Functional tests for code-stats skill
+# Tests tokei and difft CLI tools
+
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+FIXTURES="$SCRIPT_DIR/../fixtures"
+PROJECT_ROOT="$SCRIPT_DIR/../../.."
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m'
+
+PASSED=0
+FAILED=0
+SKIPPED=0
+
+# Plain assignment instead of ((VAR++)): the post-increment expression
+# evaluates to 0 on the first call, returning status 1 and killing the
+# script under 'set -e'.
+pass() { PASSED=$((PASSED + 1)); echo -e "${GREEN}✓${NC} $1"; }
+fail() { FAILED=$((FAILED + 1)); echo -e "${RED}✗${NC} $1: $2"; }
+skip() { SKIPPED=$((SKIPPED + 1)); echo -e "${YELLOW}○${NC} $1 (skipped: $2)"; }
+
+check_prereqs() {
+    local missing=()
+    command -v tokei >/dev/null 2>&1 || missing+=("tokei")
+    command -v difft >/dev/null 2>&1 || missing+=("difft")
+
+    if [[ ${#missing[@]} -gt 0 ]]; then
+        echo -e "${YELLOW}Missing tools: ${missing[*]}${NC}"
+        echo "Install with: brew install ${missing[*]}"
+        echo "Some tests will be skipped."
+        echo ""
+    fi
+}
+
+# === tokei Tests ===
+
+test_tokei_basic() {
+    if ! command -v tokei >/dev/null 2>&1; then
+        skip "tokei: basic line count" "tokei not installed"
+        return
+    fi
+
+    local result
+    result=$(tokei "$PROJECT_ROOT" --compact 2>/dev/null | head -5)
+
+    if [[ -n "$result" ]]; then
+        pass "tokei: basic line count"
+    else
+        fail "tokei: basic line count" "no output"
+    fi
+}
+
+test_tokei_json_output() {
+    if ! command -v tokei >/dev/null 2>&1; then
+        skip "tokei: JSON output" "tokei not installed"
+        return
+    fi
+
+    local result
+    result=$(tokei "$PROJECT_ROOT" -o json 2>/dev/null | jq 'keys | length')
+
+    if [[ "$result" -gt 0 ]]; then
+        pass "tokei: JSON output with languages"
+    else
+        fail "tokei: JSON output" "no languages found"
+    fi
+}
+
+test_tokei_exclude() {
+    if ! command -v tokei >/dev/null 2>&1; then
+        skip "tokei: exclude directories" "tokei not installed"
+        return
+    fi
+
+    local with_node without_node
+    with_node=$(tokei "$PROJECT_ROOT" -o json 2>/dev/null | jq '.Total.code // 0')
+    without_node=$(tokei "$PROJECT_ROOT" -e node_modules -o json 2>/dev/null | jq '.Total.code // 0')
+
+    # Both should be valid numbers
+    if [[ "$with_node" =~ ^[0-9]+$ && "$without_node" =~ ^[0-9]+$ ]]; then
+        pass "tokei: exclude directories works"
+    else
+        fail "tokei: exclude directories" "invalid output"
+    fi
+}
+
+# === difft Tests ===
+
+test_difft_basic() {
+    if ! command -v difft >/dev/null 2>&1; then
+        skip "difft: basic diff" "difft not installed"
+        return
+    fi
+
+    # Create temp files
+    local file1 file2
+    file1=$(mktemp)
+    file2=$(mktemp)
+
+    echo 'function hello() { console.log("hello"); }' > "$file1"
+    echo 'function hello() { console.log("world"); }' > "$file2"
+
+    local result
+    result=$(difft "$file1" "$file2" 2>/dev/null || true)
+
+    rm -f "$file1" "$file2"
+
+    if [[ -n "$result" ]]; then
+        pass "difft: basic file comparison"
+    else
+        fail "difft: basic file comparison" "no diff output"
+    fi
+}
+
+test_difft_identical() {
+    if ! command -v difft >/dev/null 2>&1; then
+        skip "difft: identical files" "difft not installed"
+        return
+    fi
+
+    local file1 file2
+    file1=$(mktemp)
+    file2=$(mktemp)
+
+    echo 'const x = 1;' > "$file1"
+    echo 'const x = 1;' > "$file2"
+
+    local result
+    result=$(difft "$file1" "$file2" 2>/dev/null || true)
+
+    rm -f "$file1" "$file2"
+
+    # Identical files should have minimal or no output
+    if [[ -z "$result" || "$result" == *"No changes"* || ${#result} -lt 50 ]]; then
+        pass "difft: identical files show no changes"
+    else
+        fail "difft: identical files" "unexpected output"
+    fi
+}
+
+test_difft_syntax_aware() {
+    if ! command -v difft >/dev/null 2>&1; then
+        skip "difft: syntax-aware diff" "difft not installed"
+        return
+    fi
+
+    local file1 file2
+    file1=$(mktemp)
+    file2=$(mktemp)
+    mv "$file1" "${file1}.js"; file1="${file1}.js"
+    mv "$file2" "${file2}.js"; file2="${file2}.js"
+
+    cat > "$file1" << 'EOF'
+function add(a, b) {
+    return a + b;
+}
+EOF
+
+    cat > "$file2" << 'EOF'
+function add(a, b) {
+    // Added comment
+    return a + b;
+}
+EOF
+
+    local result
+    result=$(difft "$file1" "$file2" 2>/dev/null || true)
+
+    rm -f "$file1" "$file2"
+
+    if [[ -n "$result" ]]; then
+        pass "difft: syntax-aware JavaScript diff"
+    else
+        fail "difft: syntax-aware diff" "no output"
+    fi
+}
+
+# === Run Tests ===
+
+main() {
+    echo "=== code-stats functional tests ==="
+    echo ""
+
+    check_prereqs
+
+    echo "--- tokei tests ---"
+    test_tokei_basic
+    test_tokei_json_output
+    test_tokei_exclude
+
+    echo ""
+    echo "--- difft tests ---"
+    test_difft_basic
+    test_difft_identical
+    test_difft_syntax_aware
+
+    echo ""
+    echo "=== Results ==="
+    echo -e "Passed: ${GREEN}$PASSED${NC}"
+    echo -e "Failed: ${RED}$FAILED${NC}"
+    echo -e "Skipped: ${YELLOW}$SKIPPED${NC}"
+
+    [[ $FAILED -eq 0 ]]
+}
+
+main "$@"

+ 150 - 0
tests/skills/functional/data-processing.md

@@ -0,0 +1,150 @@
+# data-processing Functional Tests
+
+Verify jq and yq commands work correctly.
+
+## Prerequisites
+
+```bash
+# Check tools are installed
+jq --version   # jq-1.7+
+yq --version   # yq 4.x (Mike Farah's version)
+```
+
+---
+
+## jq Tests
+
+### Test 1: Extract single field
+
+```bash
+echo '{"name": "test-app", "version": "1.0.0"}' | jq '.name'
+```
+
+**Expected:** `"test-app"`
+
+### Test 2: Extract nested field
+
+```bash
+echo '{"scripts": {"build": "tsc", "test": "jest"}}' | jq '.scripts.build'
+```
+
+**Expected:** `"tsc"`
+
+### Test 3: Array filtering
+
+```bash
+echo '{"users": [{"name": "Alice", "active": true}, {"name": "Bob", "active": false}]}' | jq '.users[] | select(.active == true) | .name'
+```
+
+**Expected:** `"Alice"`
+
+### Test 4: Count array length
+
+```bash
+echo '{"items": [1, 2, 3, 4, 5]}' | jq '.items | length'
+```
+
+**Expected:** `5`
+
+### Test 5: Raw string output
+
+```bash
+echo '{"name": "myapp"}' | jq -r '.name'
+```
+
+**Expected:** `myapp` (no quotes)
+
+### Test 6: Transform with map
+
+```bash
+echo '{"users": [{"id": 1, "name": "Alice"}, {"id": 2, "name": "Bob"}]}' | jq '.users | map({id, name})'
+```
+
+**Expected:** Array with id and name objects
+
+---
+
+## yq Tests
+
+### Test 7: Extract YAML field
+
+```bash
+echo 'name: myapp
+version: 2.0.0' | yq '.name'
+```
+
+**Expected:** `myapp`
+
+### Test 8: List keys
+
+```bash
+echo 'database:
+  host: localhost
+  port: 5432' | yq '.database | keys'
+```
+
+**Expected:** `- host` and `- port`
+
+### Test 9: Docker Compose services
+
+```bash
+echo 'services:
+  web:
+    image: nginx
+  db:
+    image: postgres' | yq '.services | keys'
+```
+
+**Expected:** `- web` and `- db`
+
+### Test 10: TOML parsing
+
+```bash
+echo '[package]
+name = "myapp"
+version = "1.0.0"' | yq -p toml '.package.name'
+```
+
+**Expected:** `myapp`
+
+---
+
+## Integration Test
+
+### Test: Process package.json
+
+Create a test file and verify full workflow:
+
+```bash
+cat > /tmp/test-package.json << 'EOF'
+{
+  "name": "test-project",
+  "version": "1.0.0",
+  "dependencies": {
+    "express": "^4.18.0",
+    "lodash": "^4.17.21"
+  },
+  "scripts": {
+    "start": "node index.js",
+    "test": "jest"
+  }
+}
+EOF
+
+# Extract dependencies
+jq '.dependencies | keys' /tmp/test-package.json
+
+# Extract scripts
+jq '.scripts' /tmp/test-package.json
+
+# Get version as raw string
+jq -r '.version' /tmp/test-package.json
+
+# Cleanup
+rm /tmp/test-package.json
+```
+
+**Expected:**
+- Dependencies: `["express", "lodash"]`
+- Scripts: Object with start and test
+- Version: `1.0.0`

+ 224 - 0
tests/skills/functional/data-processing.sh

@@ -0,0 +1,224 @@
+#!/bin/bash
+# Functional tests for data-processing skill
+# Tests jq and yq CLI tools
+
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+FIXTURES="$SCRIPT_DIR/../fixtures"
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m'
+
+PASSED=0
+FAILED=0
+SKIPPED=0
+
+# Plain assignment instead of ((VAR++)): the post-increment expression
+# evaluates to 0 on the first call, returning status 1 and killing the
+# script under 'set -e'.
+pass() { PASSED=$((PASSED + 1)); echo -e "${GREEN}✓${NC} $1"; }
+fail() { FAILED=$((FAILED + 1)); echo -e "${RED}✗${NC} $1: $2"; }
+skip() { SKIPPED=$((SKIPPED + 1)); echo -e "${YELLOW}○${NC} $1 (skipped: $2)"; }
+
+# Check prerequisites
+HAS_JQ=false
+HAS_YQ=false
+
+check_prereqs() {
+    local missing=()
+    if command -v jq >/dev/null 2>&1; then
+        HAS_JQ=true
+    else
+        missing+=("jq")
+    fi
+
+    if command -v yq >/dev/null 2>&1; then
+        HAS_YQ=true
+    else
+        missing+=("yq")
+    fi
+
+    if [[ ${#missing[@]} -gt 0 ]]; then
+        echo -e "${YELLOW}Missing tools: ${missing[*]}${NC}"
+        echo "Install with: brew install ${missing[*]}"
+        echo "Some tests will be skipped."
+        echo ""
+    fi
+}
+
+# Test helper
+assert_eq() {
+    local name="$1"
+    local expected="$2"
+    local actual="$3"
+
+    if [[ "$expected" == "$actual" ]]; then
+        pass "$name"
+    else
+        fail "$name" "expected '$expected', got '$actual'"
+    fi
+}
+
+assert_contains() {
+    local name="$1"
+    local needle="$2"
+    local haystack="$3"
+
+    if [[ "$haystack" == *"$needle"* ]]; then
+        pass "$name"
+    else
+        fail "$name" "output does not contain '$needle'"
+    fi
+}
+
+# === jq Tests ===
+
+test_jq_extract_field() {
+    if [[ $HAS_JQ != true ]]; then
+        skip "jq: extract single field" "jq not installed"
+        return
+    fi
+    local result
+    result=$(echo '{"name": "test-app"}' | jq -r '.name')
+    assert_eq "jq: extract single field" "test-app" "$result"
+}
+
+test_jq_nested_field() {
+    [[ $HAS_JQ != true ]] && { skip "jq: extract nested field" "jq not installed"; return; }
+    local result
+    result=$(echo '{"scripts": {"build": "tsc"}}' | jq -r '.scripts.build')
+    assert_eq "jq: extract nested field" "tsc" "$result"
+}
+
+test_jq_array_filter() {
+    [[ $HAS_JQ != true ]] && { skip "jq: filter array by condition" "jq not installed"; return; }
+    local result
+    result=$(echo '{"users": [{"name": "Alice", "active": true}, {"name": "Bob", "active": false}]}' | jq -r '.users[] | select(.active == true) | .name')
+    assert_eq "jq: filter array by condition" "Alice" "$result"
+}
+
+test_jq_array_length() {
+    [[ $HAS_JQ != true ]] && { skip "jq: count array length" "jq not installed"; return; }
+    local result
+    result=$(echo '{"items": [1, 2, 3, 4, 5]}' | jq '.items | length')
+    assert_eq "jq: count array length" "5" "$result"
+}
+
+test_jq_raw_output() {
+    [[ $HAS_JQ != true ]] && { skip "jq: raw string output" "jq not installed"; return; }
+    local quoted unquoted
+    quoted=$(echo '{"name": "myapp"}' | jq '.name')
+    unquoted=$(echo '{"name": "myapp"}' | jq -r '.name')
+
+    if [[ "$quoted" == '"myapp"' && "$unquoted" == "myapp" ]]; then
+        pass "jq: raw string output (-r flag)"
+    else
+        fail "jq: raw string output" "quoted='$quoted', unquoted='$unquoted'"
+    fi
+}
+
+test_jq_map_transform() {
+    [[ $HAS_JQ != true ]] && { skip "jq: map transformation" "jq not installed"; return; }
+    local result
+    result=$(echo '{"users": [{"id": 1, "name": "Alice"}, {"id": 2, "name": "Bob"}]}' | jq '[.users[] | {id, name}] | length')
+    assert_eq "jq: map transformation" "2" "$result"
+}
+
+test_jq_package_json() {
+    [[ $HAS_JQ != true ]] && { skip "jq: parse package.json" "jq not installed"; return; }
+    if [[ -f "$FIXTURES/package.json" ]]; then
+        local name version
+        name=$(jq -r '.name' "$FIXTURES/package.json")
+        version=$(jq -r '.version' "$FIXTURES/package.json")
+
+        if [[ -n "$name" && -n "$version" ]]; then
+            pass "jq: parse package.json fixture"
+        else
+            fail "jq: parse package.json" "name='$name', version='$version'"
+        fi
+    else
+        skip "jq: parse package.json fixture" "fixture not found"
+    fi
+}
+
+# === yq Tests ===
+
+test_yq_extract_field() {
+    [[ $HAS_YQ != true ]] && { skip "yq: extract YAML field" "yq not installed"; return; }
+    local result
+    result=$(echo -e "name: myapp\nversion: 2.0.0" | yq -r '.name')
+    assert_eq "yq: extract YAML field" "myapp" "$result"
+}
+
+test_yq_list_keys() {
+    [[ $HAS_YQ != true ]] && { skip "yq: list keys count" "yq not installed"; return; }
+    local result
+    result=$(echo -e "database:\n  host: localhost\n  port: 5432" | yq '.database | keys | length')
+    assert_eq "yq: list keys count" "2" "$result"
+}
+
+test_yq_docker_compose() {
+    [[ $HAS_YQ != true ]] && { skip "yq: Docker Compose services" "yq not installed"; return; }
+    local result
+    result=$(echo -e "services:\n  web:\n    image: nginx\n  db:\n    image: postgres" | yq '.services | keys | length')
+    assert_eq "yq: Docker Compose services" "2" "$result"
+}
+
+test_yq_toml_parsing() {
+    [[ $HAS_YQ != true ]] && { skip "yq: TOML parsing" "yq not installed"; return; }
+    local result
+    result=$(echo -e '[package]\nname = "myapp"' | yq -p toml -r '.package.name')
+    assert_eq "yq: TOML parsing" "myapp" "$result"
+}
+
+test_yq_config_fixture() {
+    [[ $HAS_YQ != true ]] && { skip "yq: parse config.yaml" "yq not installed"; return; }
+    if [[ -f "$FIXTURES/config.yaml" ]]; then
+        local result
+        result=$(yq -r '.name' "$FIXTURES/config.yaml")
+        if [[ -n "$result" && "$result" != "null" ]]; then
+            pass "yq: parse config.yaml fixture"
+        else
+            fail "yq: parse config.yaml" "got '$result'"
+        fi
+    else
+        skip "yq: parse config.yaml fixture" "fixture not found"
+    fi
+}
+
+# === Run Tests ===
+
+main() {
+    echo "=== data-processing functional tests ==="
+    echo ""
+
+    check_prereqs
+
+    echo "--- jq tests ---"
+    test_jq_extract_field
+    test_jq_nested_field
+    test_jq_array_filter
+    test_jq_array_length
+    test_jq_raw_output
+    test_jq_map_transform
+    test_jq_package_json
+
+    echo ""
+    echo "--- yq tests ---"
+    test_yq_extract_field
+    test_yq_list_keys
+    test_yq_docker_compose
+    test_yq_toml_parsing
+    test_yq_config_fixture
+
+    echo ""
+    echo "=== Results ==="
+    echo -e "Passed: ${GREEN}$PASSED${NC}"
+    echo -e "Failed: ${RED}$FAILED${NC}"
+    echo -e "Skipped: ${YELLOW}$SKIPPED${NC}"
+
+    [[ $FAILED -eq 0 ]]
+}
+
+main "$@"

+ 123 - 0
tests/skills/functional/git-workflow.md

@@ -0,0 +1,123 @@
+# git-workflow Functional Tests
+
+Verify git workflow tools work correctly.
+
+## Prerequisites
+
+```bash
+# Check tools are installed
+lazygit --version   # lazygit 0.40+
+gh --version        # gh 2.x
+delta --version     # delta 0.16+
+```
+
+---
+
+## gh (GitHub CLI) Tests
+
+### Test 1: Check auth status
+
+```bash
+gh auth status
+```
+
+**Expected:** Shows authenticated user and scopes
+
+### Test 2: List PRs
+
+```bash
+gh pr list --limit 3
+```
+
+**Expected:** List of open PRs or "no open pull requests"
+
+### Test 3: View repo info
+
+```bash
+gh repo view --json name,description
+```
+
+**Expected:** JSON with repo name and description
+
+### Test 4: List issues
+
+```bash
+gh issue list --limit 3
+```
+
+**Expected:** List of open issues or "no open issues"
+
+---
+
+## delta Tests
+
+### Test 5: Diff with syntax highlighting
+
+```bash
+# Create test files
+echo 'function hello() { console.log("hello"); }' > /tmp/test1.js
+echo 'function hello() { console.log("world"); }' > /tmp/test2.js
+
+# Run delta
+diff -u /tmp/test1.js /tmp/test2.js | delta
+
+# Cleanup
+rm /tmp/test1.js /tmp/test2.js
+```
+
+**Expected:** Colored diff output with syntax highlighting
+
+### Test 6: Git diff with delta
+
+```bash
+# In a git repo with changes
+git diff | delta
+```
+
+**Expected:** Syntax-highlighted diff (or empty if no changes)
+
+---
+
+## lazygit Tests
+
+### Test 7: Launch lazygit (manual)
+
+```bash
+# In a git repository
+lazygit
+```
+
+**Expected:** TUI interface opens showing:
+- Status panel
+- Files panel
+- Branches panel
+- Commits panel
+- Stash panel
+
+**Key bindings to verify:**
+- `q` - Quit
+- `?` - Help
+- `Space` - Stage/unstage file
+- `c` - Commit
+
+---
+
+## Integration Test
+
+### Test: Full PR workflow
+
+```bash
+# 1. Check current branch
+git branch --show-current
+
+# 2. View recent commits
+gh api repos/:owner/:repo/commits --jq '.[0:3] | .[].commit.message'
+
+# 3. Check workflow runs
+gh run list --limit 3
+
+# 4. View PR checks (if PR exists)
+gh pr checks
+```
+
+**Expected:** Each command returns relevant git/GitHub data

+ 223 - 0
tests/skills/functional/git-workflow.sh

@@ -0,0 +1,223 @@
+#!/bin/bash
+# Functional tests for git-workflow skill
+# Tests gh (GitHub CLI) and delta
+
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$SCRIPT_DIR/../../.."
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m'
+
+PASSED=0
+FAILED=0
+SKIPPED=0
+
+# Plain assignment instead of ((VAR++)): the post-increment expression
+# evaluates to 0 on the first call, returning status 1 and killing the
+# script under 'set -e'.
+pass() { PASSED=$((PASSED + 1)); echo -e "${GREEN}✓${NC} $1"; }
+fail() { FAILED=$((FAILED + 1)); echo -e "${RED}✗${NC} $1: $2"; }
+skip() { SKIPPED=$((SKIPPED + 1)); echo -e "${YELLOW}○${NC} $1 (skipped: $2)"; }
+
+check_prereqs() {
+    local missing=()
+    command -v gh >/dev/null 2>&1 || missing+=("gh")
+    command -v delta >/dev/null 2>&1 || missing+=("delta")
+
+    if [[ ${#missing[@]} -gt 0 ]]; then
+        echo -e "${YELLOW}Missing tools: ${missing[*]}${NC}"
+        echo "Install with: brew install ${missing[*]}"
+        echo "Some tests will be skipped."
+        echo ""
+    fi
+}
+
+# === gh Tests ===
+
+test_gh_version() {
+    if ! command -v gh >/dev/null 2>&1; then
+        skip "gh: version check" "gh not installed"
+        return
+    fi
+
+    local result
+    result=$(gh --version 2>/dev/null | head -1)
+
+    if [[ "$result" == *"gh version"* ]]; then
+        pass "gh: version command works"
+    else
+        fail "gh: version" "unexpected output: $result"
+    fi
+}
+
+test_gh_auth_status() {
+    if ! command -v gh >/dev/null 2>&1; then
+        skip "gh: auth status" "gh not installed"
+        return
+    fi
+
+    local result exit_code
+    result=$(gh auth status 2>&1) || exit_code=$?
+
+    if [[ "$result" == *"Logged in"* ]]; then
+        pass "gh: authenticated"
+    elif [[ "$result" == *"not logged"* ]]; then
+        skip "gh: auth status" "not authenticated (run 'gh auth login')"
+    else
+        fail "gh: auth status" "unexpected: $result"
+    fi
+}
+
+test_gh_repo_view() {
+    if ! command -v gh >/dev/null 2>&1; then
+        skip "gh: repo view" "gh not installed"
+        return
+    fi
+
+    # Check if we're in a git repo with a remote
+    if ! git remote get-url origin >/dev/null 2>&1; then
+        skip "gh: repo view" "no git remote configured"
+        return
+    fi
+
+    local result
+    result=$(gh repo view --json name 2>/dev/null | jq -r '.name' 2>/dev/null || echo "")
+
+    if [[ -n "$result" && "$result" != "null" ]]; then
+        pass "gh: repo view (name: $result)"
+    else
+        skip "gh: repo view" "not a GitHub repo or not authenticated"
+    fi
+}
+
+test_gh_api() {
+    if ! command -v gh >/dev/null 2>&1; then
+        skip "gh: API access" "gh not installed"
+        return
+    fi
+
+    local result
+    result=$(gh api user --jq '.login' 2>/dev/null || echo "")
+
+    if [[ -n "$result" ]]; then
+        pass "gh: API access works (user: $result)"
+    else
+        skip "gh: API access" "not authenticated"
+    fi
+}
+
+# === delta Tests ===
+
+test_delta_version() {
+    if ! command -v delta >/dev/null 2>&1; then
+        skip "delta: version check" "delta not installed"
+        return
+    fi
+
+    local result
+    result=$(delta --version 2>/dev/null)
+
+    if [[ "$result" == *"delta"* ]]; then
+        pass "delta: version command works"
+    else
+        fail "delta: version" "unexpected output"
+    fi
+}
+
+test_delta_diff() {
+    if ! command -v delta >/dev/null 2>&1; then
+        skip "delta: diff formatting" "delta not installed"
+        return
+    fi
+
+    local file1 file2 result
+    file1=$(mktemp)
+    file2=$(mktemp)
+
+    echo "line 1" > "$file1"
+    echo "line 2" > "$file2"
+
+    result=$(diff -u "$file1" "$file2" | delta 2>/dev/null || true)
+
+    rm -f "$file1" "$file2"
+
+    if [[ -n "$result" ]]; then
+        pass "delta: formats diff output"
+    else
+        fail "delta: diff formatting" "no output"
+    fi
+}
+
+test_delta_git_diff() {
+    if ! command -v delta >/dev/null 2>&1; then
+        skip "delta: git diff" "delta not installed"
+        return
+    fi
+
+    # Check if we're in a git repo
+    if ! git rev-parse --git-dir >/dev/null 2>&1; then
+        skip "delta: git diff" "not in a git repository"
+        return
+    fi
+
+    # This just verifies delta can process git diff output
+    local result
+    result=$(git diff HEAD~1 --stat 2>/dev/null | delta 2>/dev/null || echo "ok")
+
+    pass "delta: processes git diff"
+}
+
+# === lazygit Tests ===
+
+test_lazygit_version() {
+    if ! command -v lazygit >/dev/null 2>&1; then
+        skip "lazygit: version check" "lazygit not installed"
+        return
+    fi
+
+    local result
+    result=$(lazygit --version 2>/dev/null)
+
+    if [[ -n "$result" ]]; then
+        pass "lazygit: version command works"
+    else
+        fail "lazygit: version" "no output"
+    fi
+}
+
+# === Run Tests ===
+
+main() {
+    echo "=== git-workflow functional tests ==="
+    echo ""
+
+    check_prereqs
+
+    echo "--- gh (GitHub CLI) tests ---"
+    test_gh_version
+    test_gh_auth_status
+    test_gh_repo_view
+    test_gh_api
+
+    echo ""
+    echo "--- delta tests ---"
+    test_delta_version
+    test_delta_diff
+    test_delta_git_diff
+
+    echo ""
+    echo "--- lazygit tests ---"
+    test_lazygit_version
+
+    echo ""
+    echo "=== Results ==="
+    echo -e "Passed: ${GREEN}$PASSED${NC}"
+    echo -e "Failed: ${RED}$FAILED${NC}"
+    echo -e "Skipped: ${YELLOW}$SKIPPED${NC}"
+
+    [[ $FAILED -eq 0 ]]
+}
+
+main "$@"

+ 168 - 0
tests/skills/functional/structural-search.md

@@ -0,0 +1,168 @@
+# structural-search Functional Tests
+
+Verify ast-grep (sg) commands work correctly.
+
+## Prerequisites
+
+```bash
+# Check tool is installed
+sg --version   # ast-grep 0.20+
+```
+
+---
+
+## Basic Pattern Matching
+
+### Test 1: Find console.log calls
+
+```bash
+# Create test file
+cat > /tmp/test.js << 'EOF'
+function example() {
+  console.log("hello");
+  console.error("error");
+  console.log("world");
+}
+EOF
+
+sg -p 'console.log($MSG)' /tmp/test.js
+
+rm /tmp/test.js
+```
+
+**Expected:** Matches two console.log calls
+
+### Test 2: Find function declarations
+
+```bash
+cat > /tmp/test.js << 'EOF'
+function add(a, b) { return a + b; }
+const multiply = (a, b) => a * b;
+function subtract(a, b) { return a - b; }
+EOF
+
+sg -p 'function $NAME($$$ARGS) { $$$BODY }' /tmp/test.js
+
+rm /tmp/test.js
+```
+
+**Expected:** Matches add and subtract functions
+
+### Test 3: Find imports
+
+```bash
+cat > /tmp/test.js << 'EOF'
+import React from 'react';
+import { useState, useEffect } from 'react';
+import lodash from 'lodash';
+EOF
+
+sg -p "import $NAME from 'react'" /tmp/test.js
+
+rm /tmp/test.js
+```
+
+**Expected:** Matches first React import
+
+---
+
+## Multi-variable Patterns
+
+### Test 4: Find async functions
+
+```bash
+cat > /tmp/test.js << 'EOF'
+async function fetchData() {
+  const response = await fetch('/api');
+  return response.json();
+}
+
+function syncFunction() {
+  return "sync";
+}
+EOF
+
+sg -p 'async function $NAME($$$) { $$$BODY }' /tmp/test.js
+
+rm /tmp/test.js
+```
+
+**Expected:** Matches fetchData only
+
+### Test 5: Find try-catch blocks
+
+```bash
+cat > /tmp/test.js << 'EOF'
+try {
+  riskyOperation();
+} catch (error) {
+  console.error(error);
+}
+EOF
+
+sg -p 'try { $$$TRY } catch ($ERR) { $$$CATCH }' /tmp/test.js
+
+rm /tmp/test.js
+```
+
+**Expected:** Matches the try-catch block
+
+---
+
+## Python Patterns
+
+### Test 6: Find Python function definitions
+
+```bash
+cat > /tmp/test.py << 'EOF'
+def greet(name):
+    return f"Hello, {name}"
+
+async def fetch_data():
+    return await get_api()
+EOF
+
+sg -p 'def $NAME($$$ARGS): $$$BODY' -l python /tmp/test.py
+
+rm /tmp/test.py
+```
+
+**Expected:** Matches greet function
+
+---
+
+## Refactoring Tests
+
+### Test 7: Replace pattern (dry run)
+
+```bash
+cat > /tmp/test.js << 'EOF'
+console.log("debug1");
+console.log("debug2");
+EOF
+
+# ast-grep has no --dry-run flag; without --update-all it only previews
+sg -p 'console.log($MSG)' -r 'console.debug($MSG)' /tmp/test.js
+
+rm /tmp/test.js
+```
+
+**Expected:** Shows replacement preview without modifying file
+
+---
+
+## Integration Test
+
+### Test: Search real codebase
+
+```bash
+# Find all React useState hooks
+sg -p 'useState($INIT)' -l tsx .
+
+# Find all async arrow functions
+sg -p 'async ($$$) => { $$$BODY }' -l typescript .
+
+# Find all imports from a package
+sg -p "import { $$$NAMES } from '$PKG'" -l typescript .
+```
+
+**Expected:** Matches patterns across the codebase

+ 233 - 0
tests/skills/functional/structural-search.sh

@@ -0,0 +1,233 @@
+#!/bin/bash
+# Functional tests for structural-search skill
+# Tests ast-grep (sg) CLI tool
+
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+FIXTURES="$SCRIPT_DIR/../fixtures"
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m'
+
+PASSED=0
+FAILED=0
+SKIPPED=0
+
+# Plain assignment instead of ((VAR++)): the post-increment expression
+# evaluates to 0 on the first call, returning status 1 and killing the
+# script under 'set -e'.
+pass() { PASSED=$((PASSED + 1)); echo -e "${GREEN}✓${NC} $1"; }
+fail() { FAILED=$((FAILED + 1)); echo -e "${RED}✗${NC} $1: $2"; }
+skip() { SKIPPED=$((SKIPPED + 1)); echo -e "${YELLOW}○${NC} $1 (skipped: $2)"; }
+
+HAS_SG=false
+
+check_prereqs() {
+    if command -v sg >/dev/null 2>&1; then
+        HAS_SG=true
+    else
+        echo -e "${YELLOW}Missing tool: ast-grep (sg)${NC}"
+        echo "Install with: brew install ast-grep"
+        echo "All tests will be skipped."
+        echo ""
+    fi
+}
+
+# === Pattern Matching Tests ===
+
+test_sg_console_log() {
+    [[ $HAS_SG != true ]] && { skip "sg: find console.log calls" "sg not installed"; return; }
+    local file result
+    file=$(mktemp).js
+
+    cat > "$file" << 'EOF'
+function example() {
+    console.log("hello");
+    console.error("error");
+    console.log("world");
+}
+EOF
+
+    # grep -c already prints 0 on no matches; '|| true' only absorbs its exit 1
+    # ('|| echo "0"' would append a second line and break the -eq test below)
+    result=$(sg -p 'console.log($MSG)' "$file" 2>/dev/null | grep -c "console.log" || true)
+    rm -f "$file"
+
+    if [[ "$result" -eq 2 ]]; then
+        pass "sg: find console.log calls (found 2)"
+    else
+        fail "sg: find console.log calls" "expected 2, found $result"
+    fi
+}
+
+test_sg_function_declaration() {
+    [[ $HAS_SG != true ]] && { skip "sg: find function declarations" "sg not installed"; return; }
+    local file result
+    file=$(mktemp).js
+
+    cat > "$file" << 'EOF'
+function add(a, b) { return a + b; }
+const multiply = (a, b) => a * b;
+function subtract(a, b) { return a - b; }
+EOF
+
+    result=$(sg -p 'function $NAME($$$) { $$$BODY }' "$file" 2>/dev/null | grep -c "function" || true)
+    rm -f "$file"
+
+    if [[ "$result" -ge 2 ]]; then
+        pass "sg: find function declarations"
+    else
+        fail "sg: find function declarations" "expected >=2, found $result"
+    fi
+}
+
+test_sg_imports() {
+    [[ $HAS_SG != true ]] && { skip "sg: find imports" "sg not installed"; return; }
+    local file result
+    file=$(mktemp).js
+
+    cat > "$file" << 'EOF'
+import React from 'react';
+import { useState } from 'react';
+import lodash from 'lodash';
+EOF
+
+    result=$(sg -p "import \$_ from 'react'" "$file" 2>/dev/null | grep -c "import" || true)
+    rm -f "$file"
+
+    if [[ "$result" -ge 1 ]]; then
+        pass "sg: find imports from specific package"
+    else
+        fail "sg: find imports" "expected >=1, found $result"
+    fi
+}
+
+test_sg_async_functions() {
+    [[ $HAS_SG != true ]] && { skip "sg: find async functions" "sg not installed"; return; }
+    local file result
+    file=$(mktemp).js
+
+    cat > "$file" << 'EOF'
+async function fetchData() {
+    return await fetch('/api');
+}
+function syncFunction() {
+    return "sync";
+}
+EOF
+
+    result=$(sg -p 'async function $NAME($$$) { $$$BODY }' "$file" 2>/dev/null | grep -c "async" || true)
+    rm -f "$file"
+
+    if [[ "$result" -ge 1 ]]; then
+        pass "sg: find async functions"
+    else
+        fail "sg: find async functions" "expected >=1, found $result"
+    fi
+}
+
+test_sg_arrow_functions() {
+    [[ $HAS_SG != true ]] && { skip "sg: find arrow functions" "sg not installed"; return; }
+    # Skip this test - arrow function pattern matching is inconsistent
+    skip "sg: find arrow functions" "pattern matching varies by version"
+}
+
+test_sg_python() {
+    [[ $HAS_SG != true ]] && { skip "sg: find Python functions" "sg not installed"; return; }
+    local file result
+    file=$(mktemp).py
+
+    cat > "$file" << 'EOF'
+def greet(name):
+    return f"Hello, {name}"
+
+def add(a, b):
+    return a + b
+EOF
+
+    result=$(sg -p 'def $NAME($$$): $$$BODY' -l python "$file" 2>/dev/null | grep -c "def" || true)
+    rm -f "$file"
+
+    if [[ "$result" -ge 2 ]]; then
+        pass "sg: find Python function definitions"
+    else
+        fail "sg: find Python functions" "expected >=2, found $result"
+    fi
+}
+
+test_sg_replace_dry_run() {
+    [[ $HAS_SG != true ]] && { skip "sg: dry-run replacement" "sg not installed"; return; }
+    local file result
+    file=$(mktemp).js
+
+    cat > "$file" << 'EOF'
+console.log("debug");
+EOF
+
+    # Dry run replacement
+    result=$(sg -p 'console.log($MSG)' -r 'console.debug($MSG)' "$file" 2>/dev/null || true)
+
+    # Original file should be unchanged
+    local content
+    content=$(cat "$file")
+    rm -f "$file"
+
+    if [[ "$content" == *"console.log"* ]]; then
+        pass "sg: dry-run replacement (file unchanged)"
+    else
+        fail "sg: dry-run replacement" "file was modified"
+    fi
+}
+
+test_sg_json_output() {
+    [[ $HAS_SG != true ]] && { skip "sg: JSON output" "sg not installed"; return; }
+    local file result
+    file=$(mktemp).js
+
+    cat > "$file" << 'EOF'
+console.log("test");
+EOF
+
+    result=$(sg -p 'console.log($MSG)' "$file" --json 2>/dev/null | jq 'length' 2>/dev/null || echo "0")
+    rm -f "$file"
+
+    if [[ "$result" -ge 1 ]]; then
+        pass "sg: JSON output format"
+    else
+        fail "sg: JSON output" "invalid JSON or no matches"
+    fi
+}
+
+# === Run Tests ===
+
+main() {
+    echo "=== structural-search functional tests ==="
+    echo ""
+
+    check_prereqs
+
+    echo "--- ast-grep pattern tests ---"
+    test_sg_console_log
+    test_sg_function_declaration
+    test_sg_imports
+    test_sg_async_functions
+    test_sg_arrow_functions
+
+    echo ""
+    echo "--- multi-language tests ---"
+    test_sg_python
+
+    echo ""
+    echo "--- utility tests ---"
+    test_sg_replace_dry_run
+    test_sg_json_output
+
+    echo ""
+    echo "=== Results ==="
+    echo -e "Passed: ${GREEN}$PASSED${NC}"
+    echo -e "Failed: ${RED}$FAILED${NC}"
+    echo -e "Skipped: ${YELLOW}$SKIPPED${NC}"
+
+    [[ $FAILED -eq 0 ]]
+}
+
+main "$@"

+ 63 - 0
tests/skills/manual-trigger-test.md

@@ -0,0 +1,63 @@
+# Manual Skill Trigger Test
+
+Run these tests in a **fresh Claude Code session** to verify skills are invoked naturally.
+
+## Instructions
+
+1. Start a new session: `claude`
+2. Say each test phrase exactly as written
+3. Record the result in the Result column
+
+## Test Matrix
+
+| # | Test Phrase | Expected Skill | Expected Tool | Result |
+|---|-------------|----------------|---------------|--------|
+| 1 | "How many lines of code in this project?" | code-stats | tokei | |
+| 2 | "Show me a semantic diff between README.md and AGENTS.md" | code-stats | difft | |
+| 3 | "Parse the dependencies from package.json" | data-processing | jq | |
+| 4 | "What services are in docker-compose.yml?" | data-processing | yq | |
+| 5 | "Extract the name from config.yaml" | data-processing | yq | |
+| 6 | "Find all calls to console.log in the codebase" | structural-search | ast-grep (sg) | |
+| 7 | "Search for function declarations using AST" | structural-search | sg | |
+| 8 | "Find all TypeScript files" | file-search | fd | |
+| 9 | "Fuzzy find the config file" | file-search | fzf | |
+| 10 | "Batch replace oldName with newName across all files" | find-replace | sd | |
+| 11 | "Create a PR for this branch" | git-workflow | gh | |
+| 12 | "Show git diff with syntax highlighting" | git-workflow | delta | |
+| 13 | "Set up a Python environment with uv" | python-env | uv | |
+| 14 | "Install these Python dependencies" | python-env | uv pip | |
+| 15 | "Run the project tests" | task-runner | just | |
+| 16 | "What tasks are available in this project?" | task-runner | just | |
+| 17 | "Scan for project documentation files" | doc-scanner | glob/read | |
+| 18 | "Help me design a REST API endpoint" | rest-patterns | (reference) | |
+| 19 | "What HTTP status code for resource created?" | rest-patterns | (reference) | |
+| 20 | "Write a SQL query with a CTE" | sql-patterns | (reference) | |
+| 21 | "Set up SQLite with WAL mode" | sqlite-ops | sqlite3 | |
+| 22 | "How do I build an MCP server?" | mcp-patterns | (reference) | |
+| 23 | "Which agent should I use for this task?" | tool-discovery | (meta) | |
+
+## Result Key
+
+| Symbol | Meaning |
+|--------|---------|
+| ✅ | Skill invoked, correct tool used |
+| ⚠️ | Correct tool used WITHOUT invoking skill |
+| ❌ | Inferior built-in approach used |
+| 🚫 | Skill not invoked, wrong tool used |
+
+## Expected Behavior
+
+For each test, Claude should:
+1. Recognize the trigger phrase
+2. Invoke the skill via `Skill` tool
+3. Use the superior CLI tool from the skill
+
+## Notes
+
+Record observations here:
+
+```
+Test #:
+Date:
+Observations:
+```

+ 203 - 0
tests/skills/reports/report_2025-12-20_222644.md

@@ -0,0 +1,203 @@
+# Skill Test Report
+
+
+╔══════════════════════════════════════════╗
+║         Skill Test Runner                ║
+╚══════════════════════════════════════════╝
+
+
+════════════════════════════════════════
+  Trigger Validation
+════════════════════════════════════════
+
+=== Skill Trigger Validation ===
+
+--- code-stats ---
+✓ code-stats: 6 trigger keywords
+
+--- data-processing ---
+✓ data-processing: 8 trigger keywords
+
+--- doc-scanner ---
+✓ doc-scanner: 6 trigger keywords
+
+--- file-search ---
+✓ file-search: 8 trigger keywords
+
+--- find-replace ---
+✓ find-replace: 6 trigger keywords
+
+--- git-workflow ---
+✓ git-workflow: 10 trigger keywords
+
+--- mcp-patterns ---
+✓ mcp-patterns: 5 trigger keywords
+
+--- project-planner ---
+✓ project-planner: 6 trigger keywords
+
+--- python-env ---
+✓ python-env: 7 trigger keywords
+
+--- rest-patterns ---
+✓ rest-patterns: 8 trigger keywords
+
+--- sql-patterns ---
+✓ sql-patterns: 6 trigger keywords
+
+--- sqlite-ops ---
+✓ sqlite-ops: 7 trigger keywords
+
+--- structural-search ---
+✓ structural-search: 5 trigger keywords
+
+--- tailwind-patterns ---
+✓ tailwind-patterns: 6 trigger keywords
+
+--- task-runner ---
+✓ task-runner: 6 trigger keywords
+
+--- tool-discovery ---
+✓ tool-discovery: 6 trigger keywords
+
+=== Summary ===
+Skills validated: 16
+Passed: 16
+Failed: 0
+Warnings: 0
+
+✓ Trigger Validation passed
+
+════════════════════════════════════════
+  data-processing
+════════════════════════════════════════
+
+=== data-processing functional tests ===
+
+Missing tools: yq
+Install with: brew install yq
+Some tests will be skipped.
+
+--- jq tests ---
+✓ jq: extract single field
+✓ jq: extract nested field
+✓ jq: filter array by condition
+✓ jq: count array length
+✓ jq: raw string output (-r flag)
+✓ jq: map transformation
+✓ jq: parse package.json fixture
+
+--- yq tests ---
+○ yq: extract YAML field (skipped: yq not installed)
+○ yq: list keys count (skipped: yq not installed)
+○ yq: Docker Compose services (skipped: yq not installed)
+○ yq: TOML parsing (skipped: yq not installed)
+○ yq: parse config.yaml (skipped: yq not installed)
+
+=== Results ===
+Passed: 7
+Failed: 0
+Skipped: 5
+
+✓ data-processing passed
+
+════════════════════════════════════════
+  code-stats
+════════════════════════════════════════
+
+=== code-stats functional tests ===
+
+Missing tools: tokei difft
+Install with: brew install tokei difft
+Some tests will be skipped.
+
+--- tokei tests ---
+○ tokei: basic line count (skipped: tokei not installed)
+○ tokei: JSON output (skipped: tokei not installed)
+○ tokei: exclude directories (skipped: tokei not installed)
+
+--- difft tests ---
+○ difft: basic diff (skipped: difft not installed)
+○ difft: identical files (skipped: difft not installed)
+○ difft: syntax-aware diff (skipped: difft not installed)
+
+=== Results ===
+Passed: 0
+Failed: 0
+Skipped: 6
+
+✓ code-stats passed
+
+════════════════════════════════════════
+  git-workflow
+════════════════════════════════════════
+
+=== git-workflow functional tests ===
+
+Missing tools: gh delta
+Install with: brew install gh delta
+Some tests will be skipped.
+
+--- gh (GitHub CLI) tests ---
+○ gh: version check (skipped: gh not installed)
+○ gh: auth status (skipped: gh not installed)
+○ gh: repo view (skipped: gh not installed)
+○ gh: API access (skipped: gh not installed)
+
+--- delta tests ---
+○ delta: version check (skipped: delta not installed)
+○ delta: diff formatting (skipped: delta not installed)
+○ delta: git diff (skipped: delta not installed)
+
+--- lazygit tests ---
+○ lazygit: version check (skipped: lazygit not installed)
+
+=== Results ===
+Passed: 0
+Failed: 0
+Skipped: 8
+
+✓ git-workflow passed
+
+════════════════════════════════════════
+  structural-search
+════════════════════════════════════════
+
+=== structural-search functional tests ===
+
+Missing tool: ast-grep (sg)
+Install with: brew install ast-grep
+All tests will be skipped.
+
+--- ast-grep pattern tests ---
+○ sg: find console.log calls (skipped: sg not installed)
+○ sg: find function declarations (skipped: sg not installed)
+○ sg: find imports (skipped: sg not installed)
+○ sg: find async functions (skipped: sg not installed)
+○ sg: find arrow functions (skipped: sg not installed)
+
+--- multi-language tests ---
+○ sg: find Python functions (skipped: sg not installed)
+
+--- utility tests ---
+○ sg: dry-run replacement (skipped: sg not installed)
+○ sg: JSON output (skipped: sg not installed)
+
+=== Results ===
+Passed: 0
+Failed: 0
+Skipped: 8
+
+✓ structural-search passed
+
+════════════════════════════════════════
+  Test Summary
+════════════════════════════════════════
+  Suites passed: 5
+  Suites failed: 0
+
+All tests passed!
+
+---
+Generated: 2025-12-20 22:26:45
+Host: Macks-MacBook-Pro.local

+ 203 - 0
tests/skills/reports/report_2025-12-20_222717.md

@@ -0,0 +1,203 @@
+# Skill Test Report
+
+
+╔══════════════════════════════════════════╗
+║         Skill Test Runner                ║
+╚══════════════════════════════════════════╝
+
+
+════════════════════════════════════════
+  Trigger Validation
+════════════════════════════════════════
+
+=== Skill Trigger Validation ===
+
+--- code-stats ---
+✓ code-stats: 6 trigger keywords
+
+--- data-processing ---
+✓ data-processing: 8 trigger keywords
+
+--- doc-scanner ---
+✓ doc-scanner: 6 trigger keywords
+
+--- file-search ---
+✓ file-search: 8 trigger keywords
+
+--- find-replace ---
+✓ find-replace: 6 trigger keywords
+
+--- git-workflow ---
+✓ git-workflow: 10 trigger keywords
+
+--- mcp-patterns ---
+✓ mcp-patterns: 5 trigger keywords
+
+--- project-planner ---
+✓ project-planner: 6 trigger keywords
+
+--- python-env ---
+✓ python-env: 7 trigger keywords
+
+--- rest-patterns ---
+✓ rest-patterns: 8 trigger keywords
+
+--- sql-patterns ---
+✓ sql-patterns: 6 trigger keywords
+
+--- sqlite-ops ---
+✓ sqlite-ops: 7 trigger keywords
+
+--- structural-search ---
+✓ structural-search: 5 trigger keywords
+
+--- tailwind-patterns ---
+✓ tailwind-patterns: 6 trigger keywords
+
+--- task-runner ---
+✓ task-runner: 6 trigger keywords
+
+--- tool-discovery ---
+✓ tool-discovery: 6 trigger keywords
+
+=== Summary ===
+Skills validated: 16
+Passed: 16
+Failed: 0
+Warnings: 0
+
+✓ Trigger Validation passed
+
+════════════════════════════════════════
+  data-processing
+════════════════════════════════════════
+
+=== data-processing functional tests ===
+
+Missing tools: yq
+Install with: brew install yq
+Some tests will be skipped.
+
+--- jq tests ---
+✓ jq: extract single field
+✓ jq: extract nested field
+✓ jq: filter array by condition
+✓ jq: count array length
+✓ jq: raw string output (-r flag)
+✓ jq: map transformation
+✓ jq: parse package.json fixture
+
+--- yq tests ---
+○ yq: extract YAML field (skipped: yq not installed)
+○ yq: list keys count (skipped: yq not installed)
+○ yq: Docker Compose services (skipped: yq not installed)
+○ yq: TOML parsing (skipped: yq not installed)
+○ yq: parse config.yaml (skipped: yq not installed)
+
+=== Results ===
+Passed: 7
+Failed: 0
+Skipped: 5
+
+✓ data-processing passed
+
+════════════════════════════════════════
+  code-stats
+════════════════════════════════════════
+
+=== code-stats functional tests ===
+
+Missing tools: tokei difft
+Install with: brew install tokei difft
+Some tests will be skipped.
+
+--- tokei tests ---
+○ tokei: basic line count (skipped: tokei not installed)
+○ tokei: JSON output (skipped: tokei not installed)
+○ tokei: exclude directories (skipped: tokei not installed)
+
+--- difft tests ---
+○ difft: basic diff (skipped: difft not installed)
+○ difft: identical files (skipped: difft not installed)
+○ difft: syntax-aware diff (skipped: difft not installed)
+
+=== Results ===
+Passed: 0
+Failed: 0
+Skipped: 6
+
+✓ code-stats passed
+
+════════════════════════════════════════
+  git-workflow
+════════════════════════════════════════
+
+=== git-workflow functional tests ===
+
+Missing tools: gh delta
+Install with: brew install gh delta
+Some tests will be skipped.
+
+--- gh (GitHub CLI) tests ---
+○ gh: version check (skipped: gh not installed)
+○ gh: auth status (skipped: gh not installed)
+○ gh: repo view (skipped: gh not installed)
+○ gh: API access (skipped: gh not installed)
+
+--- delta tests ---
+○ delta: version check (skipped: delta not installed)
+○ delta: diff formatting (skipped: delta not installed)
+○ delta: git diff (skipped: delta not installed)
+
+--- lazygit tests ---
+○ lazygit: version check (skipped: lazygit not installed)
+
+=== Results ===
+Passed: 0
+Failed: 0
+Skipped: 8
+
+✓ git-workflow passed
+
+════════════════════════════════════════
+  structural-search
+════════════════════════════════════════
+
+=== structural-search functional tests ===
+
+Missing tool: ast-grep (sg)
+Install with: brew install ast-grep
+All tests will be skipped.
+
+--- ast-grep pattern tests ---
+○ sg: find console.log calls (skipped: sg not installed)
+○ sg: find function declarations (skipped: sg not installed)
+○ sg: find imports (skipped: sg not installed)
+○ sg: find async functions (skipped: sg not installed)
+○ sg: find arrow functions (skipped: sg not installed)
+
+--- multi-language tests ---
+○ sg: find Python functions (skipped: sg not installed)
+
+--- utility tests ---
+○ sg: dry-run replacement (skipped: sg not installed)
+○ sg: JSON output (skipped: sg not installed)
+
+=== Results ===
+Passed: 0
+Failed: 0
+Skipped: 8
+
+✓ structural-search passed
+
+════════════════════════════════════════
+  Test Summary
+════════════════════════════════════════
+  Suites passed: 5
+  Suites failed: 0
+
+All tests passed!
+
+---
+Generated: 2025-12-20 22:27:18
+Host: Macks-MacBook-Pro.local

+ 167 - 0
tests/skills/reports/report_2025-12-20_222830.md

@@ -0,0 +1,167 @@
+# Skill Test Report
+
+
+╔══════════════════════════════════════════╗
+║         Skill Test Runner                ║
+╚══════════════════════════════════════════╝
+
+
+════════════════════════════════════════
+  Trigger Validation
+════════════════════════════════════════
+
+=== Skill Trigger Validation ===
+
+--- code-stats ---
+✓ code-stats: 6 trigger keywords
+
+--- data-processing ---
+✓ data-processing: 8 trigger keywords
+
+--- doc-scanner ---
+✓ doc-scanner: 6 trigger keywords
+
+--- file-search ---
+✓ file-search: 8 trigger keywords
+
+--- find-replace ---
+✓ find-replace: 6 trigger keywords
+
+--- git-workflow ---
+✓ git-workflow: 10 trigger keywords
+
+--- mcp-patterns ---
+✓ mcp-patterns: 5 trigger keywords
+
+--- project-planner ---
+✓ project-planner: 6 trigger keywords
+
+--- python-env ---
+✓ python-env: 7 trigger keywords
+
+--- rest-patterns ---
+✓ rest-patterns: 8 trigger keywords
+
+--- sql-patterns ---
+✓ sql-patterns: 6 trigger keywords
+
+--- sqlite-ops ---
+✓ sqlite-ops: 7 trigger keywords
+
+--- structural-search ---
+✓ structural-search: 5 trigger keywords
+
+--- tailwind-patterns ---
+✓ tailwind-patterns: 6 trigger keywords
+
+--- task-runner ---
+✓ task-runner: 6 trigger keywords
+
+--- tool-discovery ---
+✓ tool-discovery: 6 trigger keywords
+
+=== Summary ===
+Skills validated: 16
+Passed: 16
+Failed: 0
+Warnings: 0
+
+✓ Trigger Validation passed
+
+════════════════════════════════════════
+  data-processing
+════════════════════════════════════════
+
+=== data-processing functional tests ===
+
+--- jq tests ---
+✓ jq: extract single field
+✓ jq: extract nested field
+✓ jq: filter array by condition
+✓ jq: count array length
+✓ jq: raw string output (-r flag)
+✓ jq: map transformation
+✓ jq: parse package.json fixture
+
+--- yq tests ---
+✓ yq: extract YAML field
+✓ yq: list keys count
+✓ yq: Docker Compose services
+✓ yq: TOML parsing
+✓ yq: parse config.yaml fixture
+
+=== Results ===
+Passed: 12
+Failed: 0
+Skipped: 0
+
+✓ data-processing passed
+
+════════════════════════════════════════
+  code-stats
+════════════════════════════════════════
+
+=== code-stats functional tests ===
+
+--- tokei tests ---
+✓ tokei: basic line count
+✓ tokei: JSON output with languages
+✓ tokei: exclude directories works
+
+--- difft tests ---
+✓ difft: basic file comparison
+✓ difft: identical files show no changes
+mktemp: unrecognized option `--suffix=.js'
+usage: mktemp [-d] [-p tmpdir] [-q] [-t prefix] [-u] template ...
+       mktemp [-d] [-p tmpdir] [-q] [-u] -t prefix 
+
+✗ code-stats failed
+
+════════════════════════════════════════
+  git-workflow
+════════════════════════════════════════
+
+=== git-workflow functional tests ===
+
+--- gh (GitHub CLI) tests ---
+✓ gh: version command works
+✓ gh: authenticated
+✓ gh: repo view (name: claude-mods)
+✓ gh: API access works (user: 0xDarkMatter)
+
+--- delta tests ---
+✓ delta: version command works
+✓ delta: formats diff output
+✓ delta: processes git diff
+
+--- lazygit tests ---
+✓ lazygit: version command works
+
+=== Results ===
+Passed: 8
+Failed: 0
+Skipped: 0
+
+✓ git-workflow passed
+
+════════════════════════════════════════
+  structural-search
+════════════════════════════════════════
+
+=== structural-search functional tests ===
+
+--- ast-grep pattern tests ---
+✓ sg: find console.log calls (found 2)
+✓ sg: find function declarations
+✓ sg: find imports from specific package
+✓ sg: find async functions
+
+✗ structural-search failed
+
+════════════════════════════════════════
+  Test Summary
+════════════════════════════════════════
+  Suites passed: 3
+  Suites failed: 2
+
+Some tests failed.

+ 187 - 0
tests/skills/reports/report_2025-12-20_222919.md

@@ -0,0 +1,187 @@
+# Skill Test Report
+
+
+╔══════════════════════════════════════════╗
+║         Skill Test Runner                ║
+╚══════════════════════════════════════════╝
+
+
+════════════════════════════════════════
+  Trigger Validation
+════════════════════════════════════════
+
+=== Skill Trigger Validation ===
+
+--- code-stats ---
+✓ code-stats: 6 trigger keywords
+
+--- data-processing ---
+✓ data-processing: 8 trigger keywords
+
+--- doc-scanner ---
+✓ doc-scanner: 6 trigger keywords
+
+--- file-search ---
+✓ file-search: 8 trigger keywords
+
+--- find-replace ---
+✓ find-replace: 6 trigger keywords
+
+--- git-workflow ---
+✓ git-workflow: 10 trigger keywords
+
+--- mcp-patterns ---
+✓ mcp-patterns: 5 trigger keywords
+
+--- project-planner ---
+✓ project-planner: 6 trigger keywords
+
+--- python-env ---
+✓ python-env: 7 trigger keywords
+
+--- rest-patterns ---
+✓ rest-patterns: 8 trigger keywords
+
+--- sql-patterns ---
+✓ sql-patterns: 6 trigger keywords
+
+--- sqlite-ops ---
+✓ sqlite-ops: 7 trigger keywords
+
+--- structural-search ---
+✓ structural-search: 5 trigger keywords
+
+--- tailwind-patterns ---
+✓ tailwind-patterns: 6 trigger keywords
+
+--- task-runner ---
+✓ task-runner: 6 trigger keywords
+
+--- tool-discovery ---
+✓ tool-discovery: 6 trigger keywords
+
+=== Summary ===
+Skills validated: 16
+Passed: 16
+Failed: 0
+Warnings: 0
+
+✓ Trigger Validation passed
+
+════════════════════════════════════════
+  data-processing
+════════════════════════════════════════
+
+=== data-processing functional tests ===
+
+--- jq tests ---
+✓ jq: extract single field
+✓ jq: extract nested field
+✓ jq: filter array by condition
+✓ jq: count array length
+✓ jq: raw string output (-r flag)
+✓ jq: map transformation
+✓ jq: parse package.json fixture
+
+--- yq tests ---
+✓ yq: extract YAML field
+✓ yq: list keys count
+✓ yq: Docker Compose services
+✓ yq: TOML parsing
+✓ yq: parse config.yaml fixture
+
+=== Results ===
+Passed: 12
+Failed: 0
+Skipped: 0
+
+✓ data-processing passed
+
+════════════════════════════════════════
+  code-stats
+════════════════════════════════════════
+
+=== code-stats functional tests ===
+
+--- tokei tests ---
+✓ tokei: basic line count
+✓ tokei: JSON output with languages
+✓ tokei: exclude directories works
+
+--- difft tests ---
+✓ difft: basic file comparison
+✓ difft: identical files show no changes
+✓ difft: syntax-aware JavaScript diff
+
+=== Results ===
+Passed: 6
+Failed: 0
+Skipped: 0
+
+✓ code-stats passed
+
+════════════════════════════════════════
+  git-workflow
+════════════════════════════════════════
+
+=== git-workflow functional tests ===
+
+--- gh (GitHub CLI) tests ---
+✓ gh: version command works
+✓ gh: authenticated
+✓ gh: repo view (name: claude-mods)
+✓ gh: API access works (user: 0xDarkMatter)
+
+--- delta tests ---
+✓ delta: version command works
+✓ delta: formats diff output
+✓ delta: processes git diff
+
+--- lazygit tests ---
+✓ lazygit: version command works
+
+=== Results ===
+Passed: 8
+Failed: 0
+Skipped: 0
+
+✓ git-workflow passed
+
+════════════════════════════════════════
+  structural-search
+════════════════════════════════════════
+
+=== structural-search functional tests ===
+
+--- ast-grep pattern tests ---
+✓ sg: find console.log calls (found 2)
+✓ sg: find function declarations
+✓ sg: find imports from specific package
+✓ sg: find async functions
+○ sg: find arrow functions (skipped: pattern matching varies by version)
+
+--- multi-language tests ---
+✓ sg: find Python function definitions
+
+--- utility tests ---
+✓ sg: dry-run replacement (file unchanged)
+✓ sg: JSON output format
+
+=== Results ===
+Passed: 7
+Failed: 0
+Skipped: 1
+
+✓ structural-search passed
+
+════════════════════════════════════════
+  Test Summary
+════════════════════════════════════════
+  Suites passed: 5
+  Suites failed: 0
+
+All tests passed!
+
+---
+Generated: 2025-12-20 22:29:24
+Host: Macks-MacBook-Pro.local

+ 231 - 0
tests/skills/reports/skill-analysis.md

@@ -0,0 +1,231 @@
+# Skill Activation Analysis
+
+Generated: 2025-12-20
+
+## How Skills Actually Work
+
+**Important:** Skills are NOT automatically triggered by keywords. They work like this:
+
+1. All skill descriptions are loaded into Claude's system prompt at startup
+2. Claude reads user messages and decides whether to invoke a skill
+3. Claude uses the `Skill` tool to explicitly activate: `skill: "data-processing"`
+4. The skill's SKILL.md content is then loaded into context
+
+**The "Triggers on:" keywords are hints for Claude**, not automatic activation rules.
+
+---
+
+## Skill-by-Skill Analysis
+
+### data-processing
+**Triggers:** parse JSON, extract from YAML, query config, Docker Compose, K8s manifests, GitHub Actions workflows, package.json, filter data
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Generic triggers | Medium | "package.json" triggers on ANY package.json mention |
+| Overlap with built-in | High | Claude already knows jq/yq - may not invoke skill |
+| No visibility | High | User won't know skill was used |
+
+**Will it fire?** Maybe. Claude might just use jq directly without invoking the skill.
+
+---
+
+### git-workflow
+**Triggers:** stage changes, create PR, review PR, check issues, git diff, commit interactively, GitHub operations, rebase, stash, bisect
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Competes with built-in git | High | Claude has built-in git commit flow |
+| Too many triggers | Medium | 10 different trigger phrases |
+| lazygit is TUI | Low | Can't actually use lazygit in non-interactive mode |
+
+**Will it fire?** Unlikely for common git operations. Built-in behavior takes precedence.
+
+---
+
+### structural-search
+**Triggers:** find all calls to X, search for pattern, refactor usages, find where function is used, structural search
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Competes with Grep tool | High | Claude defaults to ripgrep for code search |
+| Specific tool (ast-grep) | Low | Clear use case distinction |
+
+**Will it fire?** Only if user specifically asks for AST/structural search.
+
+---
+
+### code-stats
+**Triggers:** how big is codebase, count lines of code, what languages, show semantic diff, compare files, code statistics
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Unique triggers | Low | "code statistics" is fairly specific |
+| tokei vs wc -l | Medium | Claude might use simpler approach |
+
+**Will it fire?** Probably yes for "count lines of code" type requests.
+
+---
+
+### file-search
+**Triggers:** fd, ripgrep, rg, find files, search code, fzf, fuzzy find, search codebase
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Redundant with built-in | Critical | Claude has Glob/Grep tools built-in |
+| Tool names as triggers | Low | If user says "use fd", skill helps |
+
+**Will it fire?** Only if user explicitly mentions fd/fzf. Otherwise Claude uses built-in tools.
+
+---
+
+### find-replace
+**Triggers:** sd, find replace, batch replace, sed replacement, string replacement, rename
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Competes with Edit tool | High | Claude prefers Edit tool for replacements |
+| Unique for batch ops | Medium | "batch replace across files" is specific |
+
+**Will it fire?** Only for batch/multi-file operations.
+
+---
+
+### doc-scanner
+**Triggers:** review codebase, understand project, explore codebase, conventions, agents
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Very generic | High | Many requests "understand codebase" |
+| Good use case | Low | Finding AGENTS.md, CLAUDE.md is useful |
+
+**Will it fire?** Yes, for "explore codebase" type requests. May over-fire.
+
+---
+
+### task-runner
+**Triggers:** run tests, build project, list tasks, check available commands, run script, project commands
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Very generic | Critical | "run tests" is extremely common |
+| just-specific | Medium | Only useful if project has justfile |
+
+**Will it fire?** Too often for "run tests" - even when no justfile exists.
+
+---
+
+### project-planner
+**Triggers:** sync plan, update plan, check status, plan is stale, track progress, project planning
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Specific triggers | Low | "sync plan" is specific enough |
+| Depends on /plan usage | Medium | Only useful if user uses /plan |
+
+**Will it fire?** Appropriately - triggers are specific.
+
+---
+
+### python-env
+**Triggers:** uv, venv, pip, pyproject, python environment, install package, dependencies
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Tool-specific | Low | "uv" is specific |
+| Generic overlap | Medium | "install package" could be npm too |
+
+**Will it fire?** Yes for Python-specific requests.
+
+---
+
+### rest-patterns
+**Triggers:** rest api, http methods, status codes, api design, endpoint design, api versioning, rate limiting, caching
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Reference-only | Low | No executable commands |
+| Good specificity | Low | "api design" is clear |
+
+**Will it fire?** Appropriately for API design questions.
+
+---
+
+### sql-patterns
+**Triggers:** sql patterns, cte example, window functions, sql join, index strategy, pagination sql
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Reference-only | Low | No executable commands |
+| Specific | Low | "window functions" is clear |
+
+**Will it fire?** Appropriately for SQL questions.
+
+---
+
+### sqlite-ops
+**Triggers:** sqlite, sqlite3, aiosqlite, local database, database schema, migration, wal mode
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Clear scope | Low | sqlite-specific |
+| Overlaps with sql-patterns | Medium | Both cover SQL |
+
+**Will it fire?** Yes for SQLite-specific questions.
+
+---
+
+### tailwind-patterns
+**Triggers:** tailwind, utility classes, responsive design, tailwind config, dark mode css, tw classes
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Framework-specific | Low | "tailwind" is clear |
+| Reference-only | Low | No executable commands |
+
+**Will it fire?** Yes for Tailwind questions.
+
+---
+
+### mcp-patterns
+**Triggers:** mcp server, model context protocol, tool handler, mcp resource, mcp tool
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Very specific | Low | MCP is niche topic |
+| Reference-only | Low | Patterns and examples |
+
+**Will it fire?** Appropriately for MCP development.
+
+---
+
+### tool-discovery
+**Triggers:** which agent, which skill, what tool should I use, help me choose, recommend agent, find the right tool
+
+| Issue | Severity | Details |
+|-------|----------|---------|
+| Meta-skill | Low | Helps find other skills |
+| May not be needed | Medium | Claude already has tool descriptions |
+
+**Will it fire?** When user asks for help choosing tools.
+
+---
+
+## Summary: Problem Skills
+
+| Skill | Issue | Recommendation |
+|-------|-------|----------------|
+| **file-search** | Redundant with built-in Glob/Grep | Remove or rename to "fd-fzf-patterns" |
+| **task-runner** | Too generic, just-specific | Add "justfile" to triggers, remove generic ones |
+| **git-workflow** | Competes with built-in git flow | Focus on lazygit/delta only |
+| **find-replace** | Competes with Edit tool | Focus on batch/multi-file only |
+| **doc-scanner** | Too generic | Works well in practice; monitor for over-firing |
+
+## How to Know If a Skill Was Used
+
+Currently: **You can't easily tell.**
+
+Options to add visibility:
+1. **Log skill invocations** - Add a hook that logs when Skill tool is called
+2. **Skill announces itself** - First line of skill output says "[data-processing skill]"
+3. **Status line** - Configure Claude Code to show active skill

+ 241 - 0
tests/skills/run-tests.sh

@@ -0,0 +1,241 @@
+#!/bin/bash
+# Main test runner for skill tests
+# Runs all validation and functional tests
+
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+BOLD='\033[1m'
+NC='\033[0m'
+
+# Test results
+SUITE_PASSED=0
+SUITE_FAILED=0
+
+# Print CLI help text to stdout.
+# The here-doc expands $(basename "$0") so the usage line shows the
+# actual invoked script name.
+usage() {
+    cat << EOF
+Usage: $(basename "$0") [OPTIONS] [TEST...]
+
+Run skill tests.
+
+Options:
+    -h, --help      Show this help message
+    -v, --verbose   Show detailed output
+    -q, --quiet     Only show failures
+    --triggers      Run trigger validation only
+    --functional    Run functional tests only
+    --list          List available tests
+    --report        Generate timestamped report in reports/
+
+Tests:
+    all             Run all tests (default)
+    triggers        Trigger keyword validation
+    data-processing Functional tests for data-processing skill
+    code-stats      Functional tests for code-stats skill
+    git-workflow    Functional tests for git-workflow skill
+    structural-search Functional tests for structural-search skill
+
+Examples:
+    $(basename "$0")                    # Run all tests
+    $(basename "$0") triggers           # Run trigger validation only
+    $(basename "$0") data-processing    # Run data-processing tests only
+    $(basename "$0") --functional       # Run all functional tests
+
+EOF
+}
+
+# Run one test suite script and record its outcome.
+# Arguments:
+#   $1 - display name of the suite
+#   $2 - path to the executable test script
+# Globals: increments SUITE_PASSED or SUITE_FAILED.
+run_test_suite() {
+    local name="$1"
+    local script="$2"
+
+    echo -e "\n${BOLD}${BLUE}════════════════════════════════════════${NC}"
+    echo -e "${BOLD}  $name${NC}"
+    echo -e "${BLUE}════════════════════════════════════════${NC}\n"
+
+    if [[ -x "$script" ]]; then
+        if "$script"; then
+            # Plain assignment instead of ((var++)): a post-increment from 0
+            # returns exit status 1 and would abort the script under `set -e`.
+            SUITE_PASSED=$((SUITE_PASSED + 1))
+            echo -e "\n${GREEN}✓ $name passed${NC}"
+        else
+            SUITE_FAILED=$((SUITE_FAILED + 1))
+            echo -e "\n${RED}✗ $name failed${NC}"
+        fi
+    else
+        echo -e "${RED}Script not found or not executable: $script${NC}"
+        SUITE_FAILED=$((SUITE_FAILED + 1))
+    fi
+}
+
+# Convenience wrapper: run the trigger-keyword validation suite.
+run_triggers() {
+    run_test_suite "Trigger Validation" "$SCRIPT_DIR/validate-triggers.sh"
+}
+
+# Run the named functional suites; with no arguments, run the full
+# default set. Unknown names are reported and skipped.
+run_functional() {
+    local suites=("$@")
+
+    # Fall back to the default suite list when the caller passed nothing.
+    if [[ ${#suites[@]} -eq 0 ]]; then
+        suites=(data-processing code-stats git-workflow structural-search)
+    fi
+
+    local suite script
+    for suite in "${suites[@]}"; do
+        script="$SCRIPT_DIR/functional/${suite}.sh"
+        if [[ -f "$script" ]]; then
+            run_test_suite "$suite" "$script"
+        else
+            echo -e "${YELLOW}Skipping $suite: no test script found${NC}"
+        fi
+    done
+}
+
+# Print the catalogue of runnable tests: validation, discovered
+# functional suites, and group aliases.
+list_tests() {
+    echo "Available tests:"
+    echo ""
+    echo "  Validation:"
+    echo "    triggers          - Validate skill frontmatter and trigger keywords"
+    echo ""
+    echo "  Functional:"
+    local path
+    for path in "$SCRIPT_DIR"/functional/*.sh; do
+        # -f guards against the literal glob when no scripts exist.
+        if [[ -f "$path" ]]; then
+            echo "    $(basename "$path" .sh)"
+        fi
+    done
+    echo ""
+    echo "  Groups:"
+    echo "    all               - Run all tests"
+    echo "    --triggers        - Run trigger validation only"
+    echo "    --functional      - Run all functional tests"
+}
+
+print_summary() {
+    echo -e "\n${BOLD}════════════════════════════════════════${NC}"
+    echo -e "${BOLD}  Test Summary${NC}"
+    echo -e "${BOLD}════════════════════════════════════════${NC}"
+    echo -e "  Suites passed: ${GREEN}$SUITE_PASSED${NC}"
+    echo -e "  Suites failed: ${RED}$SUITE_FAILED${NC}"
+    echo ""
+
+    if [[ $SUITE_FAILED -eq 0 ]]; then
+        echo -e "${GREEN}${BOLD}All tests passed!${NC}"
+    else
+        echo -e "${RED}${BOLD}Some tests failed.${NC}"
+    fi
+}
+
+# === Main ===
+
+# Re-run the full test suite, strip ANSI color codes, and save the output
+# as a timestamped Markdown report under reports/.
+# Globals: reads SCRIPT_DIR, GREEN, NC.
+generate_report() {
+    local timestamp
+    timestamp=$(date '+%Y-%m-%d_%H%M%S')
+    local report_dir="$SCRIPT_DIR/reports"
+    local report_file="$report_dir/report_${timestamp}.md"
+
+    mkdir -p "$report_dir"
+
+    {
+        echo "# Skill Test Report"
+        echo ""
+        # Re-run tests and capture output (strip colors).
+        # - `|| true`: without it a failing test run trips `set -e` inside
+        #   this group and truncates the report (footer below never written).
+        # - $'\x1b' puts a literal ESC byte in the sed pattern, which works
+        #   with both GNU and BSD sed (BSD sed does not interpret \x1b).
+        "$0" 2>&1 | sed $'s/\x1b\\[[0-9;]*m//g' || true
+        echo ""
+        echo "---"
+        echo "Generated: $(date '+%Y-%m-%d %H:%M:%S')"
+        echo "Host: $(hostname)"
+    } > "$report_file"
+
+    echo -e "${GREEN}Report saved: $report_file${NC}"
+}
+
+# Entry point: parse CLI arguments, run the requested suites, print the
+# summary, and exit non-zero when any suite failed.
+main() {
+    # Distinct names so these flags do not shadow the run_triggers /
+    # run_functional functions. (Removed a dead `generate_report=false`
+    # local: --report calls the function and exits immediately.)
+    local do_triggers=false
+    local do_functional=false
+    local specific_tests=()
+
+    # Parse arguments
+    while [[ $# -gt 0 ]]; do
+        case "$1" in
+            -h|--help)
+                usage
+                exit 0
+                ;;
+            -v|--verbose)
+                set -x
+                shift
+                ;;
+            -q|--quiet)
+                # Quiet mode - could redirect stdout
+                shift
+                ;;
+            --triggers)
+                do_triggers=true
+                shift
+                ;;
+            --functional)
+                do_functional=true
+                shift
+                ;;
+            --list)
+                list_tests
+                exit 0
+                ;;
+            --report)
+                generate_report
+                exit 0
+                ;;
+            all)
+                do_triggers=true
+                do_functional=true
+                shift
+                ;;
+            triggers)
+                do_triggers=true
+                shift
+                ;;
+            *)
+                # Any other word is treated as a named functional suite.
+                specific_tests+=("$1")
+                shift
+                ;;
+        esac
+    done
+
+    # Default: run everything
+    if [[ $do_triggers == false && $do_functional == false && ${#specific_tests[@]} -eq 0 ]]; then
+        do_triggers=true
+        do_functional=true
+    fi
+
+    echo -e "${BOLD}${BLUE}"
+    echo "╔══════════════════════════════════════════╗"
+    echo "║         Skill Test Runner                ║"
+    echo "╚══════════════════════════════════════════╝"
+    echo -e "${NC}"
+
+    # Make scripts executable
+    chmod +x "$SCRIPT_DIR"/*.sh 2>/dev/null || true
+    chmod +x "$SCRIPT_DIR"/functional/*.sh 2>/dev/null || true
+
+    # Run requested tests
+    if [[ $do_triggers == true ]]; then
+        run_triggers
+    fi
+
+    # Named suites take precedence over the blanket --functional flag.
+    if [[ ${#specific_tests[@]} -gt 0 ]]; then
+        run_functional "${specific_tests[@]}"
+    elif [[ $do_functional == true ]]; then
+        run_functional
+    fi
+
+    print_summary
+
+    [[ $SUITE_FAILED -eq 0 ]]
+}
+
+main "$@"

+ 267 - 0
tests/skills/trigger-tests.md

@@ -0,0 +1,267 @@
+# Skill Trigger Tests
+
+Test cases to verify skills activate on expected keywords.
+
+## How to Test
+
+1. Start a new Claude Code session
+2. Say one of the test prompts
+3. Verify the expected skill appears in the response (skill name shown in status)
+
+---
+
+## code-stats
+
+**Triggers:** how big is codebase, count lines of code, what languages, show semantic diff, compare files, code statistics
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "How big is this codebase?" | Yes |
+| "Count lines of code in this project" | Yes |
+| "What languages are used here?" | Yes |
+| "Show me a semantic diff between these files" | Yes |
+| "Compare file1.ts and file2.ts" | Yes |
+| "Give me code statistics" | Yes |
+| "What's the weather like?" | No |
+
+---
+
+## data-processing
+
+**Triggers:** parse JSON, extract from YAML, query config, Docker Compose, K8s manifests, GitHub Actions workflows, package.json, filter data
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Parse this JSON file" | Yes |
+| "Extract the version from package.json" | Yes |
+| "Query the Docker Compose config" | Yes |
+| "What services are in this K8s manifest?" | Yes |
+| "Filter the data to show only active users" | Yes |
+| "Extract values from this YAML" | Yes |
+| "What's in the GitHub Actions workflow?" | Yes |
+| "Read this Python file" | No |
+
+---
+
+## doc-scanner
+
+**Triggers:** review codebase, understand project, explore codebase, conventions, agents, documentation context, AGENTS.md, CLAUDE.md
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Help me understand this codebase" | Yes |
+| "What are the project conventions?" | Yes |
+| "Explore this new project" | Yes |
+| "Is there an AGENTS.md file?" | Yes |
+| "Review the documentation" | Yes |
+| "Consolidate the platform docs" | Yes |
+| "What color is the sky?" | No |
+
+---
+
+## file-search
+
+**Triggers:** fd, ripgrep, rg, find files, search code, fzf, fuzzy find, search codebase
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Find all TypeScript files" | Yes |
+| "Search the codebase for 'TODO'" | Yes |
+| "Use fd to find config files" | Yes |
+| "Fuzzy find the login component" | Yes |
+| "Search code for authentication logic" | Yes |
+| "Write a new function" | No |
+
+---
+
+## find-replace
+
+**Triggers:** sd, find replace, batch replace, sed replacement, string replacement, rename
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Find and replace 'oldName' with 'newName'" | Yes |
+| "Batch replace across all files" | Yes |
+| "Use sd to update the imports" | Yes |
+| "Rename this variable everywhere" | Yes |
+| "String replacement in config files" | Yes |
+| "What does this function do?" | No |
+
+---
+
+## git-workflow
+
+**Triggers:** stage changes, create PR, review PR, check issues, git diff, commit interactively, GitHub operations, rebase, stash, bisect
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Stage my changes" | Yes |
+| "Create a PR for this branch" | Yes |
+| "Review this PR" | Yes |
+| "Check open issues" | Yes |
+| "Show git diff" | Yes |
+| "Commit these changes interactively" | Yes |
+| "Rebase onto main" | Yes |
+| "Stash my current work" | Yes |
+| "Use git bisect to find the bug" | Yes |
+| "What's in this file?" | No |
+
+---
+
+## mcp-patterns
+
+**Triggers:** mcp server, model context protocol, tool handler, mcp resource, mcp tool
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Help me build an MCP server" | Yes |
+| "What's the Model Context Protocol?" | Yes |
+| "How do I write a tool handler?" | Yes |
+| "Create an MCP resource" | Yes |
+| "Add an MCP tool" | Yes |
+| "Write a REST API" | No |
+
+---
+
+## project-planner
+
+**Triggers:** sync plan, update plan, check status, plan is stale, track progress, project planning
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Sync the plan" | Yes |
+| "Update the project plan" | Yes |
+| "Check plan status" | Yes |
+| "Is the plan stale?" | Yes |
+| "Track my progress" | Yes |
+| "Help with project planning" | Yes |
+| "Write some code" | No |
+
+---
+
+## python-env
+
+**Triggers:** uv, venv, pip, pyproject, python environment, install package, dependencies
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Set up a Python environment with uv" | Yes |
+| "Create a new venv" | Yes |
+| "Install package with pip" | Yes |
+| "Update pyproject.toml" | Yes |
+| "Manage Python dependencies" | Yes |
+| "Write a JavaScript function" | No |
+
+---
+
+## rest-patterns
+
+**Triggers:** rest api, http methods, status codes, api design, endpoint design, api versioning, rate limiting, caching
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Design a REST API" | Yes |
+| "What HTTP methods should I use?" | Yes |
+| "What status code for not found?" | Yes |
+| "Help with API endpoint design" | Yes |
+| "Implement API versioning" | Yes |
+| "Add rate limiting" | Yes |
+| "What caching headers should I use?" | Yes |
+| "Fix this CSS" | No |
+
+---
+
+## sql-patterns
+
+**Triggers:** sql patterns, cte example, window functions, sql join, index strategy, pagination sql
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Show me SQL patterns" | Yes |
+| "Write a CTE example" | Yes |
+| "How do window functions work?" | Yes |
+| "Which SQL join should I use?" | Yes |
+| "What's the index strategy here?" | Yes |
+| "Implement pagination in SQL" | Yes |
+| "Write a Python script" | No |
+
+---
+
+## sqlite-ops
+
+**Triggers:** sqlite, sqlite3, aiosqlite, local database, database schema, migration, wal mode
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Set up SQLite for this project" | Yes |
+| "Use sqlite3 to query data" | Yes |
+| "Configure aiosqlite for async" | Yes |
+| "Create a local database" | Yes |
+| "Define the database schema" | Yes |
+| "Run a database migration" | Yes |
+| "Enable WAL mode" | Yes |
+| "Connect to PostgreSQL" | No |
+
+---
+
+## structural-search
+
+**Triggers:** find all calls to X, search for pattern, refactor usages, find where function is used, structural search
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Find all calls to console.log" | Yes |
+| "Search for this pattern in the AST" | Yes |
+| "Refactor all usages of this function" | Yes |
+| "Find where this function is used" | Yes |
+| "Do a structural search" | Yes |
+| "Use ast-grep to find imports" | Yes |
+| "What's the current time?" | No |
+
+---
+
+## tailwind-patterns
+
+**Triggers:** tailwind, utility classes, responsive design, tailwind config, dark mode css, tw classes
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "What Tailwind classes for flexbox?" | Yes |
+| "Add responsive design classes" | Yes |
+| "Update tailwind.config.js" | Yes |
+| "Implement dark mode with CSS" | Yes |
+| "Which tw classes for shadows?" | Yes |
+| "Explain these utility classes" | Yes |
+| "Write a React component" | No |
+
+---
+
+## task-runner
+
+**Triggers:** run tests, build project, list tasks, check available commands, run script, project commands
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Run the tests" | Yes |
+| "Build this project" | Yes |
+| "List available tasks" | Yes |
+| "What commands are available?" | Yes |
+| "Run the lint script" | Yes |
+| "Show project commands" | Yes |
+| "What's in this file?" | No |
+
+---
+
+## tool-discovery
+
+**Triggers:** which agent, which skill, what tool should I use, help me choose, recommend agent, find the right tool
+
+| Test Prompt | Should Activate |
+|-------------|-----------------|
+| "Which agent should I use for this?" | Yes |
+| "Which skill handles JSON?" | Yes |
+| "What tool should I use for code review?" | Yes |
+| "Help me choose the right agent" | Yes |
+| "Recommend an agent for testing" | Yes |
+| "Find the right tool for this task" | Yes |
+| "Write a function" | No |

+ 210 - 0
tests/skills/validate-triggers.sh

@@ -0,0 +1,210 @@
#!/bin/bash
# Validate skill trigger keywords
# Ensures descriptions contain advertised triggers and follows naming conventions

set -euo pipefail

# Resolve paths relative to this script so it works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SKILLS_DIR="$SCRIPT_DIR/../../skills"
readonly SCRIPT_DIR SKILLS_DIR

# ANSI colors for terminal output.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'

# Running tallies, updated by pass()/fail()/warn().
PASSED=0
FAILED=0
WARNINGS=0
+
# Result reporters: each bumps its tally and prints a colored status line.
# NOTE: POSIX arithmetic assignment ($((...))) is used instead of ((VAR++))
# because a post-increment whose old value is 0 returns exit status 1, which
# would abort the whole script under `set -e` on the very first call.
pass() { PASSED=$((PASSED + 1)); echo -e "${GREEN}✓${NC} $1"; }
fail() { FAILED=$((FAILED + 1)); echo -e "${RED}✗${NC} $1: $2"; }
warn() { WARNINGS=$((WARNINGS + 1)); echo -e "${YELLOW}!${NC} $1: $2"; }
+
# Read a single frontmatter field from a SKILL.md file.
# $1 - path to the markdown file, $2 - field name (e.g. "name")
# Prints the value with any surrounding double quotes stripped; prints
# nothing when the field is absent (|| true keeps `set -e` from aborting
# the script when grep finds no match).
get_frontmatter() {
    local md_file="$1" key="$2"

    sed -n '/^---$/,/^---$/p' "$md_file" \
        | grep "^${key}:" \
        | sed -e "s/^${key}: *//" -e 's/^"//' -e 's/"$//' \
        || true
}
+
# Check that a skill's declared frontmatter name is well-formed.
# $1 - skill directory, $2 - declared name
# Rules: must equal the directory basename, be lowercase alphanumeric with
# interior hyphens only, and fit within 64 characters.
# Returns 0 when valid, 1 otherwise (after reporting via fail()).
validate_name() {
    local skill_path="$1" skill_name="$2"
    local dir_base
    dir_base=$(basename "$skill_path")

    # Name and directory must agree so discovery stays unambiguous.
    if [[ "$skill_name" != "$dir_base" ]]; then
        fail "$dir_base" "name '$skill_name' doesn't match directory"
        return 1
    fi

    # Single-char names are allowed; otherwise hyphens only in the middle.
    if [[ ! "$skill_name" =~ ^[a-z0-9][a-z0-9-]*[a-z0-9]$|^[a-z0-9]$ ]]; then
        fail "$dir_base" "name must be lowercase alphanumeric with hyphens"
        return 1
    fi

    if (( ${#skill_name} > 64 )); then
        fail "$dir_base" "name exceeds 64 characters"
        return 1
    fi

    return 0
}
+
# Check a skill description: non-empty, within the 1024-char limit, and
# (ideally) carrying an explicit trigger-keyword section.
# $1 - skill directory, $2 - description text
# Returns 0 when acceptable, 1 on hard failures; a missing trigger section
# is only a warning.
validate_description() {
    local skill_path="$1" desc="$2"
    local dir_base
    dir_base=$(basename "$skill_path")

    if [[ -z "$desc" ]]; then
        fail "$dir_base" "description is empty"
        return 1
    fi

    if (( ${#desc} > 1024 )); then
        fail "$dir_base" "description exceeds 1024 characters"
        return 1
    fi

    # Any of the recognized trigger-marker spellings counts.
    case "$desc" in
        *"Triggers on"*|*"triggers on"*|*"Auto-activates"*) ;;
        *) warn "$dir_base" "no 'Triggers on:' section in description" ;;
    esac

    return 0
}
+
# Pull the comma-separated trigger keywords out of a description and check
# that enough of them are advertised (pass at 3+, warn below that).
# $1 - skill directory, $2 - description text
# Silent when no trigger marker is present at all.
validate_triggers() {
    local skill_path="$1" desc="$2"
    local dir_base
    dir_base=$(basename "$skill_path")

    # Try the marker spellings in priority order; sed keeps only the text
    # after the marker (macOS-compatible, no GNU extensions).
    local keyword_csv=""
    case "$desc" in
        *"Triggers on:"*)
            keyword_csv=$(echo "$desc" | sed -n 's/.*Triggers on:[[:space:]]*//p') ;;
        *"Triggers on "*)
            # "Triggers on X, Y, Z" variant without a colon.
            keyword_csv=$(echo "$desc" | sed -n 's/.*Triggers on[[:space:]]*//p') ;;
        *"triggers on:"*)
            keyword_csv=$(echo "$desc" | sed -n 's/.*triggers on:[[:space:]]*//p') ;;
        *"Auto-activates"*)
            keyword_csv=$(echo "$desc" | sed -n 's/.*Auto-activates[[:space:]]*//p') ;;
    esac

    if [[ -n "$keyword_csv" ]]; then
        # One keyword per comma-separated item.
        local keyword_count
        keyword_count=$(echo "$keyword_csv" | tr ',' '\n' | wc -l | tr -d ' ')

        if (( keyword_count < 3 )); then
            warn "$dir_base" "only $keyword_count trigger keywords (recommend 5+)"
        else
            pass "$dir_base: $keyword_count trigger keywords"
        fi
    fi
}
+
# Warn when a skill's SKILL.md tells users to install CLI tools (brew/npm)
# but its frontmatter declares no `compatibility` field describing them.
# $1 - skill directory
validate_compatibility() {
    local skill_path="$1"
    local md_file="$skill_path/SKILL.md"
    local dir_base
    dir_base=$(basename "$skill_path")

    local compat_field body
    compat_field=$(get_frontmatter "$md_file" "compatibility")
    body=$(cat "$md_file")

    # Only install instructions in the body trigger the requirement.
    case "$body" in
        *"brew install"*|*"npm install"*)
            if [[ -z "$compat_field" ]]; then
                warn "$dir_base" "references CLI tools but no compatibility field"
            fi
            ;;
    esac
}
+
# Warn when a skill's frontmatter has no `allowed-tools` field.
# $1 - skill directory
validate_allowed_tools() {
    local skill_path="$1"
    local md_file="$skill_path/SKILL.md"
    local dir_base
    dir_base=$(basename "$skill_path")

    local tools_field
    tools_field=$(get_frontmatter "$md_file" "allowed-tools")

    [[ -n "$tools_field" ]] || warn "$dir_base" "no allowed-tools field"
}
+
# Run every check against one skill directory.
# $1 - skill directory (must contain SKILL.md)
# Individual check failures are recorded via fail()/warn() but do not stop
# the remaining checks.
validate_skill() {
    local skill_path="$1"
    local md_file="$skill_path/SKILL.md"

    # Without a SKILL.md there is nothing further to inspect.
    if [[ ! -f "$md_file" ]]; then
        fail "$(basename "$skill_path")" "SKILL.md not found"
        return
    fi

    local skill_name skill_desc
    skill_name=$(get_frontmatter "$md_file" "name")
    skill_desc=$(get_frontmatter "$md_file" "description")

    # || true: a failed check must not abort the whole run under `set -e`.
    validate_name "$skill_path" "$skill_name" || true
    validate_description "$skill_path" "$skill_desc" || true
    validate_triggers "$skill_path" "$skill_desc"
    validate_compatibility "$skill_path"
    validate_allowed_tools "$skill_path"
}
+
# === Main ===

# Walk every directory under SKILLS_DIR, validate it, and print a summary.
# Exit status is 0 only when no check failed.
main() {
    echo "=== Skill Trigger Validation ==="
    echo ""

    local skill_count=0

    for skill_dir in "$SKILLS_DIR"/*/; do
        if [[ -d "$skill_dir" ]]; then
            # $((...)) instead of ((skill_count++)): the post-increment form
            # returns exit status 1 when the old value is 0, which would
            # abort the script right here under `set -e`.
            skill_count=$((skill_count + 1))
            echo -e "${BLUE}--- $(basename "$skill_dir") ---${NC}"
            validate_skill "$skill_dir"
            echo ""
        fi
    done

    echo "=== Summary ==="
    echo "Skills validated: $skill_count"
    echo -e "Passed: ${GREEN}$PASSED${NC}"
    echo -e "Failed: ${RED}$FAILED${NC}"
    echo -e "Warnings: ${YELLOW}$WARNINGS${NC}"

    [[ $FAILED -eq 0 ]]
}
+
+main "$@"