diff --git a/CLAUDE.md b/CLAUDE.md index 44f2376..c743463 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -38,7 +38,7 @@ Commands with Teams Variant ship as `{name}.md` (parallel subagents) and `{name} **Build-time asset distribution**: Skills and agents are stored once in `shared/skills/` and `shared/agents/`, then copied to each plugin at build time based on `plugin.json` manifests. This eliminates duplication in git. -**Working Memory**: Three shell-script hooks (`scripts/hooks/`) provide automatic session continuity. Toggleable via `devflow memory --enable/--disable/--status` or `devflow init --memory/--no-memory`. Stop hook → reads last turn from session transcript (`~/.claude/projects/{encoded-cwd}/{session_id}.jsonl`), spawns background `claude -p --model haiku` to update `.memory/WORKING-MEMORY.md` with structured sections (`## Now`, `## Progress`, `## Decisions`, `## Modified Files`, `## Context`, `## Session Log`; throttled: skips if triggered <2min ago; concurrent sessions serialize via mkdir-based lock). SessionStart hook → injects previous memory + git state as `additionalContext` on `/clear`, startup, or compact (warns if >1h stale; injects pre-compact memory snapshot when compaction happened mid-session). PreCompact hook → saves git state + WORKING-MEMORY.md snapshot + bootstraps minimal WORKING-MEMORY.md if none exists. Zero-ceremony context preservation. +**Working Memory**: Four shell-script hooks (`scripts/hooks/`) provide automatic session continuity. Toggleable via `devflow memory --enable/--disable/--status` or `devflow init --memory/--no-memory`. UserPromptSubmit (`prompt-capture-memory`) captures user prompt to `.memory/.pending-turns.jsonl` queue. Stop hook captures `assistant_message` (on `end_turn` only) to same queue, then spawns throttled background `claude -p --model haiku` updater (skips if triggered <2min ago; concurrent sessions serialize via mkdir-based lock). 
Background updater uses `mv`-based atomic handoff to process all pending turns in batch (capped at 10 most recent), with crash recovery via `.pending-turns.processing` file. Updates `.memory/WORKING-MEMORY.md` with structured sections (`## Now`, `## Progress`, `## Decisions`, `## Modified Files`, `## Context`, `## Session Log`). SessionStart hook → injects previous memory + git state as `additionalContext` on `/clear`, startup, or compact (warns if >1h stale; injects pre-compact memory snapshot when compaction happened mid-session). PreCompact hook → saves git state + WORKING-MEMORY.md snapshot + bootstraps minimal WORKING-MEMORY.md if none exists. Disabling memory removes all four hooks. Use `devflow memory --clear` to clean up pending queue files across projects. Zero-ceremony context preservation. **Ambient Mode**: Three-layer architecture for always-on intent classification. SessionStart hook (`session-start-classification`) reads lean classification rules (`~/.claude/skills/devflow:router/references/classification-rules.md`, ~30 lines) and injects as `additionalContext` — once per session, deterministic, zero model overhead. UserPromptSubmit hook (`preamble`) injects a one-sentence prompt per message triggering classification + router loading via Skill tool. Router SKILL.md is a pure skill lookup table (~50 lines) loaded on-demand only for GUIDED/ORCHESTRATED depth — maps intent×depth to domain and orchestration skills. Toggleable via `devflow ambient --enable/--disable/--status` or `devflow init`. 
@@ -57,7 +57,7 @@ devflow/ ├── plugins/devflow-*/ # 17 plugins (8 core + 9 optional language/ecosystem) ├── docs/reference/ # Detailed reference documentation ├── scripts/ # Helper scripts (statusline, docs-helpers) -│ └── hooks/ # Working Memory + ambient + learning hooks (stop, session-start-memory, session-start-classification, pre-compact, preamble, session-end-learning, stop-update-learning [deprecated], background-learning) +│ └── hooks/ # Working Memory + ambient + learning hooks (prompt-capture-memory, stop-update-memory, background-memory-update, session-start-memory, session-start-classification, pre-compact-memory, preamble, session-end-learning, stop-update-learning [deprecated], background-learning, get-mtime) ├── src/cli/ # TypeScript CLI (init, list, uninstall, ambient, learn, flags) ├── .claude-plugin/ # Marketplace registry ├── .docs/ # Project docs (reviews, design) — per-project @@ -105,7 +105,7 @@ Working memory files live in a dedicated `.memory/` directory: ``` .memory/ -├── WORKING-MEMORY.md # Auto-maintained by Stop hook (overwritten each session) +├── WORKING-MEMORY.md # Auto-maintained by background updater (queue-based, updated in batch) ├── backup.json # Pre-compact git state snapshot ├── learning-log.jsonl # Learning observations (JSONL, one entry per line) ├── learning.json # Project-level learning config (max runs, throttle, model, debug — no enabled field) @@ -113,6 +113,8 @@ Working memory files live in a dedicated `.memory/` directory: ├── .learning-session-count # Session IDs pending batch (one per line) ├── .learning-batch-ids # Session IDs for current batch run ├── .learning-notified-at # New artifact notification marker (epoch timestamp) +├── .pending-turns.jsonl # Queue of captured user/assistant turns (JSONL, ephemeral) +├── .pending-turns.processing # Atomic handoff during background processing (transient) └── knowledge/ ├── decisions.md # Architectural decisions (ADR-NNN, append-only) └── pitfalls.md # Known pitfalls 
(PF-NNN, area-specific gotchas) diff --git a/docs/reference/file-organization.md b/docs/reference/file-organization.md index fd5a437..85da5b4 100644 --- a/docs/reference/file-organization.md +++ b/docs/reference/file-organization.md @@ -46,10 +46,13 @@ devflow/ │ ├── stop-update-memory # Stop hook: writes WORKING-MEMORY.md │ ├── session-start-memory # SessionStart hook: injects memory + git state │ ├── pre-compact-memory # PreCompact hook: saves git state backup -│ ├── preamble # UserPromptSubmit hook: ambient skill injection +│ ├── prompt-capture-memory # UserPromptSubmit hook: captures prompts to queue +│ ├── background-memory-update # Background: queue-based WORKING-MEMORY.md updater +│ ├── preamble # UserPromptSubmit hook: ambient skill injection (zero file I/O) │ ├── session-end-learning # SessionEnd hook: batched learning trigger │ ├── stop-update-learning # Stop hook: deprecated stub (upgrade via devflow learn) │ ├── background-learning # Background: pattern detection via Sonnet +│ ├── get-mtime # Shared helper: portable mtime (BSD/GNU stat) │ ├── json-helper.cjs # Node.js jq-equivalent operations │ └── json-parse # Shell wrapper: jq with node fallback └── src/ @@ -144,7 +147,7 @@ Skills and agents are **not duplicated** in git. Instead: Included settings: - `statusLine` - Configurable HUD with presets (replaces legacy statusline.sh) -- `hooks` - Working Memory hooks (Stop, SessionStart, PreCompact) + Learning Stop hook +- `hooks` - Working Memory hooks (UserPromptSubmit, Stop, SessionStart, PreCompact) + Learning Stop hook - `env.ENABLE_TOOL_SEARCH` - Deferred MCP tool loading (~85% token savings) - `env.ENABLE_LSP_TOOL` - Language Server Protocol support - `env.CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS` - Agent Teams for peer-to-peer collaboration @@ -153,17 +156,21 @@ Included settings: ## Working Memory Hooks -Three hooks in `scripts/hooks/` provide automatic session continuity. 
Toggleable via `devflow memory --enable/--disable/--status` or `devflow init --memory/--no-memory`. +Four hooks in `scripts/hooks/` provide automatic session continuity. Toggleable via `devflow memory --enable/--disable/--status` or `devflow init --memory/--no-memory`. -A fourth hook (`session-end-learning`) provides self-learning. Toggleable via `devflow learn --enable/--disable/--status` or `devflow init --learn/--no-learn`: +A fifth hook (`session-end-learning`) provides self-learning. Toggleable via `devflow learn --enable/--disable/--status` or `devflow init --learn/--no-learn`: | Hook | Event | File | Purpose | |------|-------|------|---------| -| `stop-update-memory` | Stop | `.memory/WORKING-MEMORY.md` | Throttled (skips if <2min fresh). Slim instruction after first write. | +| `prompt-capture-memory` | UserPromptSubmit | `.memory/.pending-turns.jsonl` | Captures user prompts to queue. Zero classification overhead. | +| `stop-update-memory` | Stop | `.memory/.pending-turns.jsonl` | Captures assistant turns to queue. Throttled (skips if <2min fresh). Spawns background updater. | +| `background-memory-update` | (background) | `.memory/WORKING-MEMORY.md` | Queue-based updater spawned by stop-update-memory. Reads queued turns + git state, writes WORKING-MEMORY.md via `claude -p --model haiku`. | | `session-start-memory` | SessionStart | reads WORKING-MEMORY.md | Injects previous memory + git state as `additionalContext`. Warns if >1h stale. Injects pre-compact snapshot when compaction occurred mid-session. | | `pre-compact-memory` | PreCompact | `.memory/backup.json` | Saves git state + WORKING-MEMORY.md snapshot. Bootstraps minimal WORKING-MEMORY.md if none exists. | -**Flow**: Session ends → Stop hook checks throttle (skips if <2min fresh) → spawns background updater → background updater reads session transcript + git state → fresh `claude -p --model haiku` writes WORKING-MEMORY.md. 
On `/clear` or new session → SessionStart injects memory as `additionalContext` (system context, not user-visible) with staleness warning if >1h old. +**Flow**: User sends prompt → UserPromptSubmit hook (prompt-capture-memory) appends user turn to `.memory/.pending-turns.jsonl`. Session ends → Stop hook appends assistant turn to queue, checks throttle (skips if <2min fresh), spawns background updater → background updater reads queued turns + git state → fresh `claude -p --model haiku` writes WORKING-MEMORY.md. On `/clear` or new session → SessionStart injects memory as `additionalContext` (system context, not user-visible) with staleness warning if >1h old. + +`devflow memory --disable` removes all four hooks. Use `devflow memory --clear` to clean up pending queue files (`.pending-turns.jsonl`, `.pending-turns.processing`) across all projects. Hooks auto-create `.memory/` on first run — no manual setup needed per project. diff --git a/scripts/hooks/background-memory-update b/scripts/hooks/background-memory-update index 22e6cc2..6c9643a 100755 --- a/scripts/hooks/background-memory-update +++ b/scripts/hooks/background-memory-update @@ -2,16 +2,21 @@ # Background Working Memory Updater # Called by stop-update-memory as a detached background process. -# Reads the last turn from the session transcript, then uses a fresh `claude -p` -# invocation to update .memory/WORKING-MEMORY.md. +# Reads queued turns from .memory/.pending-turns.jsonl, then uses a fresh +# `claude -p` invocation to update .memory/WORKING-MEMORY.md. # On failure: logs error, does nothing (stale memory is better than fake data). set -e CWD="$1" -SESSION_ID="$2" -MEMORY_FILE="$3" -CLAUDE_BIN="$4" +CLAUDE_BIN="$2" + +if [ ! 
-d "$CWD" ]; then + echo "background-memory-update: CWD does not exist: $CWD" >&2 + exit 1 +fi + +MEMORY_FILE="$CWD/.memory/WORKING-MEMORY.md" # Source JSON parsing helpers (jq with node fallback) SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" @@ -21,6 +26,9 @@ source "$SCRIPT_DIR/log-paths" LOG_FILE="$(devflow_log_dir "$CWD")/.working-memory-update.log" LOCK_DIR="$CWD/.memory/.working-memory.lock" +QUEUE_FILE="$CWD/.memory/.pending-turns.jsonl" +PROCESSING_FILE="$CWD/.memory/.pending-turns.processing" + # --- Logging --- log() { @@ -36,16 +44,9 @@ rotate_log() { # --- Stale Lock Recovery --- -# Portable mtime in epoch seconds -get_mtime() { - if stat --version &>/dev/null 2>&1; then - stat -c %Y "$1" - else - stat -f %m "$1" - fi -} +source "$SCRIPT_DIR/get-mtime" -STALE_THRESHOLD=300 # 5 min — generous vs 30-60s normal runtime +STALE_THRESHOLD=300 # 5 min break_stale_lock() { if [ ! -d "$LOCK_DIR" ]; then return; fi @@ -59,7 +60,7 @@ break_stale_lock() { fi } -# --- Locking (mkdir-based, POSIX-atomic) --- +# --- Locking --- acquire_lock() { local timeout=90 @@ -79,74 +80,141 @@ cleanup() { } trap cleanup EXIT -# --- Transcript Extraction --- +# --- Main --- -extract_last_turn() { - # Compute transcript path: Claude Code stores transcripts at - # ~/.claude/projects/{cwd-with-slashes-replaced-by-hyphens}/{session_id}.jsonl - local encoded_cwd - encoded_cwd=$(echo "$CWD" | sed 's|^/||' | tr '/' '-') - local transcript="$HOME/.claude/projects/-${encoded_cwd}/${SESSION_ID}.jsonl" +log "Starting queue-based update" - if [ ! -f "$transcript" ]; then - log "Transcript not found at $transcript" - return 1 - fi +break_stale_lock + +if ! 
acquire_lock; then + log "Lock timeout after 90s — skipping update" + trap - EXIT + exit 0 +fi + +rotate_log - # Extract last user and assistant text from JSONL - # Each line is a JSON object with "type" field - local last_user last_assistant - - last_user=$(grep '"type":"user"' "$transcript" 2>/dev/null \ - | tail -3 \ - | while IFS= read -r line; do printf '%s\n' "$line" | head -c 100000 | json_extract_messages; done \ - | awk 'NF' \ - | tail -1) - - last_assistant=$(grep '"type":"assistant"' "$transcript" 2>/dev/null \ - | tail -3 \ - | while IFS= read -r line; do printf '%s\n' "$line" | head -c 100000 | json_extract_messages; done \ - | awk 'NF' \ - | tail -1) - - # Truncate to ~4000 chars total to keep token cost low - if [ ${#last_user} -gt 2000 ]; then - last_user="${last_user:0:2000}... [truncated]" +# --- Crash recovery: if .processing exists from a failed previous run --- +if [ -f "$PROCESSING_FILE" ]; then + log "Found leftover .processing file from previous crash — recovering" + if [ -f "$QUEUE_FILE" ]; then + # Append new queue entries to the leftover processing file + cat "$QUEUE_FILE" >> "$PROCESSING_FILE" + rm "$QUEUE_FILE" + log "Merged new queue entries into recovery batch" fi - if [ ${#last_assistant} -gt 2000 ]; then - last_assistant="${last_assistant:0:2000}... [truncated]" + # Cap processing file to prevent unbounded growth from persistent Write failures + # NOTE: same 200/100 threshold in stop-update-memory + PROC_LINES=$(wc -l < "$PROCESSING_FILE" | tr -d ' ') + if [ "$PROC_LINES" -gt 200 ]; then + tail -100 "$PROCESSING_FILE" > "$PROCESSING_FILE.tmp" && mv "$PROCESSING_FILE.tmp" "$PROCESSING_FILE" + log "Processing file overflow: truncated from $PROC_LINES to 100 lines" fi - - if [ -z "$last_user" ] && [ -z "$last_assistant" ]; then - log "No text content found in transcript" - return 1 +else + # Normal path: atomic handoff + if [ ! 
-f "$QUEUE_FILE" ]; then + log "No pending turns in queue — skipping" + exit 0 fi + mv "$QUEUE_FILE" "$PROCESSING_FILE" +fi - LAST_USER_TEXT="$last_user" - LAST_ASSISTANT_TEXT="$last_assistant" - return 0 -} +# Count entries +TOTAL_LINES=$(wc -l < "$PROCESSING_FILE" | tr -d ' ') +log "Processing $TOTAL_LINES queued entries" -# --- Main --- +if [ "$TOTAL_LINES" -eq 0 ]; then + rm -f "$PROCESSING_FILE" + log "Processing file empty — skipping" + exit 0 +fi -# Wait for parent session to flush transcript -sleep 3 +# --- Build turns from queue (cap at 10 most recent turns) --- +# A "turn" is a user+assistant pair. Read entries in order, pair adjacent user→assistant. -log "Starting update for session $SESSION_ID" +TURNS_TEXT="" +TURN_COUNT=0 +MAX_TURNS=10 -# Break stale locks from previous zombie processes -break_stale_lock +# Take last entries if too many (each turn = 2 lines, so keep last MAX_TURNS*2 lines) +MAX_LINES=$((MAX_TURNS * 2)) +if [ "$TOTAL_LINES" -gt "$MAX_LINES" ]; then + ENTRIES=$(tail -"$MAX_LINES" "$PROCESSING_FILE") + log "Capped to last $MAX_LINES entries (from $TOTAL_LINES)" +else + ENTRIES=$(cat "$PROCESSING_FILE") +fi -# Acquire lock (other sessions may be updating concurrently) -if ! acquire_lock; then - log "Lock timeout after 90s — skipping update for session $SESSION_ID" - trap - EXIT +# Single-pass extraction: extract role+content as TSV in one jq/node invocation. +# Newlines within content are collapsed to a space to preserve TSV line integrity. 
+if [ "$_HAS_JQ" = "true" ]; then + EXTRACTED=$(jq -r '(.role // "") + "\t" + ((.content // "") | gsub("\n"; " "))' <<< "$ENTRIES" 2>/dev/null) +else + EXTRACTED=$(node -e ' + const lines = require("fs").readFileSync("/dev/stdin","utf8").split("\n"); + for (const line of lines) { + if (!line.trim()) continue; + try { + const obj = JSON.parse(line); + const role = obj.role || ""; + const content = (obj.content || "").replace(/\n/g, " "); + process.stdout.write(role + "\t" + content + "\n"); + } catch {} + } + ' <<< "$ENTRIES" 2>/dev/null) +fi + +CURRENT_USER="" +while IFS=$'\t' read -r ROLE CONTENT; do + [ -z "$ROLE" ] && continue + + if [ "$ROLE" = "user" ]; then + # If we had an orphan user (no assistant followed), emit it solo + if [ -n "$CURRENT_USER" ]; then + TURN_COUNT=$((TURN_COUNT + 1)) + TURNS_TEXT="${TURNS_TEXT} +Turn ${TURN_COUNT}: +User: ${CURRENT_USER} +" + fi + CURRENT_USER="$CONTENT" + elif [ "$ROLE" = "assistant" ]; then + TURN_COUNT=$((TURN_COUNT + 1)) + if [ -n "$CURRENT_USER" ]; then + TURNS_TEXT="${TURNS_TEXT} +Turn ${TURN_COUNT}: +User: ${CURRENT_USER} +Assistant: ${CONTENT} +" + CURRENT_USER="" + else + # Orphan assistant (no preceding user) + TURNS_TEXT="${TURNS_TEXT} +Turn ${TURN_COUNT}: +Assistant: ${CONTENT} +" + fi + fi +done <<< "$EXTRACTED" + +# Flush any trailing orphan user +if [ -n "$CURRENT_USER" ]; then + TURN_COUNT=$((TURN_COUNT + 1)) + TURNS_TEXT="${TURNS_TEXT} +Turn ${TURN_COUNT}: +User: ${CURRENT_USER} +" +fi + +if [ "$TURN_COUNT" -eq 0 ]; then + rm -f "$PROCESSING_FILE" + log "No parseable turns in queue — skipping" exit 0 fi -rotate_log +log "Built $TURN_COUNT turns from queue" -# Read existing memory for merge context +# --- Read existing memory --- EXISTING_MEMORY="" PRE_UPDATE_MTIME=0 if [ -f "$MEMORY_FILE" ]; then @@ -154,7 +222,7 @@ if [ -f "$MEMORY_FILE" ]; then PRE_UPDATE_MTIME=$(get_mtime "$MEMORY_FILE") fi -# Gather git state (always available, used as fallback too) +# --- Gather git state --- GIT_STATE="" if cd 
"$CWD" 2>/dev/null && git rev-parse --git-dir >/dev/null 2>&1; then GIT_STATUS=$(git status --short 2>/dev/null | head -20) @@ -169,32 +237,14 @@ Diff summary: ${GIT_DIFF}" fi -# Extract last turn from transcript (or fall back to git-only) -LAST_USER_TEXT="" -LAST_ASSISTANT_TEXT="" -EXCHANGE_SECTION="" - -if extract_last_turn; then - log "--- Extracted user text (${#LAST_USER_TEXT} chars) ---" - log "$LAST_USER_TEXT" - log "--- Extracted assistant text (${#LAST_ASSISTANT_TEXT} chars) ---" - log "$LAST_ASSISTANT_TEXT" - log "--- End transcript extraction ---" - EXCHANGE_SECTION="Last exchange: -User: ${LAST_USER_TEXT} -Assistant: ${LAST_ASSISTANT_TEXT}" -else - log "Falling back to git-state-only context" - EXCHANGE_SECTION="(Session transcript not available — using git state only)" -fi - -# Build prompt for fresh claude -p invocation +# --- Build prompt --- PROMPT="You are a working memory updater. Your ONLY job is to update the file at ${MEMORY_FILE} using the Write tool. Do it immediately — do not ask questions or explain. Current working memory: ${EXISTING_MEMORY:-"(no existing content)"} -${EXCHANGE_SECTION} +Recent session turns: +${TURNS_TEXT} Git state: ${GIT_STATE:-"(not a git repo)"} @@ -212,7 +262,7 @@ log "--- Full prompt being passed to claude -p ---" log "$PROMPT" log "--- End prompt ---" -# Run fresh claude -p (no --resume, no conversation confusion) +# --- Run claude -p --- TIMEOUT=120 DEVFLOW_BG_UPDATER=1 "$CLAUDE_BIN" -p \ @@ -223,32 +273,33 @@ DEVFLOW_BG_UPDATER=1 "$CLAUDE_BIN" -p \ >> "$LOG_FILE" 2>&1 & CLAUDE_PID=$! -# Watchdog: kill claude if it exceeds timeout ( sleep "$TIMEOUT" && kill "$CLAUDE_PID" 2>/dev/null ) & WATCHDOG_PID=$! 
if wait "$CLAUDE_PID" 2>/dev/null; then - # Validate the file was actually modified (detect silent Write failures) if [ -f "$MEMORY_FILE" ]; then NEW_MTIME=$(get_mtime "$MEMORY_FILE") if [ "$NEW_MTIME" -gt "$PRE_UPDATE_MTIME" ]; then - log "Update completed for session $SESSION_ID" + log "Update completed successfully" else - log "Update finished but file was not modified for session $SESSION_ID (possible Write tool failure)" + log "Update finished but file was not modified (possible Write tool failure)" fi else - log "Update finished but file does not exist for session $SESSION_ID" + log "Update finished but file does not exist" fi + # Clean up on success (exit 0) even if Write was not invoked — + # retrying same turns won't help; new turns are captured in the queue + rm -f "$PROCESSING_FILE" else EXIT_CODE=$? if [ "$EXIT_CODE" -gt 128 ]; then - log "Update timed out (killed after ${TIMEOUT}s) for session $SESSION_ID" + log "Update timed out (killed after ${TIMEOUT}s)" else - log "Update failed for session $SESSION_ID (exit code $EXIT_CODE)" + log "Update failed (exit code $EXIT_CODE)" fi + # Leave .processing file for crash recovery on next run fi -# Clean up watchdog kill "$WATCHDOG_PID" 2>/dev/null || true wait "$WATCHDOG_PID" 2>/dev/null || true diff --git a/scripts/hooks/get-mtime b/scripts/hooks/get-mtime new file mode 100755 index 0000000..363f479 --- /dev/null +++ b/scripts/hooks/get-mtime @@ -0,0 +1,23 @@ +#!/bin/bash +# Portable file mtime extraction (BSD/GNU stat compatible) +# Usage: source "$SCRIPT_DIR/get-mtime" then mtime=$(get_mtime "/path/to/file") +# Returns: Unix epoch seconds of file's last modification time + +# Cache platform detection on first call to avoid repeated capability probes +_GET_MTIME_STAT_TYPE="" + +get_mtime() { + local file="$1" + if [ -z "$_GET_MTIME_STAT_TYPE" ]; then + if stat --version &>/dev/null 2>&1; then + _GET_MTIME_STAT_TYPE="gnu" + else + _GET_MTIME_STAT_TYPE="bsd" + fi + fi + if [ "$_GET_MTIME_STAT_TYPE" = "gnu" ]; 
then + stat -c %Y "$file" 2>/dev/null # GNU (Linux) + else + stat -f %m "$file" 2>/dev/null # BSD (macOS) + fi +} diff --git a/scripts/hooks/json-helper.cjs b/scripts/hooks/json-helper.cjs index 8da9f5e..3cc1612 100755 --- a/scripts/hooks/json-helper.cjs +++ b/scripts/hooks/json-helper.cjs @@ -14,6 +14,7 @@ // construct [--arg k v] Build JSON object with args // update-field [--json] Set field on stdin JSON (--json parses value) // update-fields Apply multiple field updates from stdin JSON +// extract-cwd-prompt Extract cwd + prompt fields, NUL-byte delimited // extract-text-messages Extract text content from Claude message format // merge-evidence Flatten, dedupe, limit to 10 from stdin JSON // slurp-sort [limit] Read JSONL, sort by field desc, limit results @@ -223,6 +224,18 @@ try { break; } + case 'extract-cwd-prompt': { + // Extract cwd and prompt from hook JSON in one pass. + // Outputs: cwd + ASCII SOH (0x01) + prompt (no trailing newline). + // Caller splits with: cut -d$'\001' -f1 and cut -d$'\001' -f2- + // SOH is used (not NUL) for bash 3.2 compatibility with cut. + const input = JSON.parse(readStdin()); + const cwd = input.cwd || ''; + const prompt = input.prompt || ''; + process.stdout.write(cwd + '\x01' + prompt); + break; + } + case 'extract-text-messages': { const input = JSON.parse(readStdin()); const content = input?.message?.content; diff --git a/scripts/hooks/json-parse b/scripts/hooks/json-parse index 2c109fe..cd5f80a 100755 --- a/scripts/hooks/json-parse +++ b/scripts/hooks/json-parse @@ -157,6 +157,24 @@ json_array_item() { fi } +# --- Multi-field batched extraction --- + +# Extract cwd and prompt from stdin JSON in a single subprocess. +# Uses ASCII SOH (U+0001) as delimiter — safe for prompts containing tabs or backslashes, +# and compatible with bash 3.2 parameter expansion (unlike NUL byte). 
+# Caller pattern (zero extra subprocesses for the split): +# FIELDS=$(printf '%s' "$INPUT" | json_extract_cwd_prompt) +# CWD="${FIELDS%%$'\001'*}" +# PROMPT="${FIELDS#*$'\001'}" +# This replaces the @tsv/@cut pattern, which corrupts tab chars in prompts to "\t". +json_extract_cwd_prompt() { + if [ "$_HAS_JQ" = "true" ]; then + jq -r '(.cwd // "") + "\u0001" + (.prompt // "")' 2>/dev/null + else + node "$_JSON_HELPER" extract-cwd-prompt + fi +} + # --- Transcript extraction --- # Extract text messages from Claude message JSON. Usage: printf '%s\n' '{"message":...}' | json_extract_messages diff --git a/scripts/hooks/pre-compact-memory b/scripts/hooks/pre-compact-memory index 08ef19e..84348c6 100644 --- a/scripts/hooks/pre-compact-memory +++ b/scripts/hooks/pre-compact-memory @@ -14,7 +14,7 @@ if [ "$_JSON_AVAILABLE" = "false" ]; then exit 0; fi INPUT=$(cat) -CWD=$(echo "$INPUT" | json_field "cwd" "") +CWD=$(printf '%s' "$INPUT" | json_field "cwd" "") if [ -z "$CWD" ]; then exit 0 fi diff --git a/scripts/hooks/preamble b/scripts/hooks/preamble index 1b58f7c..234c050 100755 --- a/scripts/hooks/preamble +++ b/scripts/hooks/preamble @@ -13,13 +13,14 @@ if [ "$_JSON_AVAILABLE" = "false" ]; then exit 0; fi INPUT=$(cat) -CWD=$(echo "$INPUT" | json_field "cwd" "") -if [ -z "$CWD" ]; then +FIELDS=$(printf '%s' "$INPUT" | json_extract_cwd_prompt) +CWD="${FIELDS%%$'\001'*}" +PROMPT="${FIELDS#*$'\001'}" + +if [ -z "$CWD" ] || [ ! -d "$CWD" ]; then exit 0 fi -PROMPT=$(echo "$INPUT" | json_field "prompt" "") - # Skip slash commands — they have their own orchestration if [[ "$PROMPT" == /* ]]; then exit 0 diff --git a/scripts/hooks/prompt-capture-memory b/scripts/hooks/prompt-capture-memory new file mode 100755 index 0000000..4b3d082 --- /dev/null +++ b/scripts/hooks/prompt-capture-memory @@ -0,0 +1,42 @@ +#!/bin/bash + +# Working Memory: Prompt Capture Hook (UserPromptSubmit) +# Captures user prompts to .memory/.pending-turns.jsonl queue. 
+# Registered/removed with memory hooks — does not run when memory disabled. + +set -e + +# Break feedback loop: background updater's haiku session triggers hooks +if [ "${DEVFLOW_BG_UPDATER:-}" = "1" ]; then exit 0; fi + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +source "$SCRIPT_DIR/json-parse" +if [ "$_JSON_AVAILABLE" = "false" ]; then exit 0; fi + +INPUT=$(cat) + +FIELDS=$(printf '%s' "$INPUT" | json_extract_cwd_prompt) +CWD="${FIELDS%%$'\001'*}" +PROMPT="${FIELDS#*$'\001'}" + +if [ -z "$CWD" ] || [ ! -d "$CWD" ]; then exit 0; fi + +source "$SCRIPT_DIR/ensure-memory-gitignore" "$CWD" || exit 0 + +if [ -z "$PROMPT" ]; then exit 0; fi + +# Truncate to 2000 chars +if [ ${#PROMPT} -gt 2000 ]; then + PROMPT="${PROMPT:0:2000}... [truncated]" +fi + +TS=$(date +%s) +if [ "$_HAS_JQ" = "true" ]; then + jq -n -c --arg role "user" --arg content "$PROMPT" --argjson ts "$TS" \ + '{role: $role, content: $content, ts: $ts}' >> "$CWD/.memory/.pending-turns.jsonl" +else + node -e "process.stdout.write(JSON.stringify({role:'user',content:process.argv[1],ts:parseInt(process.argv[2])})+'\n')" \ + -- "$PROMPT" "$TS" >> "$CWD/.memory/.pending-turns.jsonl" +fi + +exit 0 diff --git a/scripts/hooks/session-end-learning b/scripts/hooks/session-end-learning index 6846e05..315c853 100755 --- a/scripts/hooks/session-end-learning +++ b/scripts/hooks/session-end-learning @@ -24,7 +24,7 @@ if [ "$_JSON_AVAILABLE" = "false" ]; then exit 0; fi # Read hook input from stdin (Claude passes JSON with cwd, session_id, etc.) INPUT=$(cat) -CWD=$(echo "$INPUT" | json_field "cwd" "") +CWD=$(printf '%s' "$INPUT" | json_field "cwd" "") [ -z "$CWD" ] && exit 0 MEMORY_DIR="$CWD/.memory" @@ -67,7 +67,7 @@ if [ ! 
-d "$PROJECTS_DIR" ]; then fi # Extract session ID from hook JSON (preferred), fall back to most recent transcript -SESSION_ID=$(echo "$INPUT" | json_field "session_id" "") +SESSION_ID=$(printf '%s' "$INPUT" | json_field "session_id" "") if [ -n "$SESSION_ID" ] && [ -f "$PROJECTS_DIR/${SESSION_ID}.jsonl" ]; then TRANSCRIPT="$PROJECTS_DIR/${SESSION_ID}.jsonl" else diff --git a/scripts/hooks/session-start-classification b/scripts/hooks/session-start-classification index f498f60..aa13500 100755 --- a/scripts/hooks/session-start-classification +++ b/scripts/hooks/session-start-classification @@ -12,7 +12,7 @@ if [ "$_JSON_AVAILABLE" = "false" ]; then exit 0; fi INPUT=$(cat) -CWD=$(echo "$INPUT" | json_field "cwd" "") +CWD=$(printf '%s' "$INPUT" | json_field "cwd" "") if [ -z "$CWD" ]; then exit 0; fi CLASSIFICATION_RULES="$HOME/.claude/skills/devflow:router/references/classification-rules.md" diff --git a/scripts/hooks/session-start-memory b/scripts/hooks/session-start-memory index e790136..dd0c8a5 100644 --- a/scripts/hooks/session-start-memory +++ b/scripts/hooks/session-start-memory @@ -13,7 +13,7 @@ if [ "$_JSON_AVAILABLE" = "false" ]; then exit 0; fi INPUT=$(cat) -CWD=$(echo "$INPUT" | json_field "cwd" "") +CWD=$(printf '%s' "$INPUT" | json_field "cwd" "") if [ -z "$CWD" ]; then exit 0 fi @@ -28,11 +28,8 @@ if [ -f "$MEMORY_FILE" ]; then MEMORY_CONTENT=$(cat "$MEMORY_FILE") # Compute staleness warning - if stat --version &>/dev/null 2>&1; then - FILE_MTIME=$(stat -c %Y "$MEMORY_FILE") - else - FILE_MTIME=$(stat -f %m "$MEMORY_FILE") - fi + source "$SCRIPT_DIR/get-mtime" + FILE_MTIME=$(get_mtime "$MEMORY_FILE") NOW=$(date +%s) AGE=$(( NOW - FILE_MTIME )) diff --git a/scripts/hooks/stop-update-memory b/scripts/hooks/stop-update-memory index d8e695c..d5f13c3 100755 --- a/scripts/hooks/stop-update-memory +++ b/scripts/hooks/stop-update-memory @@ -1,88 +1,142 @@ #!/bin/bash # Working Memory: Stop Hook -# Spawns a background process to update .memory/WORKING-MEMORY.md 
asynchronously. -# The session ends immediately — no visible edit in the TUI. +# Captures assistant responses to .memory/.pending-turns.jsonl queue, +# then spawns background updater (throttled) to process accumulated turns. # On failure: does nothing (stale memory is better than fake data). set -e # Break feedback loop: background updater's headless session triggers stop hook on exit. -# DEVFLOW_BG_UPDATER is set by background-memory-update before invoking claude. if [ "${DEVFLOW_BG_UPDATER:-}" = "1" ]; then exit 0; fi -# Resolve script directory once (used for json-parse, ensure-memory-gitignore, and updater) +# Resolve script directory once SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" -# JSON parsing (jq with node fallback) — silently no-op if neither available +# JSON parsing (jq with node fallback) source "$SCRIPT_DIR/json-parse" if [ "$_JSON_AVAILABLE" = "false" ]; then exit 0; fi INPUT=$(cat) -# Resolve project directory — bail if missing -CWD=$(echo "$INPUT" | json_field "cwd" "") -if [ -z "$CWD" ]; then - exit 0 +# Resolve project directory and stop reason in one subprocess (consistent with +# batched extraction pattern used in UserPromptSubmit hooks). +# Uses ASCII SOH (0x01) as delimiter for bash 3.2 compatibility with cut. +if [ "$_HAS_JQ" = "true" ]; then + _FIELDS=$(printf '%s' "$INPUT" | jq -r '(.cwd // "") + "\u0001" + (.stop_reason // "")') +else + _FIELDS=$(printf '%s' "$INPUT" | node -e " + const j=JSON.parse(require('fs').readFileSync('/dev/stdin','utf8')); + process.stdout.write((j.cwd||'')+'\x01'+(j.stop_reason||''))") fi +CWD="${_FIELDS%%$'\001'*}" +STOP_REASON="${_FIELDS#*$'\001'}" + +if [ -z "$CWD" ] || [ ! 
-d "$CWD" ]; then exit 0; fi -# Auto-create .memory/ and ensure .gitignore entries (idempotent after first run) +# Auto-create .memory/ and ensure .gitignore entries source "$SCRIPT_DIR/ensure-memory-gitignore" "$CWD" || exit 0 -# Logging (shared log file with background updater; [stop-hook] prefix distinguishes) -MEMORY_FILE="$CWD/.memory/WORKING-MEMORY.md" +source "$SCRIPT_DIR/get-mtime" + +# Logging source "$SCRIPT_DIR/log-paths" LOG_FILE="$(devflow_log_dir "$CWD")/.working-memory-update.log" log() { echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] [stop-hook] $1" >> "$LOG_FILE"; } -# Throttle: skip if stop hook was triggered within the last 2 minutes -# Uses a marker file touched BEFORE spawning the updater — prevents race condition -# where multiple hooks see stale WORKING-MEMORY.md mtime and all bypass throttle. +# --- Filter: only capture end_turn stops --- +if [ "$STOP_REASON" != "end_turn" ]; then + exit 0 +fi + +# --- Extract assistant_message (handles both string and content array) --- +ASSISTANT_MSG="" +if [ "$_HAS_JQ" = "true" ]; then + ASSISTANT_MSG=$(printf '%s' "$INPUT" | jq -r ' + if (.assistant_message | type) == "string" then .assistant_message + elif (.assistant_message | type) == "array" then + [.assistant_message[] | select(.type == "text") | .text] | join("\n") + else "" end + ' 2>/dev/null || true) +else + # Node fallback: extract assistant_message, try as string first + ASSISTANT_MSG=$(printf '%s' "$INPUT" | node -e " + let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{ + try{const o=JSON.parse(d);const m=o.assistant_message; + if(typeof m==='string'){process.stdout.write(m)} + else if(Array.isArray(m)){process.stdout.write(m.filter(b=>b.type==='text').map(b=>b.text).join('\n'))} + }catch(e){} + })" 2>/dev/null || true) +fi + +# Skip if empty +if [ -z "$ASSISTANT_MSG" ]; then + log "Skipped: empty assistant_message" + exit 0 +fi + +# Truncate to 2000 chars +if [ ${#ASSISTANT_MSG} -gt 2000 ]; then + 
ASSISTANT_MSG="${ASSISTANT_MSG:0:2000}... [truncated]" +fi + +# --- Append to queue --- +QUEUE_FILE="$CWD/.memory/.pending-turns.jsonl" +TS=$(date +%s) +if [ "$_HAS_JQ" = "true" ]; then + jq -n -c --arg role "assistant" --arg content "$ASSISTANT_MSG" --argjson ts "$TS" \ + '{role: $role, content: $content, ts: $ts}' >> "$QUEUE_FILE" +else + node -e "process.stdout.write(JSON.stringify({role:'assistant',content:process.argv[1],ts:parseInt(process.argv[2])})+'\n')" \ + -- "$ASSISTANT_MSG" "$TS" >> "$QUEUE_FILE" +fi + +# Queue overflow safety: if >200 lines, keep last 100 +# NOTE: same 200/100 threshold in background-memory-update +if [ -f "$QUEUE_FILE" ]; then + LINE_COUNT=$(wc -l < "$QUEUE_FILE" | tr -d ' ') + if [ "$LINE_COUNT" -gt 200 ]; then + tail -100 "$QUEUE_FILE" > "$QUEUE_FILE.tmp" && mv "$QUEUE_FILE.tmp" "$QUEUE_FILE" + log "Queue overflow: truncated from $LINE_COUNT to 100 lines" + fi +fi + +log "Captured assistant turn (${#ASSISTANT_MSG} chars)" + +# --- Throttle: only spawn background updater every 2 minutes --- TRIGGER_MARKER="$CWD/.memory/.working-memory-last-trigger" if [ -f "$TRIGGER_MARKER" ]; then - if stat --version &>/dev/null 2>&1; then - MARKER_MTIME=$(stat -c %Y "$TRIGGER_MARKER") - else - MARKER_MTIME=$(stat -f %m "$TRIGGER_MARKER") - fi + MARKER_MTIME=$(get_mtime "$TRIGGER_MARKER") NOW=$(date +%s) AGE=$(( NOW - MARKER_MTIME )) if [ "$AGE" -lt 120 ]; then - log "Skipped: triggered ${AGE}s ago (throttled)" + log "Throttled: triggered ${AGE}s ago (capture saved, processing deferred)" exit 0 fi fi -# Resolve claude binary — if not found, skip (graceful degradation) +# Resolve claude binary CLAUDE_BIN=$(command -v claude 2>/dev/null || true) if [ -z "$CLAUDE_BIN" ]; then - log "Skipped: claude binary not found" - exit 0 -fi - -# Extract session ID from hook input -SESSION_ID=$(echo "$INPUT" | json_field "session_id" "") -if [ -z "$SESSION_ID" ]; then - log "Skipped: no session_id in hook input" + log "Skipped spawn: claude binary not found" exit 
0 fi -# Resolve the background updater script (same directory as this hook) +# Resolve updater script UPDATER="$SCRIPT_DIR/background-memory-update" if [ ! -x "$UPDATER" ]; then - log "Skipped: updater not found/not executable at $UPDATER" + log "Skipped spawn: updater not found/not executable at $UPDATER" exit 0 fi -# Touch marker BEFORE spawning updater — prevents race with concurrent hooks +# Touch marker BEFORE spawning (prevents race with concurrent hooks) touch "$TRIGGER_MARKER" -# Spawn background updater — detached, no effect on session exit -nohup "$UPDATER" "$CWD" "$SESSION_ID" "$MEMORY_FILE" "$CLAUDE_BIN" \ +# Spawn background updater — 2 args (down from 4) +nohup "$UPDATER" "$CWD" "$CLAUDE_BIN" \ > /dev/null 2>&1 & disown -log "Spawned background updater: session=$SESSION_ID cwd=$CWD memory=$MEMORY_FILE claude=$CLAUDE_BIN updater=$UPDATER" +log "Spawned background updater: cwd=$CWD claude=$CLAUDE_BIN" -# Allow stop immediately (no JSON output = proceed) exit 0 diff --git a/src/cli/commands/ambient.ts b/src/cli/commands/ambient.ts index 8550f9b..8d1a5ea 100644 --- a/src/cli/commands/ambient.ts +++ b/src/cli/commands/ambient.ts @@ -133,10 +133,10 @@ export function removeAmbientHook(settingsJson: string): string { } /** - * Check if the ambient hook (legacy or current) is registered in settings JSON. + * Check if the ambient hook (legacy or current) is registered in settings JSON or parsed Settings object. */ -export function hasAmbientHook(settingsJson: string): boolean { - const settings: Settings = JSON.parse(settingsJson); +export function hasAmbientHook(input: string | Settings): boolean { + const settings: Settings = typeof input === 'string' ?
JSON.parse(input) : input; const hasPreamble = settings.hooks?.UserPromptSubmit?.some((matcher) => matcher.hooks.some((h) => diff --git a/src/cli/commands/learn.ts b/src/cli/commands/learn.ts index 6f81cba..ac27ba3 100644 --- a/src/cli/commands/learn.ts +++ b/src/cli/commands/learn.ts @@ -134,11 +134,11 @@ export function removeLearningHook(settingsJson: string): string { } /** - * Check if the learning hook is registered in settings JSON. + * Check if the learning hook is registered in settings JSON or parsed Settings object. * Returns 'current' for SessionEnd hook, 'legacy' for old Stop hook, or false if absent. */ -export function hasLearningHook(settingsJson: string): 'current' | 'legacy' | false { - const settings: Settings = JSON.parse(settingsJson); +export function hasLearningHook(input: string | Settings): 'current' | 'legacy' | false { + const settings: Settings = typeof input === 'string' ? JSON.parse(input) : input; const hasSessionEnd = settings.hooks?.SessionEnd?.some((matcher) => matcher.hooks.some((h) => h.command.includes(LEARNING_HOOK_MARKER)), @@ -364,16 +364,20 @@ export const learnCommand = new Command('learn') // --- --list --- if (options.list) { - let observations: LearningObservation[]; - let invalidCount: number; + let logExists = true; try { - const logContent = await fs.readFile(logPath, 'utf-8'); - ({ observations, invalidCount } = loadAndCountObservations(logContent)); + await fs.access(logPath); } catch { + logExists = false; + } + + if (!logExists) { p.log.info('No observations yet. 
Learning log not found.'); return; } + const { observations, invalidCount } = await readObservations(logPath); + if (observations.length === 0) { p.log.info('No observations recorded yet.'); return; diff --git a/src/cli/commands/memory.ts b/src/cli/commands/memory.ts index aa3c906..a877460 100644 --- a/src/cli/commands/memory.ts +++ b/src/cli/commands/memory.ts @@ -4,26 +4,28 @@ import * as path from 'path'; import * as p from '@clack/prompts'; import color from 'picocolors'; import { getClaudeDirectory, getDevFlowDirectory } from '../utils/paths.js'; -import { createMemoryDir, migrateMemoryFiles } from '../utils/post-install.js'; +import { createMemoryDir, migrateMemoryFiles, discoverProjectGitRoots } from '../utils/post-install.js'; +import { getGitRoot } from '../utils/git.js'; import type { HookMatcher, Settings } from '../utils/hooks.js'; /** - * Map of hook event type → filename marker for the 3 memory hooks. + * Map of hook event type → filename marker for the 4 memory hooks. */ const MEMORY_HOOK_CONFIG: Record<string, string> = { + UserPromptSubmit: 'prompt-capture-memory', Stop: 'stop-update-memory', SessionStart: 'session-start-memory', PreCompact: 'pre-compact-memory', }; /** - * Add all 3 memory hooks (Stop, SessionStart, PreCompact) to settings JSON. - * Idempotent — skips hooks that already exist. Returns unchanged JSON if all 3 present. + * Add all 4 memory hooks (UserPromptSubmit, Stop, SessionStart, PreCompact) to settings JSON. + * Idempotent — skips hooks that already exist. Returns unchanged JSON if all 4 present.
*/ export function addMemoryHooks(settingsJson: string, devflowDir: string): string { const settings: Settings = JSON.parse(settingsJson); - if (hasMemoryHooks(settingsJson)) { + if (hasMemoryHooks(settings)) { return settingsJson; } @@ -31,8 +33,6 @@ export function addMemoryHooks(settingsJson: string, devflowDir: string): string settings.hooks = {}; } - let changed = false; - for (const [hookType, marker] of Object.entries(MEMORY_HOOK_CONFIG)) { const existing = settings.hooks[hookType] ?? []; const alreadyPresent = existing.some((matcher) => @@ -56,24 +56,21 @@ export function addMemoryHooks(settingsJson: string, devflowDir: string): string } settings.hooks[hookType].push(newEntry); - changed = true; } } - if (!changed) { - return settingsJson; - } - return JSON.stringify(settings, null, 2) + '\n'; } /** - * Remove all memory hooks (Stop, SessionStart, PreCompact) from settings JSON. + * Remove all memory hooks (UserPromptSubmit, Stop, SessionStart, PreCompact) from settings JSON. + * Accepts either a JSON string or a parsed Settings object (consistent with hasMemoryHooks/countMemoryHooks). * Idempotent — returns unchanged JSON if no memory hooks present. * Preserves non-memory hooks. Cleans empty arrays/objects. */ -export function removeMemoryHooks(settingsJson: string): string { - const settings: Settings = JSON.parse(settingsJson); +export function removeMemoryHooks(input: string | Settings): string { + const settingsJson = typeof input === 'string' ? input : JSON.stringify(input); + const settings: Settings = typeof input === 'string' ? JSON.parse(input) : structuredClone(input); if (!settings.hooks) { return settingsJson; @@ -112,17 +109,18 @@ export function removeMemoryHooks(settingsJson: string): string { } /** - * Check if ALL 3 memory hooks are registered in settings JSON. + * Check if ALL 4 memory hooks are registered in settings JSON or parsed Settings object. 
*/ -export function hasMemoryHooks(settingsJson: string): boolean { - return countMemoryHooks(settingsJson) === 3; +export function hasMemoryHooks(input: string | Settings): boolean { + return countMemoryHooks(input) === Object.keys(MEMORY_HOOK_CONFIG).length; } /** - * Count how many of the 3 memory hooks are present (0-3). + * Count how many of the 4 memory hooks are present (0-4). + * Accepts either a JSON string or a parsed Settings object. */ -export function countMemoryHooks(settingsJson: string): number { - const settings: Settings = JSON.parse(settingsJson); +export function countMemoryHooks(input: string | Settings): number { + const settings: Settings = typeof input === 'string' ? JSON.parse(input) : input; if (!settings.hooks) { return 0; @@ -144,27 +142,143 @@ interface MemoryOptions { enable?: boolean; disable?: boolean; status?: boolean; + clear?: boolean; +} + +/** + * Returns true if the given project root contains a `.memory/` directory. + * Treats unexpected errors (e.g. EACCES) as absent to avoid false positives. + */ +export async function hasMemoryDir(root: string): Promise<boolean> { + try { + await fs.access(path.join(root, '.memory')); + return true; + } catch (err: unknown) { + const code = (err as NodeJS.ErrnoException).code; + if (code === 'ENOENT' || code === 'ENOTDIR') { + return false; + } + // Unexpected error (e.g. EACCES) — log and treat as absent to avoid false positives + console.warn(`[memory] Unexpected error checking .memory/ in ${root}: ${(err as Error).message}`); + return false; + } +} + +/** + * Filters the provided git root paths to those that contain a `.memory/` directory. + */ +export async function filterProjectsWithMemory(gitRoots: string[]): Promise<string[]> { + const checks = await Promise.all(gitRoots.map(async (root) => ({ root, has: await hasMemoryDir(root) }))); + return checks.filter((c) => c.has).map((c) => c.root); +} + +/** + * Clean up memory queue files from the given project paths.
+ * Skips projects where the background updater lock is held to avoid data loss. + * Returns the count of projects from which at least one file was removed. + */ +export async function cleanQueueFiles(projectPaths: string[]): Promise<{ cleaned: number; projects: string[] }> { + const results = await Promise.all( + projectPaths.map(async (project) => { + const memDir = path.join(project, '.memory'); + const lockDir = path.join(memDir, '.working-memory.lock'); + try { + await fs.access(lockDir); + // Lock directory exists — background updater is active; skip to avoid data loss + return null; + } catch { + // No lock — safe to proceed + } + const [q, pr] = await Promise.all([ + fs.unlink(path.join(memDir, '.pending-turns.jsonl')).then(() => true).catch(() => false), + fs.unlink(path.join(memDir, '.pending-turns.processing')).then(() => true).catch(() => false), + ]); + return (q || pr) ? project : null; + }), + ); + const cleanedProjects = results.filter((p): p is string => p !== null); + return { cleaned: cleanedProjects.length, projects: cleanedProjects }; } export const memoryCommand = new Command('memory') - .description('Enable or disable working memory (session context preservation)') - .option('--enable', 'Add Stop/SessionStart/PreCompact hooks') + .description('Enable, disable, or clean up working memory (session context preservation)') + .option('--enable', 'Add UserPromptSubmit/Stop/SessionStart/PreCompact hooks') .option('--disable', 'Remove memory hooks') .option('--status', 'Show current state') + .option('--clear', 'Clean up queue files from projects') .action(async (options: MemoryOptions) => { - const hasFlag = options.enable || options.disable || options.status; + const hasFlag = options.enable || options.disable || options.status || options.clear; if (!hasFlag) { p.intro(color.bgCyan(color.white(' Working Memory '))); p.note( `${color.cyan('devflow memory --enable')} Add memory hooks\n` + `${color.cyan('devflow memory --disable')} Remove memory 
hooks\n` + - `${color.cyan('devflow memory --status')} Check current state`, + `${color.cyan('devflow memory --status')} Check current state\n` + + `${color.cyan('devflow memory --clear')} Clean up queue files`, 'Usage', ); p.outro(color.dim('Memory hooks provide automatic session context preservation')); return; } + if (options.clear) { + p.intro(color.bgCyan(color.white(' Memory Cleanup '))); + + // Discover current project and all known projects in parallel + const [gitRoots, gitRoot] = await Promise.all([discoverProjectGitRoots(), getGitRoot()]); + const [projectsWithMemory, currentProjectHasMem] = await Promise.all([ + filterProjectsWithMemory(gitRoots), + gitRoot ? hasMemoryDir(gitRoot) : Promise.resolve(false), + ]); + + const currentProject = gitRoot && currentProjectHasMem ? gitRoot : null; + + // Add current project if not already in list + const allProjects = currentProject && !projectsWithMemory.includes(currentProject) + ? [currentProject, ...projectsWithMemory] + : projectsWithMemory; + + if (allProjects.length === 0) { + p.log.info('No projects with .memory/ found'); + return; + } + + let targets: string[]; + if (!process.stdin.isTTY) { + // Non-interactive: clean all projects without prompting + p.log.info('Non-interactive mode detected, cleaning all projects'); + targets = allProjects; + } else { + const scope = await p.select({ + message: 'Clean up queue files from:', + options: [ + ...(currentProject ? [{ value: 'local' as const, label: `Current project (${currentProject})` }] : []), + { + value: 'all' as const, + label: `All projects (${allProjects.length} found)`, + hint: allProjects.map(proj => path.basename(proj)).join(', '), + }, + ], + }); + + if (p.isCancel(scope)) { + p.cancel('Cancelled'); + return; + } + + targets = scope === 'local' && currentProject ? 
[currentProject] : allProjects; + } + + const { cleaned, projects: cleanedProjects } = await cleanQueueFiles(targets); + for (const project of cleanedProjects) { + p.log.info(color.dim(`Cleaned: ${project}`)); + } + p.log.success(cleaned > 0 + ? `Cleaned queue files from ${cleaned} project${cleaned > 1 ? 's' : ''}` + : 'No queue files found to clean'); + return; + } + const claudeDir = getClaudeDirectory(); const settingsPath = path.join(claudeDir, 'settings.json'); @@ -182,12 +296,13 @@ export const memoryCommand = new Command('memory') if (options.status) { const count = countMemoryHooks(settingsContent); - if (count === 3) { - p.log.info(`Working memory: ${color.green('enabled')} (3/3 hooks)`); + const total = Object.keys(MEMORY_HOOK_CONFIG).length; + if (count === total) { + p.log.info(`Working memory: ${color.green('enabled')} (${total}/${total} hooks)`); } else if (count === 0) { p.log.info(`Working memory: ${color.dim('disabled')}`); } else { - p.log.info(`Working memory: ${color.yellow(`partial (${count}/3 hooks)`)} — run --enable to fix`); + p.log.info(`Working memory: ${color.yellow(`partial (${count}/${total} hooks)`)} — run --enable to fix`); } return; } @@ -195,25 +310,26 @@ export const memoryCommand = new Command('memory') const devflowDir = getDevFlowDirectory(); if (options.enable) { - const updated = addMemoryHooks(settingsContent, devflowDir); - if (updated === settingsContent) { + if (hasMemoryHooks(settingsContent)) { p.log.info('Working memory already enabled'); return; } + const updated = addMemoryHooks(settingsContent, devflowDir); await fs.writeFile(settingsPath, updated, 'utf-8'); await createMemoryDir(false); await migrateMemoryFiles(true); - p.log.success('Working memory enabled — Stop/SessionStart/PreCompact hooks registered'); + p.log.success('Working memory enabled — UserPromptSubmit/Stop/SessionStart/PreCompact hooks registered'); p.log.info(color.dim('Session context will be automatically preserved across conversations')); } if 
(options.disable) { - const updated = removeMemoryHooks(settingsContent); - if (updated === settingsContent) { + if (countMemoryHooks(settingsContent) === 0) { p.log.info('Working memory already disabled'); return; } + const updated = removeMemoryHooks(settingsContent); await fs.writeFile(settingsPath, updated, 'utf-8'); p.log.success('Working memory disabled — hooks removed'); + p.log.info(color.dim('Run devflow memory --clear to clean up queue files')); } }); diff --git a/tests/memory.test.ts b/tests/memory.test.ts index b4690ef..808e333 100644 --- a/tests/memory.test.ts +++ b/tests/memory.test.ts @@ -3,23 +3,25 @@ import { promises as fs } from 'fs'; import * as path from 'path'; import * as os from 'os'; import { exec } from 'child_process'; -import { addMemoryHooks, removeMemoryHooks, hasMemoryHooks, countMemoryHooks } from '../src/cli/commands/memory.js'; +import { addMemoryHooks, removeMemoryHooks, hasMemoryHooks, countMemoryHooks, cleanQueueFiles, hasMemoryDir, filterProjectsWithMemory } from '../src/cli/commands/memory.js'; import { createMemoryDir, migrateMemoryFiles } from '../src/cli/utils/post-install.js'; describe('addMemoryHooks', () => { - it('adds all 3 hook types to empty settings', () => { + it('adds all 4 hook types to empty settings', () => { const result = addMemoryHooks('{}', '/home/user/.devflow'); const settings = JSON.parse(result); + expect(settings.hooks.UserPromptSubmit).toHaveLength(1); expect(settings.hooks.Stop).toHaveLength(1); expect(settings.hooks.SessionStart).toHaveLength(1); expect(settings.hooks.PreCompact).toHaveLength(1); + expect(settings.hooks.UserPromptSubmit[0].hooks[0].command).toContain('prompt-capture-memory'); expect(settings.hooks.Stop[0].hooks[0].command).toContain('stop-update-memory'); expect(settings.hooks.SessionStart[0].hooks[0].command).toContain('session-start-memory'); expect(settings.hooks.PreCompact[0].hooks[0].command).toContain('pre-compact-memory'); }); - it('preserves existing hooks 
(UserPromptSubmit/ambient untouched)', () => { + it('preserves existing ambient preamble hook when adding memory hooks', () => { const input = JSON.stringify({ hooks: { UserPromptSubmit: [{ hooks: [{ type: 'command', command: 'preamble' }] }], @@ -28,8 +30,10 @@ describe('addMemoryHooks', () => { const result = addMemoryHooks(input, '/home/user/.devflow'); const settings = JSON.parse(result); - expect(settings.hooks.UserPromptSubmit).toHaveLength(1); + // Ambient preamble preserved alongside prompt-capture-memory + expect(settings.hooks.UserPromptSubmit).toHaveLength(2); expect(settings.hooks.UserPromptSubmit[0].hooks[0].command).toBe('preamble'); + expect(settings.hooks.UserPromptSubmit[1].hooks[0].command).toContain('prompt-capture-memory'); expect(settings.hooks.Stop).toHaveLength(1); expect(settings.hooks.SessionStart).toHaveLength(1); expect(settings.hooks.PreCompact).toHaveLength(1); @@ -45,6 +49,7 @@ describe('addMemoryHooks', () => { it('adds only missing hooks when partial state (1 hook missing)', () => { const input = JSON.stringify({ hooks: { + UserPromptSubmit: [{ hooks: [{ type: 'command', command: '/path/prompt-capture-memory', timeout: 10 }] }], Stop: [{ hooks: [{ type: 'command', command: '/path/stop-update-memory', timeout: 10 }] }], SessionStart: [{ hooks: [{ type: 'command', command: '/path/session-start-memory', timeout: 10 }] }], }, @@ -53,6 +58,7 @@ describe('addMemoryHooks', () => { const settings = JSON.parse(result); // Existing hooks preserved + expect(settings.hooks.UserPromptSubmit).toHaveLength(1); expect(settings.hooks.Stop).toHaveLength(1); expect(settings.hooks.SessionStart).toHaveLength(1); // Missing hook added @@ -60,6 +66,30 @@ describe('addMemoryHooks', () => { expect(settings.hooks.PreCompact[0].hooks[0].command).toContain('pre-compact-memory'); }); + it('adds UserPromptSubmit prompt-capture-memory alongside existing preamble (upgrade path)', () => { + // Simulate a 3-hook install (pre-upgrade) that already has ambient preamble 
+ const input = JSON.stringify({ + hooks: { + UserPromptSubmit: [{ hooks: [{ type: 'command', command: '/path/preamble' }] }], + Stop: [{ hooks: [{ type: 'command', command: '/path/stop-update-memory', timeout: 10 }] }], + SessionStart: [{ hooks: [{ type: 'command', command: '/path/session-start-memory', timeout: 10 }] }], + PreCompact: [{ hooks: [{ type: 'command', command: '/path/pre-compact-memory', timeout: 10 }] }], + }, + }); + const result = addMemoryHooks(input, '/home/user/.devflow'); + const settings = JSON.parse(result); + + // prompt-capture-memory added; preamble kept + expect(settings.hooks.UserPromptSubmit).toHaveLength(2); + const commands = settings.hooks.UserPromptSubmit.map((m: { hooks: { command: string }[] }) => m.hooks[0].command); + expect(commands.some((c: string) => c.includes('preamble'))).toBe(true); + expect(commands.some((c: string) => c.includes('prompt-capture-memory'))).toBe(true); + // Other hooks unchanged + expect(settings.hooks.Stop).toHaveLength(1); + expect(settings.hooks.SessionStart).toHaveLength(1); + expect(settings.hooks.PreCompact).toHaveLength(1); + }); + it('creates hooks object if missing', () => { const input = JSON.stringify({ statusLine: { type: 'command' } }); const result = addMemoryHooks(input, '/home/user/.devflow'); @@ -73,6 +103,8 @@ describe('addMemoryHooks', () => { const result = addMemoryHooks('{}', '/custom/path/.devflow'); const settings = JSON.parse(result); + expect(settings.hooks.UserPromptSubmit[0].hooks[0].command).toContain('/custom/path/.devflow/scripts/hooks/run-hook'); + expect(settings.hooks.UserPromptSubmit[0].hooks[0].command).toContain('prompt-capture-memory'); expect(settings.hooks.Stop[0].hooks[0].command).toContain('/custom/path/.devflow/scripts/hooks/run-hook'); expect(settings.hooks.Stop[0].hooks[0].command).toContain('stop-update-memory'); expect(settings.hooks.SessionStart[0].hooks[0].command).toContain('run-hook'); @@ -98,6 +130,7 @@ describe('addMemoryHooks', () => { const result = 
addMemoryHooks('{}', '/home/user/.devflow'); const settings = JSON.parse(result); + expect(settings.hooks.UserPromptSubmit[0].hooks[0].timeout).toBe(10); expect(settings.hooks.Stop[0].hooks[0].timeout).toBe(10); expect(settings.hooks.SessionStart[0].hooks[0].timeout).toBe(10); expect(settings.hooks.PreCompact[0].hooks[0].timeout).toBe(10); @@ -105,7 +138,7 @@ describe('addMemoryHooks', () => { }); describe('removeMemoryHooks', () => { - it('removes all 3 hook types', () => { + it('removes all 4 hook types', () => { const withHooks = addMemoryHooks('{}', '/home/user/.devflow'); const result = removeMemoryHooks(withHooks); const settings = JSON.parse(result); @@ -113,10 +146,13 @@ describe('removeMemoryHooks', () => { expect(settings.hooks).toBeUndefined(); }); - it('preserves other hooks (UserPromptSubmit)', () => { + it('preserves ambient preamble when removing memory hooks (preamble != prompt-capture-memory)', () => { const input = JSON.stringify({ hooks: { - UserPromptSubmit: [{ hooks: [{ type: 'command', command: 'preamble' }] }], + UserPromptSubmit: [ + { hooks: [{ type: 'command', command: 'preamble' }] }, + { hooks: [{ type: 'command', command: '/path/prompt-capture-memory' }] }, + ], Stop: [{ hooks: [{ type: 'command', command: '/path/stop-update-memory' }] }], SessionStart: [{ hooks: [{ type: 'command', command: '/path/session-start-memory' }] }], PreCompact: [{ hooks: [{ type: 'command', command: '/path/pre-compact-memory' }] }], @@ -125,7 +161,9 @@ describe('removeMemoryHooks', () => { const result = removeMemoryHooks(input); const settings = JSON.parse(result); + // Ambient preamble preserved; prompt-capture-memory removed expect(settings.hooks.UserPromptSubmit).toHaveLength(1); + expect(settings.hooks.UserPromptSubmit[0].hooks[0].command).toBe('preamble'); expect(settings.hooks.Stop).toBeUndefined(); expect(settings.hooks.SessionStart).toBeUndefined(); expect(settings.hooks.PreCompact).toBeUndefined(); @@ -164,7 +202,7 @@ describe('removeMemoryHooks', 
() => { const input = JSON.stringify({ hooks: { Stop: [{ hooks: [{ type: 'command', command: '/path/stop-update-memory' }] }], - // SessionStart and PreCompact already missing + // UserPromptSubmit, SessionStart, PreCompact already missing }, }); const result = removeMemoryHooks(input); @@ -177,6 +215,7 @@ describe('removeMemoryHooks', () => { const input = JSON.stringify({ statusLine: { type: 'command' }, hooks: { + UserPromptSubmit: [{ hooks: [{ type: 'command', command: '/path/prompt-capture-memory' }] }], Stop: [{ hooks: [{ type: 'command', command: '/path/stop-update-memory' }] }], SessionStart: [{ hooks: [{ type: 'command', command: '/path/session-start-memory' }] }], PreCompact: [{ hooks: [{ type: 'command', command: '/path/pre-compact-memory' }] }], @@ -187,10 +226,22 @@ describe('removeMemoryHooks', () => { expect(settings.statusLine).toEqual({ type: 'command' }); }); + + it('toggle cycle: enable → disable → enable produces clean state', () => { + const enabled = addMemoryHooks('{}', '/home/user/.devflow'); + const disabled = removeMemoryHooks(enabled); + const reEnabled = addMemoryHooks(disabled, '/home/user/.devflow'); + const settings = JSON.parse(reEnabled); + + expect(settings.hooks.UserPromptSubmit).toHaveLength(1); + expect(settings.hooks.Stop).toHaveLength(1); + expect(settings.hooks.SessionStart).toHaveLength(1); + expect(settings.hooks.PreCompact).toHaveLength(1); + }); }); describe('hasMemoryHooks', () => { - it('returns true when all 3 present', () => { + it('returns true when all 4 present', () => { const withHooks = addMemoryHooks('{}', '/home/user/.devflow'); expect(hasMemoryHooks(withHooks)).toBe(true); }); @@ -199,16 +250,27 @@ describe('hasMemoryHooks', () => { expect(hasMemoryHooks('{}')).toBe(false); }); - it('returns false when partial (1 or 2 of 3)', () => { + it('returns false when partial (1 of 4)', () => { + const input = JSON.stringify({ + hooks: { + Stop: [{ hooks: [{ type: 'command', command: '/path/stop-update-memory' }] }], + 
}, + }); + expect(hasMemoryHooks(input)).toBe(false); + }); + + it('returns false when partial (3 of 4 — old install missing UserPromptSubmit)', () => { const input = JSON.stringify({ hooks: { Stop: [{ hooks: [{ type: 'command', command: '/path/stop-update-memory' }] }], + SessionStart: [{ hooks: [{ type: 'command', command: '/path/session-start-memory' }] }], + PreCompact: [{ hooks: [{ type: 'command', command: '/path/pre-compact-memory' }] }], }, }); expect(hasMemoryHooks(input)).toBe(false); }); - it('returns false for non-memory hooks only', () => { + it('returns false for ambient preamble only (not a memory hook)', () => { const input = JSON.stringify({ hooks: { UserPromptSubmit: [{ hooks: [{ type: 'command', command: 'preamble' }] }], @@ -219,16 +281,16 @@ describe('hasMemoryHooks', () => { }); describe('countMemoryHooks', () => { - it('returns 3 when all present', () => { + it('returns 4 when all present', () => { const withHooks = addMemoryHooks('{}', '/home/user/.devflow'); - expect(countMemoryHooks(withHooks)).toBe(3); + expect(countMemoryHooks(withHooks)).toBe(4); }); it('returns 0 when none present', () => { expect(countMemoryHooks('{}')).toBe(0); }); - it('returns correct partial count', () => { + it('returns correct partial count (2 of 4)', () => { const input = JSON.stringify({ hooks: { Stop: [{ hooks: [{ type: 'command', command: '/path/stop-update-memory' }] }], @@ -237,6 +299,19 @@ describe('countMemoryHooks', () => { }); expect(countMemoryHooks(input)).toBe(2); }); + + it('does not count ambient preamble as prompt-capture-memory', () => { + const input = JSON.stringify({ + hooks: { + UserPromptSubmit: [{ hooks: [{ type: 'command', command: '/path/preamble' }] }], + Stop: [{ hooks: [{ type: 'command', command: '/path/stop-update-memory' }] }], + SessionStart: [{ hooks: [{ type: 'command', command: '/path/session-start-memory' }] }], + PreCompact: [{ hooks: [{ type: 'command', command: '/path/pre-compact-memory' }] }], + }, + }); + // preamble does 
not match 'prompt-capture-memory' marker + expect(countMemoryHooks(input)).toBe(3); + }); }); describe('createMemoryDir', () => { @@ -408,145 +483,225 @@ describe('migrateMemoryFiles', () => { }); }); -describe('knowledge file format', () => { +describe('countMemoryHooks accepts parsed Settings', () => { + it('accepts a parsed Settings object (not just JSON string)', () => { + const settings = { + hooks: { + UserPromptSubmit: [{ hooks: [{ type: 'command' as const, command: '/path/prompt-capture-memory', timeout: 10 }] }], + Stop: [{ hooks: [{ type: 'command' as const, command: '/path/stop-update-memory', timeout: 10 }] }], + SessionStart: [{ hooks: [{ type: 'command' as const, command: '/path/session-start-memory', timeout: 10 }] }], + PreCompact: [{ hooks: [{ type: 'command' as const, command: '/path/pre-compact-memory', timeout: 10 }] }], + }, + }; + expect(countMemoryHooks(settings)).toBe(4); + expect(hasMemoryHooks(settings)).toBe(true); + }); + + it('accepts parsed Settings with no hooks', () => { + const settings = {}; + expect(countMemoryHooks(settings)).toBe(0); + expect(hasMemoryHooks(settings)).toBe(false); + }); + + it('accepts parsed Settings with partial hooks', () => { + const settings = { + hooks: { + Stop: [{ hooks: [{ type: 'command' as const, command: '/path/stop-update-memory', timeout: 10 }] }], + SessionStart: [{ hooks: [{ type: 'command' as const, command: '/path/session-start-memory', timeout: 10 }] }], + }, + }; + expect(countMemoryHooks(settings)).toBe(2); + expect(hasMemoryHooks(settings)).toBe(false); + }); +}); + +describe('hasMemoryDir', () => { let tmpDir: string; beforeEach(async () => { - tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), 'devflow-test-')); - await fs.mkdir(path.join(tmpDir, '.memory', 'knowledge'), { recursive: true }); + tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), 'devflow-hasMemoryDir-')); }); afterEach(async () => { await fs.rm(tmpDir, { recursive: true, force: true }); }); - it('parses TL;DR from decisions.md 
comment header', async () => { - const content = '\n# Architectural Decisions'; - await fs.writeFile(path.join(tmpDir, '.memory', 'knowledge', 'decisions.md'), content); + it('returns true when .memory/ directory exists', async () => { + await fs.mkdir(path.join(tmpDir, '.memory'), { recursive: true }); + expect(await hasMemoryDir(tmpDir)).toBe(true); + }); + + it('returns false when .memory/ directory does not exist', async () => { + expect(await hasMemoryDir(tmpDir)).toBe(false); + }); + + it('returns false when root itself does not exist', async () => { + expect(await hasMemoryDir(path.join(tmpDir, 'nonexistent'))).toBe(false); + }); +}); + +describe('filterProjectsWithMemory', () => { + let tmpDir: string; - const firstLine = (await fs.readFile(path.join(tmpDir, '.memory', 'knowledge', 'decisions.md'), 'utf-8')).split('\n')[0]; - const tldr = firstLine.replace('', ''); + beforeEach(async () => { + tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), 'devflow-filterProjects-')); + }); + + afterEach(async () => { + await fs.rm(tmpDir, { recursive: true, force: true }); + }); - expect(tldr).toBe('2 decisions. 
Key: ADR-001 Result types, ADR-002 Single-coder'); + it('returns empty array when no git roots provided', async () => { + expect(await filterProjectsWithMemory([])).toEqual([]); }); - it('parses TL;DR from pitfalls.md comment header', async () => { - const content = '\n# Known Pitfalls'; - await fs.writeFile(path.join(tmpDir, '.memory', 'knowledge', 'pitfalls.md'), content); + it('returns only projects that have .memory/', async () => { + const projA = path.join(tmpDir, 'projA'); + const projB = path.join(tmpDir, 'projB'); + const projC = path.join(tmpDir, 'projC'); + await fs.mkdir(path.join(projA, '.memory'), { recursive: true }); + await fs.mkdir(projB, { recursive: true }); // no .memory/ + await fs.mkdir(path.join(projC, '.memory'), { recursive: true }); - const firstLine = (await fs.readFile(path.join(tmpDir, '.memory', 'knowledge', 'pitfalls.md'), 'utf-8')).split('\n')[0]; - const tldr = firstLine.replace('', ''); + const result = await filterProjectsWithMemory([projA, projB, projC]); + expect(result).toEqual([projA, projC]); + }); - expect(tldr).toBe('1 pitfall. 
Key: PF-001 Synthesizer glob'); + it('returns empty array when no projects have .memory/', async () => { + const projA = path.join(tmpDir, 'projA'); + await fs.mkdir(projA, { recursive: true }); + expect(await filterProjectsWithMemory([projA])).toEqual([]); }); +}); + +describe('cleanQueueFiles', () => { + let tmpDir: string; - it('extracts highest ADR number via regex', async () => { - const content = [ - '', - '# Architectural Decisions', - '', - '## ADR-001: First decision', - '- **Status**: Accepted', - '', - '## ADR-002: Second decision', - '- **Status**: Accepted', - '', - '## ADR-003: Third decision', - '- **Status**: Accepted', - ].join('\n'); - await fs.writeFile(path.join(tmpDir, '.memory', 'knowledge', 'decisions.md'), content); + beforeEach(async () => { + tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), 'devflow-cleanQueue-')); + }); - const fileContent = await fs.readFile(path.join(tmpDir, '.memory', 'knowledge', 'decisions.md'), 'utf-8'); - const matches = [...fileContent.matchAll(/^## ADR-(\d+)/gm)]; - const highest = matches.length > 0 ? 
Math.max(...matches.map(m => parseInt(m[1], 10))) : 0; + afterEach(async () => { + await fs.rm(tmpDir, { recursive: true, force: true }); + }); - expect(highest).toBe(3); + it('returns cleaned=0 when no projects provided', async () => { + const result = await cleanQueueFiles([]); + expect(result).toEqual({ cleaned: 0, projects: [] }); }); - it('returns 0 for empty file with no ADR entries', async () => { - const content = '\n# Architectural Decisions\n\nAppend-only.'; - await fs.writeFile(path.join(tmpDir, '.memory', 'knowledge', 'decisions.md'), content); + it('cleans both queue files when both exist', async () => { + const memDir = path.join(tmpDir, '.memory'); + await fs.mkdir(memDir, { recursive: true }); + await fs.writeFile(path.join(memDir, '.pending-turns.jsonl'), '{"role":"user"}'); + await fs.writeFile(path.join(memDir, '.pending-turns.processing'), '{"role":"user"}'); - const fileContent = await fs.readFile(path.join(tmpDir, '.memory', 'knowledge', 'decisions.md'), 'utf-8'); - const matches = [...fileContent.matchAll(/^## ADR-(\d+)/gm)]; - const highest = matches.length > 0 ? 
Math.max(...matches.map(m => parseInt(m[1], 10))) : 0; + const result = await cleanQueueFiles([tmpDir]); + expect(result.cleaned).toBe(1); + expect(result.projects).toEqual([tmpDir]); + await expect(fs.access(path.join(memDir, '.pending-turns.jsonl'))).rejects.toThrow(); + await expect(fs.access(path.join(memDir, '.pending-turns.processing'))).rejects.toThrow(); + }); + + it('cleans only .pending-turns.jsonl when only that file exists', async () => { + const memDir = path.join(tmpDir, '.memory'); + await fs.mkdir(memDir, { recursive: true }); + await fs.writeFile(path.join(memDir, '.pending-turns.jsonl'), '{"role":"user"}'); - expect(highest).toBe(0); + const result = await cleanQueueFiles([tmpDir]); + expect(result.cleaned).toBe(1); + await expect(fs.access(path.join(memDir, '.pending-turns.jsonl'))).rejects.toThrow(); }); - it('detects duplicate pitfall by Area + Issue match', async () => { - const content = [ - '', - '# Known Pitfalls', - '', - '## PF-001: Synthesizer review glob matched zero files', - '- **Area**: shared/agents/synthesizer.md', - '- **Issue**: Glob didn\'t match reviewer output filenames', - ].join('\n'); - await fs.writeFile(path.join(tmpDir, '.memory', 'knowledge', 'pitfalls.md'), content); + it('returns cleaned=0 when neither queue file exists', async () => { + const memDir = path.join(tmpDir, '.memory'); + await fs.mkdir(memDir, { recursive: true }); - const fileContent = await fs.readFile(path.join(tmpDir, '.memory', 'knowledge', 'pitfalls.md'), 'utf-8'); + const result = await cleanQueueFiles([tmpDir]); + expect(result).toEqual({ cleaned: 0, projects: [] }); + }); - // Check if an entry with matching Area and Issue already exists - const newArea = 'shared/agents/synthesizer.md'; - const newIssue = 'Glob didn\'t match reviewer output filenames'; - const isDuplicate = fileContent.includes(`**Area**: ${newArea}`) && fileContent.includes(`**Issue**: ${newIssue}`); + it('skips projects where lock directory is present', async () => { + const 
memDir = path.join(tmpDir, '.memory'); + await fs.mkdir(memDir, { recursive: true }); + await fs.writeFile(path.join(memDir, '.pending-turns.jsonl'), '{"role":"user"}'); + // Create the lock directory to simulate active background updater + await fs.mkdir(path.join(memDir, '.working-memory.lock'), { recursive: true }); - expect(isDuplicate).toBe(true); + const result = await cleanQueueFiles([tmpDir]); + expect(result).toEqual({ cleaned: 0, projects: [] }); + // File should remain untouched + await expect(fs.access(path.join(memDir, '.pending-turns.jsonl'))).resolves.toBeUndefined(); }); - it('gracefully handles missing knowledge files', async () => { - // Verify no error when reading non-existent knowledge files - const knowledgeDir = path.join(tmpDir, '.memory', 'knowledge'); - const decisionsPath = path.join(knowledgeDir, 'decisions.md'); - const pitfallsPath = path.join(knowledgeDir, 'pitfalls.md'); + it('cleans multiple projects in parallel', async () => { + const projA = path.join(tmpDir, 'projA'); + const projB = path.join(tmpDir, 'projB'); + const projC = path.join(tmpDir, 'projC'); - // Simulate the graceful degradation pattern from session-start hook - let tldrLines: string[] = []; - for (const kf of [decisionsPath, pitfallsPath]) { - try { - await fs.access(kf); - const firstLine = (await fs.readFile(kf, 'utf-8')).split('\n')[0]; - if (firstLine.startsWith('', '')); - } - } catch { - // File doesn't exist — skip silently - } + for (const proj of [projA, projB, projC]) { + await fs.mkdir(path.join(proj, '.memory'), { recursive: true }); + await fs.writeFile(path.join(proj, '.memory', '.pending-turns.jsonl'), '{"role":"user"}'); } - expect(tldrLines).toHaveLength(0); + const result = await cleanQueueFiles([projA, projB, projC]); + expect(result.cleaned).toBe(3); + expect(result.projects).toContain(projA); + expect(result.projects).toContain(projB); + expect(result.projects).toContain(projC); }); - it('updates TL;DR to reflect new entry count after append', 
async () => { - const content = [ - '', - '# Known Pitfalls', - '', - '## PF-001: Synthesizer review glob matched zero files', - '- **Area**: shared/agents/synthesizer.md', - '- **Issue**: Glob pattern mismatch', - ].join('\n'); - await fs.writeFile(path.join(tmpDir, '.memory', 'knowledge', 'pitfalls.md'), content); + it('cleans unlocked projects and skips locked ones in same batch', async () => { + const locked = path.join(tmpDir, 'locked'); + const unlocked = path.join(tmpDir, 'unlocked'); - // Simulate appending a new entry and updating TL;DR - let fileContent = await fs.readFile(path.join(tmpDir, '.memory', 'knowledge', 'pitfalls.md'), 'utf-8'); - const newEntry = '\n\n## PF-002: Race condition in background hook\n- **Area**: scripts/hooks/stop-update-memory\n- **Issue**: Concurrent writes to memory file'; - fileContent += newEntry; + await fs.mkdir(path.join(locked, '.memory', '.working-memory.lock'), { recursive: true }); + await fs.writeFile(path.join(locked, '.memory', '.pending-turns.jsonl'), 'data'); - // Update TL;DR - const matches = [...fileContent.matchAll(/^## PF-(\d+)/gm)]; - const count = matches.length; - const keys = matches.map(m => `PF-${m[1].padStart(3, '0')}`).join(', '); - fileContent = fileContent.replace(/^/, ``); + await fs.mkdir(path.join(unlocked, '.memory'), { recursive: true }); + await fs.writeFile(path.join(unlocked, '.memory', '.pending-turns.jsonl'), 'data'); - await fs.writeFile(path.join(tmpDir, '.memory', 'knowledge', 'pitfalls.md'), fileContent); + const result = await cleanQueueFiles([locked, unlocked]); + expect(result.cleaned).toBe(1); + expect(result.projects).toEqual([unlocked]); + }); +}); + +describe('removeMemoryHooks accepts parsed Settings', () => { + it('accepts a parsed Settings object and returns JSON string', () => { + const settings = { + hooks: { + UserPromptSubmit: [{ hooks: [{ type: 'command' as const, command: '/path/prompt-capture-memory', timeout: 10 }] }], + Stop: [{ hooks: [{ type: 'command' as const, 
command: '/path/stop-update-memory', timeout: 10 }] }], + SessionStart: [{ hooks: [{ type: 'command' as const, command: '/path/session-start-memory', timeout: 10 }] }], + PreCompact: [{ hooks: [{ type: 'command' as const, command: '/path/pre-compact-memory', timeout: 10 }] }], + }, + }; + const result = removeMemoryHooks(settings); + const parsed = JSON.parse(result); + expect(parsed.hooks).toBeUndefined(); + }); - const updated = await fs.readFile(path.join(tmpDir, '.memory', 'knowledge', 'pitfalls.md'), 'utf-8'); - const updatedTldr = updated.split('\n')[0]; + it('does not mutate the original Settings object when passed by reference', () => { + const settings = { + hooks: { + Stop: [{ hooks: [{ type: 'command' as const, command: '/path/stop-update-memory', timeout: 10 }] }], + }, + }; + removeMemoryHooks(settings); + // Original must be unchanged + expect(settings.hooks.Stop).toHaveLength(1); + }); - expect(updatedTldr).toBe(''); - expect(updated).toContain('## PF-002'); + it('consistent API: string and Settings produce same result', () => { + const settingsObj = { + hooks: { + Stop: [{ hooks: [{ type: 'command' as const, command: '/path/stop-update-memory', timeout: 10 }] }], + }, + }; + const resultFromObj = removeMemoryHooks(settingsObj); + const resultFromStr = removeMemoryHooks(JSON.stringify(settingsObj)); + expect(JSON.parse(resultFromObj)).toEqual(JSON.parse(resultFromStr)); }); }); diff --git a/tests/shell-hooks.test.ts b/tests/shell-hooks.test.ts index 3fc26cc..9b2be28 100644 --- a/tests/shell-hooks.test.ts +++ b/tests/shell-hooks.test.ts @@ -1,4 +1,4 @@ -import { describe, it, expect } from 'vitest'; +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; import { execSync } from 'child_process'; import * as path from 'path'; import * as fs from 'fs'; @@ -16,8 +16,10 @@ const HOOK_SCRIPTS = [ 'stop-update-memory', 'session-start-memory', 'pre-compact-memory', + 'prompt-capture-memory', 'preamble', 'json-parse', + 'get-mtime', ]; 
describe('shell hook syntax checks', () => { @@ -1080,10 +1082,6 @@ describe('json-helper.cjs filter-observations', () => { }); describe('session-end-learning structure', () => { - it('is included in bash -n syntax checks', () => { - expect(HOOK_SCRIPTS).toContain('session-end-learning'); - }); - it('starts with bash shebang and sources json-parse', () => { const scriptPath = path.join(HOOKS_DIR, 'session-end-learning'); const content = fs.readFileSync(scriptPath, 'utf8'); @@ -1186,3 +1184,338 @@ describe('json-parse wrapper', () => { expect(result).toBe('val'); }); }); + +describe('working memory queue behavior', () => { + const STOP_HOOK = path.join(HOOKS_DIR, 'stop-update-memory'); + const PREAMBLE_HOOK = path.join(HOOKS_DIR, 'preamble'); + const PROMPT_CAPTURE_HOOK = path.join(HOOKS_DIR, 'prompt-capture-memory'); + + let tmpDir: string; + + beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'devflow-queue-test-')); + }); + + afterEach(() => { + fs.rmSync(tmpDir, { recursive: true, force: true }); + }); + + it('stop_reason tool_use — no queue append', () => { + // Create .memory/ so the hook proceeds to the stop_reason check + fs.mkdirSync(path.join(tmpDir, '.memory'), { recursive: true }); + + const input = JSON.stringify({ + cwd: tmpDir, + session_id: 'test-session-001', + stop_reason: 'tool_use', + assistant_message: 'test response', + }); + + execSync(`bash "${STOP_HOOK}"`, { input, stdio: ['pipe', 'pipe', 'pipe'] }); + + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + expect(fs.existsSync(queueFile)).toBe(false); + }); + + it('stop_reason end_turn — appends assistant turn to queue', () => { + // Create .memory/ directory + fs.mkdirSync(path.join(tmpDir, '.memory'), { recursive: true }); + // Touch throttle marker to prevent background spawn attempt + fs.writeFileSync(path.join(tmpDir, '.memory', '.working-memory-last-trigger'), ''); + + const input = JSON.stringify({ + cwd: tmpDir, + session_id: 'test-session-002', 
+ stop_reason: 'end_turn', + assistant_message: 'test response', + }); + + execSync(`bash "${STOP_HOOK}"`, { input, stdio: ['pipe', 'pipe', 'pipe'] }); + + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + expect(fs.existsSync(queueFile)).toBe(true); + + const lines = fs.readFileSync(queueFile, 'utf-8').trim().split('\n').filter(Boolean); + expect(lines).toHaveLength(1); + + const entry = JSON.parse(lines[0]); + expect(entry.role).toBe('assistant'); + expect(entry.content).toBe('test response'); + expect(typeof entry.ts).toBe('number'); + }); + + it('prompt-capture-memory captures user prompt to queue', () => { + // Create .memory/ directory so capture is triggered + fs.mkdirSync(path.join(tmpDir, '.memory'), { recursive: true }); + + const input = JSON.stringify({ + cwd: tmpDir, + session_id: 'test-session-003', + prompt: 'implement the cache', + }); + + execSync(`bash "${PROMPT_CAPTURE_HOOK}"`, { input, stdio: ['pipe', 'pipe', 'pipe'] }); + + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + expect(fs.existsSync(queueFile)).toBe(true); + + const lines = fs.readFileSync(queueFile, 'utf-8').trim().split('\n').filter(Boolean); + expect(lines).toHaveLength(1); + + const entry = JSON.parse(lines[0]); + expect(entry.role).toBe('user'); + expect(entry.content).toBe('implement the cache'); + expect(typeof entry.ts).toBe('number'); + }); + + it('prompt-capture-memory with missing .memory/ — creates it via ensure-memory-gitignore, exit 0', () => { + // tmpDir exists but has no .memory/ subdirectory — ensure-memory-gitignore creates it + const input = JSON.stringify({ + cwd: tmpDir, + session_id: 'test-session-004a', + prompt: 'implement the cache', + }); + + expect(() => { + execSync(`bash "${PROMPT_CAPTURE_HOOK}"`, { input, stdio: ['pipe', 'pipe', 'pipe'] }); + }).not.toThrow(); + + // Hook creates .memory/ and writes to queue + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + 
expect(fs.existsSync(queueFile)).toBe(true); + }); + + it('preamble does NOT write to queue — zero file I/O', () => { + // Create .memory/ to confirm preamble doesn't touch the queue even when .memory/ exists + fs.mkdirSync(path.join(tmpDir, '.memory'), { recursive: true }); + + const input = JSON.stringify({ + cwd: tmpDir, + session_id: 'test-session-004', + prompt: 'implement the cache', + }); + + // Should not throw (exit 0) + expect(() => { + execSync(`bash "${PREAMBLE_HOOK}"`, { input, stdio: ['pipe', 'pipe', 'pipe'] }); + }).not.toThrow(); + + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + expect(fs.existsSync(queueFile)).toBe(false); + }); + + it('preamble with slash command — exits 0, no queue write', () => { + fs.mkdirSync(path.join(tmpDir, '.memory'), { recursive: true }); + + const input = JSON.stringify({ + cwd: tmpDir, + session_id: 'test-session-004b', + prompt: '/code-review', + }); + + expect(() => { + execSync(`bash "${PREAMBLE_HOOK}"`, { input, stdio: ['pipe', 'pipe', 'pipe'] }); + }).not.toThrow(); + + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + expect(fs.existsSync(queueFile)).toBe(false); + }); + + it('queue JSONL format — each line is valid JSON with role, content, ts', () => { + fs.mkdirSync(path.join(tmpDir, '.memory'), { recursive: true }); + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + + const now = Math.floor(Date.now() / 1000); + const entries = [ + { role: 'user', content: 'hello world', ts: now }, + { role: 'assistant', content: 'I will help you', ts: now + 1 }, + { role: 'user', content: 'thanks', ts: now + 2 }, + ]; + + fs.writeFileSync(queueFile, entries.map(e => JSON.stringify(e)).join('\n') + '\n'); + + const lines = fs.readFileSync(queueFile, 'utf-8').trim().split('\n').filter(Boolean); + expect(lines).toHaveLength(3); + + for (const line of lines) { + const parsed = JSON.parse(line); + expect(['user', 'assistant']).toContain(parsed.role); + 
expect(typeof parsed.content).toBe('string'); + expect(typeof parsed.ts).toBe('number'); + } + }); + + it('stop_reason end_turn — content array: joins text blocks, excludes tool_use', () => { + fs.mkdirSync(path.join(tmpDir, '.memory'), { recursive: true }); + // Touch throttle marker to prevent background spawn attempt + fs.writeFileSync(path.join(tmpDir, '.memory', '.working-memory-last-trigger'), ''); + + const input = JSON.stringify({ + cwd: tmpDir, + session_id: 'test-session-005', + stop_reason: 'end_turn', + assistant_message: [ + { type: 'text', text: 'First part of response' }, + { type: 'tool_use', id: 'toolu_01', name: 'Read', input: { file_path: '/tmp/foo' } }, + { type: 'text', text: 'Second part of response' }, + ], + }); + + execSync(`bash "${STOP_HOOK}"`, { input, stdio: ['pipe', 'pipe', 'pipe'] }); + + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + expect(fs.existsSync(queueFile)).toBe(true); + + const lines = fs.readFileSync(queueFile, 'utf-8').trim().split('\n').filter(Boolean); + expect(lines).toHaveLength(1); + + const entry = JSON.parse(lines[0]); + expect(entry.role).toBe('assistant'); + // Both text blocks joined with newline; tool_use block excluded + expect(entry.content).toBe('First part of response\nSecond part of response'); + expect(typeof entry.ts).toBe('number'); + }); + + it('queue overflow — >200 lines truncated to last 100', () => { + fs.mkdirSync(path.join(tmpDir, '.memory'), { recursive: true }); + // Touch throttle marker to prevent background spawn attempt + fs.writeFileSync(path.join(tmpDir, '.memory', '.working-memory-last-trigger'), ''); + + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + const now = Math.floor(Date.now() / 1000); + + // Pre-populate queue with 201 entries + const existingLines = Array.from({ length: 201 }, (_, i) => + JSON.stringify({ role: 'user', content: `entry ${i}`, ts: now + i }), + ); + fs.writeFileSync(queueFile, existingLines.join('\n') + '\n'); 
+ + // Trigger stop hook — appends 1 more entry, then overflow check fires + const input = JSON.stringify({ + cwd: tmpDir, + session_id: 'test-session-006', + stop_reason: 'end_turn', + assistant_message: 'overflow trigger response', + }); + + execSync(`bash "${STOP_HOOK}"`, { input, stdio: ['pipe', 'pipe', 'pipe'] }); + + // After overflow: 201 pre-existing + 1 new = 202 lines → truncated to last 100 + const resultLines = fs.readFileSync(queueFile, 'utf-8').trim().split('\n').filter(Boolean); + expect(resultLines).toHaveLength(100); + + // The new entry (the assistant turn) must be present as the last line + const lastEntry = JSON.parse(resultLines[resultLines.length - 1]); + expect(lastEntry.role).toBe('assistant'); + expect(lastEntry.content).toBe('overflow trigger response'); + }); + + it('prompt-capture-memory truncates prompts longer than 2000 chars', () => { + fs.mkdirSync(path.join(tmpDir, '.memory'), { recursive: true }); + + const longPrompt = 'a'.repeat(3000); + const input = JSON.stringify({ + cwd: tmpDir, + session_id: 'test-trunc-001', + prompt: longPrompt, + }); + + execSync(`bash "${PROMPT_CAPTURE_HOOK}"`, { input, stdio: ['pipe', 'pipe', 'pipe'] }); + + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + const lines = fs.readFileSync(queueFile, 'utf-8').trim().split('\n').filter(Boolean); + expect(lines).toHaveLength(1); + + const entry = JSON.parse(lines[0]); + // Truncated at 2000 chars + '... 
[truncated]' suffix (15 chars) = 2015 + expect(entry.content.length).toBe(2015); + expect(entry.content).toContain('[truncated]'); + }); + + it('stop-update-memory truncates assistant content longer than 2000 chars', () => { + fs.mkdirSync(path.join(tmpDir, '.memory'), { recursive: true }); + // Touch throttle marker to prevent background spawn attempt + fs.writeFileSync(path.join(tmpDir, '.memory', '.working-memory-last-trigger'), ''); + + const longMessage = 'b'.repeat(5000); + const input = JSON.stringify({ + cwd: tmpDir, + session_id: 'test-trunc-002', + stop_reason: 'end_turn', + assistant_message: longMessage, + }); + + execSync(`bash "${STOP_HOOK}"`, { input, stdio: ['pipe', 'pipe', 'pipe'] }); + + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + const lines = fs.readFileSync(queueFile, 'utf-8').trim().split('\n').filter(Boolean); + expect(lines).toHaveLength(1); + + const entry = JSON.parse(lines[0]); + // Truncated at 2000 chars + '... [truncated]' suffix (15 chars) = 2015 + expect(entry.content.length).toBe(2015); + expect(entry.content).toContain('[truncated]'); + }); + + it('stop-update-memory exits cleanly when DEVFLOW_BG_UPDATER=1', () => { + fs.mkdirSync(path.join(tmpDir, '.memory'), { recursive: true }); + + const input = JSON.stringify({ + cwd: tmpDir, + session_id: 'test-bg-guard-001', + stop_reason: 'end_turn', + assistant_message: 'should not be captured', + }); + + // Should not throw; no queue write expected + expect(() => { + execSync(`DEVFLOW_BG_UPDATER=1 bash "${STOP_HOOK}"`, { input, stdio: ['pipe', 'pipe', 'pipe'] }); + }).not.toThrow(); + + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + expect(fs.existsSync(queueFile)).toBe(false); + }); + + it('prompt-capture-memory exits cleanly when DEVFLOW_BG_UPDATER=1', () => { + fs.mkdirSync(path.join(tmpDir, '.memory'), { recursive: true }); + + const input = JSON.stringify({ + cwd: tmpDir, + session_id: 'test-bg-guard-002', + prompt: 'should not 
be captured', + }); + + // Should not throw; no queue write expected + expect(() => { + execSync(`DEVFLOW_BG_UPDATER=1 bash "${PROMPT_CAPTURE_HOOK}"`, { input, stdio: ['pipe', 'pipe', 'pipe'] }); + }).not.toThrow(); + + const queueFile = path.join(tmpDir, '.memory', '.pending-turns.jsonl'); + expect(fs.existsSync(queueFile)).toBe(false); + }); +}); + +describe('get-mtime behavioral', () => { + it('returns a valid positive epoch timestamp for a real file', () => { + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'devflow-test-')); + const tmpFile = path.join(tmpDir, 'probe.txt'); + const getMtimeScript = path.join(HOOKS_DIR, 'get-mtime'); + + try { + fs.writeFileSync(tmpFile, 'probe'); + const result = execSync( + `bash -c 'source "${getMtimeScript}" && get_mtime "${tmpFile}"'`, + { stdio: 'pipe' } + ).toString().trim(); + + const epoch = parseInt(result, 10); + expect(Number.isInteger(epoch)).toBe(true); + expect(epoch).toBeGreaterThan(0); + // Sanity: must be after 2020-01-01 (epoch 1577836800) and before year 2100 (4102444800) + expect(epoch).toBeGreaterThan(1577836800); + expect(epoch).toBeLessThan(4102444800); + } finally { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } + }); +});