diff --git a/.claude/settings.json b/.claude/settings.json index 5597f6e..eaf1d15 100644 --- a/.claude/settings.json +++ b/.claude/settings.json @@ -22,14 +22,24 @@ "hooks": { "PreToolUse": [ { - "matcher": "Write|Edit", - "hooks": ["node scripts/lib/check-gate-before-write.js"] + "matcher": "Write|Edit|MultiEdit", + "hooks": [ + { + "type": "command", + "command": "node scripts/lib/check-gate-before-write.js" + } + ] } ], "PostToolUse": [ { - "matcher": "Write|Edit", - "hooks": ["node scripts/lib/check-function-sizes.js"] + "matcher": "Write|Edit|MultiEdit", + "hooks": [ + { + "type": "command", + "command": "node scripts/lib/check-function-sizes.js" + } + ] } ] } diff --git a/.cursor/settings.json b/.cursor/settings.json new file mode 100644 index 0000000..b07a37d --- /dev/null +++ b/.cursor/settings.json @@ -0,0 +1,13 @@ +{ + "plugins": { + "linear": { + "enabled": true + }, + "neon-postgres": { + "enabled": true + }, + "vercel": { + "enabled": true + } + } +} diff --git a/.mcp.json b/.mcp.json new file mode 100644 index 0000000..61cb266 --- /dev/null +++ b/.mcp.json @@ -0,0 +1,11 @@ +{ + "mcpServers": { + "neon": { + "type": "http", + "url": "https://mcp.neon.tech/mcp", + "headers": { + "Authorization": "Bearer ${NEON_API_KEY}" + } + } + } +} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index d12aa0f..34df864 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,190 @@ # Changelog +## 2026-04-04 — MoneyMirror Vercel Deploy Attempt: Runtime Fix Applied, Release Still Blocked + +**What:** Executed the `VIJ-20` production deploy attempt for `apps/money-mirror`, fixed one Vercel runtime incompatibility in code, created the Vercel project, and captured the remaining production blocker. 
+ +**Vercel setup completed:** + +- Created project `money-mirror` under scope `vijay-sehgals-projects` +- Linked local app directory to the Vercel project +- Synced production env vars from `apps/money-mirror/.env.local` +- Corrected `NEXT_PUBLIC_APP_URL` to the real assigned alias: `https://money-mirror-rho.vercel.app` + +**Code fix applied:** + +- Replaced `apps/money-mirror/middleware.ts` with `apps/money-mirror/proxy.ts` +- Why: the initial production deploy failed because Vercel rejected `@neondatabase/auth/next/server` inside the Edge `middleware` runtime +- Result: Next 16 auth gating now runs through the Node `proxy` file convention instead of Edge middleware + +**Validation after the fix:** + +- `npm test` PASS (45 tests) +- `npx next build --webpack` PASS +- `npx tsc --noEmit` PASS after regenerating `.next/types` +- Vercel production build PASS + +**Remaining blocker:** + +- Public deployment URLs are protected by Vercel Authentication (`401 Authentication Required`) +- Even authenticated `vercel curl` requests still return `NOT_FOUND` for `/`, `/login`, `/dashboard`, and `/api/cron/weekly-recap` +- This indicates the release is still blocked by Vercel project/public routing configuration, not by the app build itself + +**Why:** The original remaining task for Phase 1 was deploy + production verification. That surfaced two separate production issues: one app/runtime issue (fixed) and one Vercel serving/protection issue (still open). + +--- + +## 2026-04-04 — MoneyMirror Phase 1 Live Smoke Complete + Gemini Timeout Fix + +**What:** Completed full Phase 1 rollout validation against live external services (Neon Auth, Gemini, Neon DB). 
+ +**Neon schema migration:** + +- Applied 7 `ALTER TABLE` statements to live DB `steep-meadow-97750093` +- `profiles` gained `monthly_income_paisa BIGINT` +- `statements` gained `institution_name`, `statement_type` (with CHECK constraint), `due_date`, `payment_due_paisa`, `minimum_due_paisa`, `credit_limit_paisa` + +**Smoke test results (all via Playwright against `http://localhost:3000`):** + +- Dev server boot: Next.js 16 Turbopack ready in ~440ms ✅ +- Cron gate: 401 unauthenticated, 200 with `x-cron-secret` ✅ +- OTP login: email submitted, OTP entered, session established ✅ +- Bank account upload (Kotak Feb 2026): 24 transactions, ₹31,926 debits — DB confirmed `status=processed` ✅ +- Credit card upload (HDFC Feb–Mar 2026): 18 transactions, ₹16,245 spent, credit card fields rendered — DB confirmed ✅ + +**Bug fix — Gemini 2.5 Flash timeout:** + +- `gemini-2.5-flash` has thinking enabled by default in 2026. On 10K-char PDF text, this exceeded the 25s route timeout. +- Fixed in `apps/money-mirror/src/app/api/statement/parse/route.ts`: added `config: { thinkingConfig: { thinkingBudget: 0 } }` to the `generateContent` call. +- Response time reduced from >25s (timeout) to ~8s. + +**Linear cleanup (VIJ-11 children):** + +- VIJ-12 → Duplicate, VIJ-14 → Cancelled, VIJ-15 → Duplicate +- VIJ-13 restructured as `[MoneyMirror] Phase 1 Rollout Validation`, In Progress +- 6 child sub-issues created: VIJ-16 (schema, Done), VIJ-17 (OTP, Done), VIJ-18 (bank upload, Done), VIJ-19 (CC upload, Done), VIJ-20 (Vercel deploy, Todo), VIJ-21 (cron gate, Done) + +**Why:** DB schema drift blocked all live smoke. Gemini thinking mode silently broke PDF parsing on the happy path. + +--- + +## 2026-04-03 — MoneyMirror Phase 1 Rollout Validation Started + +**What:** Started the real rollout-validation pass for `apps/money-mirror` and synced the findings into Linear and repo state. + +- Created dedicated Linear follow-up issue `VIJ-13` for the live smoke and rollout checklist. 
+- Marked duplicate follow-up `VIJ-14` as Duplicate to keep one canonical validation thread. +- Verified local runtime behavior: + - `npm run dev` boots successfully outside the sandbox + - unauthenticated `GET /api/cron/weekly-recap` returns `401` + - authenticated `GET /api/cron/weekly-recap` with `x-cron-secret` returns `200 {"ok":true,"total":0,"succeeded":0,"failed":0}` +- Verified the target Neon DB is still behind the repo schema: + - `profiles` is missing `monthly_income_paisa` + - `statements` still uses `bank_name` + - `statements` is missing `institution_name`, `statement_type`, and the new credit-card due metadata columns + +**Why:** The repo now contains Phase 1 expansion code for explicit `bank_account` and `credit_card` flows, but the live DB has not yet been migrated. Until `apps/money-mirror/schema.sql` is applied, the real OTP/onboarding/upload smoke cannot validate the current app behavior. + +--- + +## 2026-04-03 — /learning Issue-009: 7 Engineering Rules Extracted + 5 Agent Files Updated + +**What:** Completed the /learning command for MoneyMirror (issue-009). Extracted 7 durable engineering rules from the postmortem and applied Prompt Autopsy changes to 5 agent/command files. + +**Engineering rules written to `knowledge/engineering-lessons.md`:** + +1. Dashboard/report pages linked from email CTAs must specify a first-load rehydration path (read path) — the post-mutation result path is insufficient. +2. Parent + child write sequences must declare an explicit atomicity strategy — partial success (`parent = processed, children = missing`) is never a terminal state. +3. Fan-out worker HTTP contracts must use non-2xx status for failure — master must not inspect JSON body for success/failure accounting. +4. Auth route fixes must update all callers in the same change — a route auth fix without caller verification is an incomplete fix. +5. `.env.local.example` must be generated by grepping `process.env.*` in source — not from memory. 
Key name divergence is a deploy blocker. +6. File size limits (route < 200 lines, page < 250 lines) must be applied during code generation — not discovered at pre-commit hook rejection. +7. Third-party library API must be verified against installed version before marking integration complete — training knowledge is not sufficient. + +**Agent/command files updated per Prompt Autopsy:** + +- `agents/backend-architect-agent.md`: 3 new Mandatory Pre-Approval Checklist items (10: rehydration path, 11: write atomicity, 12: fan-out HTTP contract) +- `agents/backend-engineer-agent.md`: 2 hard rules (auth caller cross-verification, file size budget at generation time) +- `agents/code-review-agent.md`: 2 new checks (authenticated route caller verification = CRITICAL, parent/child write sequence check = CRITICAL) +- `agents/qa-agent.md`: env var key name cross-check added as standalone QA dimension with grep-based verification +- `commands/execute-plan.md`: env var grep step in §8 completion checklist; read/write path checkpoint + third-party library verification in §5; file size budget rule as §5b + +**Also written:** `apps/money-mirror/CODEBASE-CONTEXT.md` — full AI context file for future agent sessions. + +**Why:** Recurring failure patterns from issue-009 (2nd consecutive parent/child write atomicity gap, 3rd consecutive env var naming issue, file size violations deferred 3 stages). Upstream enforcement needed to break these cycles. + +--- + +## 2026-04-03 — Shift-Left Infra Validation: Gate 0 + Empty ENV Detection + Execute-Plan Provisioning Checklist + +**What:** Three coordinated changes to eliminate the "tests pass, app broken locally" failure pattern discovered during issue-009 analysis. + +**Change 1 — `/deploy-check` Gate 0 (manual smoke test):** +Added a pre-flight checklist (Gate 0) that the PM runs manually before triggering `/deploy-check`. 
Six checkboxes: dev server boots, OTP login works, onboarding writes to DB, core feature works end-to-end, no 500 errors, no empty env vars. If any fail, fix before running the command. Updated the Output Format to include "Local Smoke Test (Gate 0 — PM confirmed)" as the first line. + +**Change 2 — `/deploy-check` ENV gate upgrade (empty value detection):** +The existing ENV gate only checked that variable names appeared in `.env.local.example`. Upgraded it to read `.env.local` directly and classify each variable as FILLED / EMPTY / MISSING. Empty values (`VAR=` or `VAR=""`) are now a blocking violation — previously they passed as "present". Variables explicitly marked `# Optional` are exempt. + +**Change 3 — `/execute-plan` + `backend-engineer-agent.md` infra provisioning:** +Added a 6-item infrastructure provisioning checklist to the execute-plan completion criteria. DB project created, schema applied and verified, auth provider provisioned (e.g., Neon Auth URL obtained), all non-optional env vars filled, Sentry project created and configured, `npm run dev` boots clean. These are now **hard deliverables** — not README suggestions. Also moved Sentry setup from deploy-check into execute-plan's Backend Implementation section: `npm install @sentry/nextjs`, wizard run, all 4 Sentry vars filled. The backend-engineer-agent.md `# Rules` section now lists all 6 as explicit hard constraints. + +**Why:** Issue-009 analysis revealed that `NEON_AUTH_BASE_URL` was empty in `.env.local` — OTP login would have failed immediately on local test. `RESEND_API_KEY` was also empty, meaning recap emails silently fail (returning 200 but not sending). The current ENV gate in deploy-check passed both because it only checked variable name presence in `.env.local.example`, not actual values. The root cause: infra setup (Neon Auth provisioning, Sentry project creation) has no enforcement point in the pipeline — it lives only as prose in README.md. 
Deploy-check discovered these gaps too late (after PR creation). The fix shifts this validation to execute-plan, where the engineer is still in implementation mode. + +**Files:** `commands/deploy-check.md` (Gate 0 added, §2 ENV gate upgraded, output format updated), `commands/execute-plan.md` (§2 Sentry setup added, §8 infra provisioning checklist added), `agents/backend-engineer-agent.md` (# Rules section updated with 6-item infra checklist) + +--- + +## 2026-04-02 — Proactive DB Schema + ENV Verification in /deploy-check + +**What:** Upgraded `/deploy-check` so that database schema application and ENV completeness are enforced as **blocking gates** during the command, not left as unchecked items in the PR body for a reviewer to discover. + +- **New §3a** (Database Schema Verification): Agent reads `schema.sql`, extracts all `CREATE TABLE` table names, then either queries `information_schema.tables` via MCP (Supabase/Neon) to verify each table exists, or — if MCP is unavailable — prints a blocking prompt listing every required table and instructs the user to apply the schema before continuing. Deployment is blocked if any table is missing or the user hasn't confirmed. +- **Updated §2** (ENV Completeness Check): Agent greps `apps//src/` for all `process.env.*` references and diffs against `.env.local.example`. Any var in code but missing from the example file is a **BLOCKING violation** that stops the command. +- **Updated §8 PR body**: Removed `[ ] Apply schema.sql` and `[ ] Set env vars` as reviewer TODOs — replaced with pre-checked `[x] Schema verified` and `[x] ENV verified` lines, because these are now confirmed before PR creation. + +**Why:** Across multiple pipeline cycles (issues 002–006, 009), the schema and ENV steps were only surfaced as PR checklist items that reviewers were expected to catch. This caused silent deploy failures: the PR was merged, the app was pushed, and only then did the missing tables or missing env vars surface. 
The gate must fire _before_ the PR is created. + +**Anti-pattern fixed**: "Schema applied post-PR = silent deploy failure" (engineering-lessons.md, issue-002 entry). + +**Files:** `commands/deploy-check.md` (§2 ENV check added, §3a schema verification added, §8 PR body updated), `CHANGELOG.md` + +--- + +## 2026-04-02 — Real-Time Feedback Capture + Mandatory Linear Sync + +**What:** Added two hard rules to the system that were previously missing: (1) PM feedback during any pipeline stage must be captured immediately into the relevant agent/command file and CHANGELOG — not deferred to `/learning`. (2) Linear sync checkpoints are now mandatory, not "recommended" — if a sync is skipped, the next command must run it before proceeding. + +**Why:** PM feedback during issue-009's pipeline was not being captured into the agent files in real time, creating risk that corrections would be lost if the cycle was abandoned or compacted. Linear syncs were being skipped because the language in CLAUDE.md and command-protocol.md said "recommended" rather than enforcing them. + +**Files:** `CLAUDE.md` (Real-Time Feedback Capture section added, Linear checkpoints changed from Recommended to Mandatory), `command-protocol.md` (Real-Time Feedback Capture Protocol section added, CHANGELOG Discipline section added, Linear checkpoints made mandatory) + +--- + +## 2026-04-02 — /review Command + Code Review Agent Upgrade (Zevi Gap Analysis) + +**What:** Strengthened `/review` command and `code-review-agent.md` based on a benchmark against Zevi Arnovitz's (Meta PM, Lenny's Podcast) code review command. Added what was genuinely better; kept our competitive advantages. 
+ +**Added to both files:** + +- Formal severity ladder: CRITICAL / HIGH / MEDIUM / LOW with project-specific definitions (PostHog dual-emission explicitly labeled CRITICAL) +- Structured output format: `Looks Clean` pass-list + `Issues Found` with `file:line` format + `Summary` block with issue counts and recommendation +- Explicit production readiness checks: no `console.log`, no TODOs/FIXMEs, no hardcoded secrets, no `@ts-ignore` +- React/Hooks review step (scoped strictly to `"use client"` files): effect cleanup, dependency arrays, infinite loop patterns +- Client-side performance sub-checks under Step 5 (`useMemo`, `useCallback`, unnecessary re-renders) — also scoped to Client Components only + +**Preserved (our advantages Zevi doesn't have):** + +- PostHog dual-emission check as a named CRITICAL block with exact grep instructions +- Architecture check diffs against the actual plan doc (not generic "follows patterns") +- Knowledge file loading (`engineering-lessons.md` keeps postmortem rules active) +- Pipeline gate integration and quality gate enforcement + +**What we did NOT copy:** emoji formatting, generic architecture check, project-agnostic output. + +**Files:** `commands/review.md` (updated), `agents/code-review-agent.md` (updated) + +--- + ## 2026-04-01 — Linear PM Layer (Retroactive Sync + Auto-Bind) **What:** Full Linear integration layer added as a PM-facing workflow mirror. The repo remains the source of truth; Linear reflects state for stakeholder visibility. diff --git a/CLAUDE.md b/CLAUDE.md index 6af0746..cb0b2e4 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -87,17 +87,32 @@ Rules: - Linear commands must never silently skip failed writes - Existing pipeline commands remain valid even if Linear is unavailable -Recommended checkpoints: +**Mandatory checkpoints (not optional — every pipeline run must execute these):** - **`/create-issue` auto-binds Linear** — `/linear-bind` + root issue creation run automatically at the end of every `/create-issue`. 
No manual bind step required. -- After `/create-issue`: `/linear-sync issue` (brief already bound; sync the description) -- After `/create-plan`: `/linear-sync plan` -- After `/review`, `/peer-review`, `/qa-test`: `/linear-sync status` -- After `/deploy-check`: `/linear-sync release` -- After `/learning`: `/linear-close` +- After `/create-issue`: `/linear-sync issue` — sync the brief description +- After `/create-plan`: `/linear-sync plan` — sync PRD + child tasks +- After `/review`, `/peer-review`, `/qa-test`: `/linear-sync status` — sync gate outcomes +- After `/deploy-check`: `/linear-sync release` — sync PR/deployment links +- After `/learning`: `/linear-close` — finalize and archive the Linear project + +**Enforcement**: If a Linear sync is skipped at a checkpoint, the next command must begin by running the missed sync before proceeding. Do not silently skip. **Never use hard-coded template examples.** All outputs must reference the active project context. +### Real-Time Feedback Capture (CRITICAL) + +When the PM gives corrective feedback during any pipeline stage, apply it **immediately** — do not defer to `/learning`: + +1. **Update the relevant agent file** (`agents/-agent.md`) with the new rule, formatted as a hard constraint (not a suggestion) +2. **Update the relevant command file** (`commands/.md`) if the rule applies to the command protocol +3. **Update `CHANGELOG.md`** with a dated entry describing what changed and why +4. **Update `project-state.md`** Decisions Log with the correction + +The `/learning` command at end-of-cycle should **reinforce** these rules, not be the first time they are captured. If feedback is not captured in real time, it will be lost if the pipeline cycle is abandoned or compacted. + +**Rule**: Every mid-pipeline correction from the PM = immediate write to agent/command file + CHANGELOG entry. No exceptions. 
+ ### State Management After every command execution, update `project-state.md`: diff --git a/agents/backend-architect-agent.md b/agents/backend-architect-agent.md index 1af03a2..13ec7f0 100644 --- a/agents/backend-architect-agent.md +++ b/agents/backend-architect-agent.md @@ -220,12 +220,29 @@ Before finalizing the architecture, answer all of the following. Any gap must be 9. **Telemetry Latency Isolation**: For every API route with a latency SLA (P95 target), confirm that PostHog/telemetry calls are fire-and-forget (not awaited). Awaited telemetry in hot paths violates latency contracts and creates false fallback triggers in experiment flows. → Exception: admin/cron routes where latency SLA doesn't apply. +10. **Dashboard / Report Rehydration Path**: For every dashboard, report, or results page that is linked from navigation, email CTA, push notification, or any external URL: + → Specify the exact authenticated read path for first-load rehydration: which API route is called, what query it runs, and what state it returns. + → The mutation response path (result available immediately after POST) is not sufficient — the page must hydrate from the DB on any entry point. + → Client-memory-only post-mutation flows are blocked for any page reachable from an email link or deep URL. + +11. **Parent/Child Write Atomicity**: For every user action that writes a parent record + one or more child records in sequence: + → Specify the atomicity strategy explicitly: if the child write fails, define whether the parent is rolled back or transitioned to a `failed` state, and confirm error telemetry fires. + → Partial success (parent = `processed` / `success`, children = missing) is never an acceptable terminal state. + → "Log and continue" on child write failure is a blocking omission in the architecture spec. + +12. 
**Fan-Out Worker HTTP Contract**: For every fan-out architecture (master cron → N worker routes): + → Specify the worker HTTP status contract explicitly: "Worker must return HTTP non-2xx (e.g., 502) on any failure that the master should count as failed." + → Master uses HTTP status only for success/failure accounting — never inspects JSON body. + → JSON error payloads with HTTP 200 are insufficient as a failure signal to the master. + # Added: 2026-03-19 — SMB Feature Bundling Engine # Updated: 2026-03-21 — Ozi Reorder Experiment (items 4–7) # Updated: 2026-03-28 — Nykaa Personalisation (items 8–9) +# Updated: 2026-04-03 — MoneyMirror (items 10–12) + --- ## Anti-Sycophancy Mandate diff --git a/agents/backend-engineer-agent.md b/agents/backend-engineer-agent.md index 59d8892..cac1a3d 100644 --- a/agents/backend-engineer-agent.md +++ b/agents/backend-engineer-agent.md @@ -154,3 +154,24 @@ Optimize for MVP speed. Experiment Integrity & Telemetry: Ensure cryptographic salts for A/B testing are server-only (do not use NEXT_PUBLIC). Telemetry calls (e.g., PostHog `captureServerEvent`) in user-facing API routes must be fire-and-forget (`.catch(() => {})`) instead of `await`ed to prevent external latency from corrupting SLAs and experiment data. Control group API responses must return a neutral label ("default"), never the real cohort string — the true cohort is captured server-side in PostHog only. # Added: 2026-03-28 — Nykaa Personalisation (issue-008) + +**Authenticated Route Caller Verification**: After adding authentication to any API route, search all client-side callers of that route path and verify each sends the required auth header. A `fetch()` call to an authenticated route without an `Authorization` header is a CRITICAL bug. A route auth fix without updating all callers is an incomplete fix — both the route and every caller must be updated in the same change. 
+ +# Added: 2026-04-03 — MoneyMirror (issue-009) + +**File Size Budget at Generation Time**: Before writing any API route or page component expected to contain multi-phase logic, identify extraction points upfront. Route handlers must stay under 200 lines; page components must stay under 250 lines. If a file would exceed these limits, extract helpers or sub-components before writing past the limit — never write a large file and refactor later. + +# Added: 2026-04-03 — MoneyMirror (issue-009) + +**Infrastructure Provisioning is a hard deliverable** — not a README suggestion. Before execute-plan can be marked DONE, the Backend Engineer must confirm all of the following are complete: + +1. **Database project exists** — Neon/Supabase project created and `DATABASE_URL` is a real connection string in `.env.local` (not a placeholder). +2. **Schema applied** — `schema.sql` has been run against the live DB. Verify by querying `information_schema.tables` — every expected table must exist. +3. **Auth provider provisioned** — If the app uses Neon Auth, `NEON_AUTH_BASE_URL` must be obtained from the Neon console Auth section and filled in `.env.local`. OTP login must work locally before execute-plan closes. +4. **All non-optional env vars filled** — Every variable in `.env.local.example` that is not explicitly marked `# Optional` must have a real value in `.env.local`. Empty strings (`VAR=`) are a blocking violation. +5. **Sentry project created** — Create a Sentry project (free tier), run `npx @sentry/wizard@latest -i nextjs`, and fill `NEXT_PUBLIC_SENTRY_DSN`, `SENTRY_AUTH_TOKEN`, `SENTRY_ORG`, `SENTRY_PROJECT` in `.env.local`. This is a backend setup task, not a deploy-check task. +6. **`npm run dev` boots clean** — The app starts without errors and the core user flow works end-to-end. Auth, DB reads/writes, and the primary feature must all function before the task is closed. + +Infra gaps discovered at `/deploy-check` are Backend Engineer failures. 
Ship infra, not just code. + +# Added: 2026-04-03 — Shift-left infra validation (issue-009 postmortem pattern) diff --git a/agents/code-review-agent.md b/agents/code-review-agent.md index 3c07eb5..a88b785 100644 --- a/agents/code-review-agent.md +++ b/agents/code-review-agent.md @@ -22,11 +22,24 @@ You must be skeptical and critical. 3 Identify security issues 4 Improve maintainability 5 Ensure coding standards are followed +6 Verify production readiness (no console.log, no TODOs, no hardcoded secrets) +7 Review React/Hooks correctness in Client Components Your job is to critique, not approve blindly. --- +# Severity Ladder + +Assign one of these levels to every issue: + +**CRITICAL** — Security vulnerabilities, data loss, auth bypass, PostHog dual-emission, crashes +**HIGH** — Logic bugs, broken user flows, performance degradation, missing RLS +**MEDIUM** — Code quality, missing edge cases, maintainability problems +**LOW** — Style, naming, cosmetic improvements + +--- + # Inputs You will receive: @@ -34,7 +47,7 @@ You will receive: Frontend implementation Backend implementation Database schema -Architecture documentation +Architecture documentation (plan doc for the active issue) --- @@ -46,9 +59,9 @@ Follow this sequence. ## 1 Architecture Check -Verify implementation matches the architecture defined earlier. +Verify implementation matches the architecture defined in the plan doc for the active issue. -Flag deviations. +Flag deviations. "Follows existing patterns" is not sufficient — diff against the actual plan. 
--- @@ -61,6 +74,13 @@ maintainability modularity reusability +Also check: + +- No `console.log` in production code +- No TODO or FIXME comments in submitted code +- No hardcoded secrets, API keys, or debug flags +- No TypeScript `any` types; no `@ts-ignore` suppressions + --- ## 3 Bug Detection @@ -80,6 +100,7 @@ Check for issues such as: injection risks missing validation unsafe API usage +missing RLS policies on user-scoped tables **PostHog dual-emission check** (required for every review): @@ -87,25 +108,63 @@ Verify that no PostHog event name appears in BOTH a server-side API route AND a - Search for every `posthog.capture('event_name')` call in the codebase. - For each event, confirm it has exactly one emission point. -- Dual-emission of any North Star metric event is a **critical violation** — it corrupts funnel counts and makes the metric unmeasurable. +- Dual-emission of any North Star metric event is a **CRITICAL violation** — it corrupts funnel counts and makes the metric unmeasurable. If found: block approval and require removal of the client-side re-fire (server-side is the authoritative source when the API confirms the action). # Added: 2026-03-21 — Ozi Reorder Experiment +**Authenticated Route → Caller Cross-Verification** (required for every review): + +For every API route confirmed to require authentication: + +- Search all `fetch()`, `axios`, and `useSWR` calls in client components (`"use client"` files) targeting that route path. +- If any caller omits the `Authorization` header (or equivalent auth mechanism), flag as **CRITICAL**. +- A route auth fix without updating all callers is an incomplete fix — both sides must be verified in the same review pass. 
+ +# Added: 2026-04-03 — MoneyMirror (issue-009) + +**Parent/Child Write Sequence** (required for every review): + +For every API route that writes a parent record followed by child records: + +- Verify the route cannot enter a success state (`processed`, `completed`, `201`) before child writes succeed. +- If parent status is set to a success terminal state before child insert completes, flag as **CRITICAL**. +- Verify that a child write failure either rolls back the parent or transitions it to a `failed` state — never silently logs and continues. + +# Added: 2026-04-03 — MoneyMirror (issue-009) + --- ## 5 Performance Risks Identify: -inefficient queries +inefficient queries (missing `.limit()`, N+1 patterns) large payloads -blocking operations +blocking operations in API routes + +**Client-side performance (for `"use client"` files only):** + +- Unnecessary re-renders +- Expensive calculations not wrapped in `useMemo` +- Stable callbacks not wrapped in `useCallback` when passed as props + +--- + +## 6 React / Hooks Review + +Applies only to files with `"use client"` directive. + +Check: + +- `useEffect` has cleanup where side effects persist (subscriptions, timers, event listeners) +- Dependency arrays are complete — no missing deps, no stale closures +- No patterns that cause infinite render loops --- -## 6 Suggested Improvements +## 7 Suggested Improvements Recommend improvements where needed. @@ -117,20 +176,31 @@ Return output using this structure. --- -Critical Issues +## Looks Clean -Architecture Violations +List items verified as correct. Not optional — confirms the review was thorough, not just a defect list. 
-Security Risks +- [item] -Performance Issues +--- -Code Quality Improvements +## Issues Found -Final Recommendation +For each issue: -Approve -Request Changes +**[SEVERITY]** `file:line` — description +Fix: specific suggested fix + +--- + +## Summary + +- Files reviewed: X +- CRITICAL issues: X +- HIGH issues: X +- MEDIUM issues: X +- LOW issues: X +- Recommendation: Approve / Request Changes --- @@ -140,7 +210,7 @@ Be strict. Prioritize user safety and system stability. -Never approve code with critical issues. +Never approve code with CRITICAL or HIGH issues unresolved. --- @@ -154,13 +224,8 @@ Responsibilities: - check code quality and maintainability - identify security issues - detect inefficient algorithms +- verify production readiness and React/Hooks correctness The agent must assume the implementation may contain mistakes. It should challenge assumptions and highlight potential failure points. - -Output format: - -Issues -Severity -Suggested Fix diff --git a/agents/qa-agent.md b/agents/qa-agent.md index 8af3bcc..835b086 100644 --- a/agents/qa-agent.md +++ b/agents/qa-agent.md @@ -116,6 +116,26 @@ Verify: # Added: 2026-03-21 — Ozi Reorder Experiment +**Env Var Key Name Cross-Check** (standalone QA dimension — required for all projects): + +Perform a grep-based audit to verify `.env.local.example` exactly matches the source code: + +```bash +grep -r 'process\.env\.' src/ | grep -oP 'process\.env\.\K[A-Z0-9_]+' | sort -u +``` + +Compare the output against every key listed in `.env.local.example`. + +Verify: + +1. Every key used in source code appears in `.env.local.example`. +2. Every key name matches exactly — no `NEXT_PUBLIC_` prefix added or removed relative to source usage. +3. If any key in source is absent from the example file, or any name diverges, this is a **blocking QA finding** — env var mismatches cause silent production failures that are nearly impossible to debug from error logs alone. 
+ +Note: Pay special attention to server-side telemetry keys (PostHog, Sentry). A `NEXT_PUBLIC_` prefix on a server-only key leaks it to the browser bundle; a missing prefix means server-side clients read `undefined`. + +# Added: 2026-04-03 — MoneyMirror (issue-009) + --- ## 4 Performance Testing diff --git a/apps/money-mirror/.gitignore b/apps/money-mirror/.gitignore new file mode 100644 index 0000000..dd146b5 --- /dev/null +++ b/apps/money-mirror/.gitignore @@ -0,0 +1,44 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. + +# dependencies +/node_modules +/.pnp +.pnp.* +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/versions + +# testing +/coverage + +# next.js +/.next/ +/out/ + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.pnpm-debug.log* + +# env files (can opt-in for committing if needed) +.env* + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts + +# Sentry Config File +.env.sentry-build-plugin diff --git a/apps/money-mirror/AGENTS.md b/apps/money-mirror/AGENTS.md new file mode 100644 index 0000000..c153a9b --- /dev/null +++ b/apps/money-mirror/AGENTS.md @@ -0,0 +1,7 @@ + + +# This is NOT the Next.js you know + +This version has breaking changes — APIs, conventions, and file structure may all differ from your training data. Read the relevant guide in `node_modules/next/dist/docs/` before writing any code. Heed deprecation notices. 
+ + diff --git a/apps/money-mirror/CLAUDE.md b/apps/money-mirror/CLAUDE.md new file mode 100644 index 0000000..43c994c --- /dev/null +++ b/apps/money-mirror/CLAUDE.md @@ -0,0 +1 @@ +@AGENTS.md diff --git a/apps/money-mirror/CODEBASE-CONTEXT.md b/apps/money-mirror/CODEBASE-CONTEXT.md new file mode 100644 index 0000000..536febd --- /dev/null +++ b/apps/money-mirror/CODEBASE-CONTEXT.md @@ -0,0 +1,71 @@ +# Codebase Context: MoneyMirror + +Last updated: 2026-04-03 + +## What This App Does + +MoneyMirror is a mobile-first PWA AI financial coach for Gen Z Indians (₹20K–₹80K/month). Users sign in with Neon Auth email OTP, upload a password-free Indian bank-account or credit-card statement PDF, and Gemini 2.5 Flash parses and categorizes each transaction into needs/wants/investment/debt/other. The "Mirror moment" reveals the gap between self-reported spend from onboarding and actual spend from the statement. An advisory engine fires up to 5 consequence-first nudges, and a weekly recap email is sent by a Vercel cron fan-out every Monday at 8:00 AM IST. The primary North Star proxy is second-month statement upload rate (≥60%). + +## Architecture Overview + +- **Frontend**: Next.js 16 App Router (RSC by default, `"use client"` for interactive panels). Key pages: `/` (landing), `/onboarding` (5-question flow), `/score` (Money Health Score reveal), `/dashboard` (Mirror + advisory feed + upload). +- **Backend**: Next.js API routes under `src/app/api/`. Neon Auth for session auth, Neon Postgres for persistence, Gemini 2.5 Flash for PDF parse + categorization, Resend for weekly recap emails, PostHog for server-side telemetry. +- **Database**: Neon Postgres. 4 tables: `profiles`, `statements`, `transactions`, `advisory_feed`. `profiles` persists monthly income and perceived spend; `statements` now tracks `institution_name`, `statement_type`, and optional credit-card due metadata. All monetary values are stored as `BIGINT` in paisa (₹ × 100) to avoid float precision errors. 
+- **AI Integration**: Gemini 2.5 Flash via `@google/genai`. Used for: (1) PDF text → structured bank-account or credit-card statement JSON, (2) transaction category normalization. The statement-parse route currently enforces a 25s timeout and returns JSON 504 on timeout. +- **Analytics**: PostHog (server-side only, `posthog-node`). 10 events tracked: `onboarding_completed`, `statement_parse_started/rate_limited/success/timeout/failed`, `weekly_recap_triggered/completed`, `weekly_recap_email_sent/failed`. All calls fire-and-forget (`.catch(() => {})`). + +## Key Files + +| File | Purpose | +| -------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `src/app/api/statement/parse/route.ts` | Core pipeline: PDF upload → statement-type-aware Gemini parse → DB persist → advisory generation. Fail-closed: deletes parent statement row if transactions insert fails. | +| `src/app/api/statement/parse/persist-statement.ts` | Extracted helper: writes statements + transactions atomically; returns failure if child insert fails. | +| `src/app/api/dashboard/route.ts` | Authenticated GET — rehydrates the latest processed statement + transactions + advisory feed from DB. Called on every dashboard first-load (refresh, deep link, email CTA). | +| `src/app/api/dashboard/advisories/route.ts` | Authenticated GET — returns advisory_feed rows for the current user via the active Neon session cookie. | +| `src/app/api/cron/weekly-recap/route.ts` | Master cron: scheduled GET entrypoint for Vercel Cron; accepts Bearer `CRON_SECRET` or local `x-cron-secret`, paginates users in 1000-row batches, and fans out to the worker via Promise.allSettled. | +| `src/app/api/cron/weekly-recap/worker/route.ts` | Worker: sends Resend email per user. Returns HTTP 502 on failure so master counts it correctly. 
| +| `src/lib/advisory-engine.ts` | Fires 5 advisory types based on spend ratios and thresholds. Writes to `advisory_feed` table. | +| `src/lib/scoring.ts` | Computes Money Health Score (0–100) from 5 onboarding question responses. | +| `src/lib/statements.ts` | Defines statement types, parser prompts, metadata validation, and shared display labels for bank-account and credit-card uploads. | +| `src/lib/pdf-parser.ts` | Extracts raw text from PDF buffer using `pdf-parse`. Uses `result.total` (not `result.pages?.length`) for page count — v2 API. | +| `src/lib/posthog.ts` | Server-side PostHog singleton. Reads `POSTHOG_KEY` and `POSTHOG_HOST` (server-only, no `NEXT_PUBLIC_` prefix). | + +## Data Model + +- **profiles**: One row per user. `id` = Neon Auth user id (TEXT). Stores `monthly_income_paisa`, `perceived_spend_paisa`, `target_savings_rate`, `money_health_score`. +- **statements**: One per uploaded PDF. Tracks `institution_name`, `statement_type` (`bank_account` or `credit_card`), statement period, optional card due metadata, and `status`. Status never set to `processed` before `transactions` child insert succeeds. +- **transactions**: Many per statement. All amounts in paisa (BIGINT). `category` CHECK: `needs | wants | investment | debt | other` (lowercase). +- **advisory_feed**: Advisory nudges generated per statement. `trigger` identifies which advisory type fired. 
+ +## API Endpoints + +| Method | Path | Auth | Purpose | +| ------ | ------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| POST | `/api/statement/parse` | Neon session cookie | Upload PDF plus `statement_type`, parse with Gemini, persist to DB, return mirror data | +| GET | `/api/dashboard` | Neon session cookie | Rehydrate latest processed statement + advisory feed (refresh/deep link path) | +| GET | `/api/dashboard/advisories` | Neon session cookie | Fetch advisory_feed rows for user | +| POST | `/api/onboarding/complete` | Neon session cookie | Save onboarding income, score, and perceived spend to profiles | +| GET | `/api/cron/weekly-recap` | `authorization: Bearer ` or local `x-cron-secret` | Scheduled master fan-out | +| POST | `/api/cron/weekly-recap/worker` | `x-cron-secret` header | Worker: send one recap email; returns 502 on failure | +| ALL | `/api/auth/[...path]` | — | Neon Auth passthrough | + +## Things NOT to Change Without Reading First + +1. **`pdf-parser.ts` uses `result.total` for page count** — not `result.pages?.length`. The `pdf-parse` v2 API changed this property. Do not revert to `result.numpages` or `result.pages?.length`. +2. **`persist-statement.ts` is fail-closed** — if the transactions bulk insert fails, it rolls back the statement insert and returns an error. Do not add a try/catch that swallows the child insert failure and allows the parent to stay as `processed`. +3. **`posthog.ts` reads `POSTHOG_KEY` and `POSTHOG_HOST`** — server-only (no `NEXT_PUBLIC_` prefix). Adding the prefix would leak the key to the browser bundle and also break the server-side client which reads the non-prefixed var. +4. **Dashboard hydration depends on the active Neon session cookie** — any refactor of dashboard data loading must preserve authenticated session-based access across `/api/dashboard` and related reads. +5. 
**Weekly recap worker returns HTTP 502 on email failure** — master uses HTTP status, not JSON body, to count failures. Do not change the worker to return 200 with `{ ok: false }`. +6. **All monetary values are stored in paisa (BIGINT)** — divide by 100 to display as rupees. Never store or compute in rupees directly. +7. **Ownership is enforced in route handlers** — this app no longer uses Supabase RLS or service-role patterns. + +## Known Limitations + +- Statement history browsing not yet implemented — dashboard always shows the latest processed statement. `GET /api/dashboard` accepts `?statement_id=` as a future extension point. +- Password removal stays manual outside the app. Password-protected PDFs are rejected with a clear retry message. +- Inbox ingestion from email is not implemented. Users must manually download the PDF and upload it. +- PDF parsing reliability depends on the PDF being text-based (not scanned/image). Scanned PDFs return 400. +- Rate limit for uploads is 3/day per user (in-memory, resets on server restart) — not durable across deployments. +- Weekly recap email only triggers if the user has at least one processed statement. New users without statements are silently skipped. +- Share button (`navigator.share`) is hidden on desktop browsers — only rendered when Web Share API is available. +- Current automated validation count is 45 tests across route and library coverage. diff --git a/apps/money-mirror/README.md b/apps/money-mirror/README.md new file mode 100644 index 0000000..e410515 --- /dev/null +++ b/apps/money-mirror/README.md @@ -0,0 +1,215 @@ +# MoneyMirror + +AI-powered personal finance coach for Gen Z Indians that uses Neon Auth plus Neon Postgres to parse Indian bank-account and credit-card statements, reveal the perception gap between perceived and actual spend, and deliver consequence-first nudges. 
+ +**Sign in with your email, upload a statement, and get a brutally honest view of where your money actually goes.** + +## What it does + +1. User signs in with Neon Auth email OTP and lands in a private onboarding flow. +2. User completes 5 onboarding questions and the app calculates a Money Health Score. +3. User uploads a password-free bank-account or credit-card statement PDF and Gemini extracts and categorizes transactions entirely in memory. +4. Dashboard hydrates the latest processed statement and shows the Mirror Moment plus advisory cards. +5. Every Monday at 8:00 AM IST, a Vercel cron fan-out sends a weekly recap email to each eligible user via Resend. + +## Stack + +| Layer | Technology | +| --------- | ------------------------------------------ | +| Frontend | Next.js 16, TypeScript, Tailwind CSS 4 | +| Backend | Next.js App Router route handlers | +| Auth | Neon Auth (`@neondatabase/auth`) | +| Database | Neon Postgres (`@neondatabase/serverless`) | +| AI | Google Gemini 2.5 Flash | +| Analytics | PostHog (`posthog-node`) | +| Email | Resend | +| Hosting | Vercel | + +## Setup + +### 1. Install dependencies + +```bash +cd apps/money-mirror +npm install +``` + +### 2. 
Configure environment variables

```bash
cp .env.local.example .env.local
```

Fill in these values:

| Variable | Required | Description |
| ------------------------- | -------- | ------------------------------------------------------------------- |
| `DATABASE_URL` | Yes | Neon Postgres connection string |
| `NEON_AUTH_BASE_URL` | Yes | Base URL for your Neon Auth project |
| `NEON_AUTH_COOKIE_SECRET` | No | Optional only if Neon explicitly gives one for your project/runtime |
| `GEMINI_API_KEY` | Yes | Google AI Studio API key |
| `RESEND_API_KEY` | Yes | Resend API key |
| `POSTHOG_KEY` | Yes | Server-side PostHog key |
| `POSTHOG_HOST` | Yes | PostHog host URL |
| `NEXT_PUBLIC_APP_URL` | Yes | Public app URL used in recap links |
| `CRON_SECRET` | Yes | Shared secret for cron routes |
| `NEXT_PUBLIC_SENTRY_DSN` | Yes | Sentry DSN |
| `SENTRY_AUTH_TOKEN` | Yes | Sentry auth token |
| `SENTRY_ORG` | Yes | Sentry org slug |
| `SENTRY_PROJECT` | Yes | Sentry project slug |
| `CI` | No | Optional CI build flag |

### 3. Create Neon project and enable Neon Auth

1. Create a Neon project.
2. Enable Neon Auth for the project.
3. Configure email OTP delivery in Neon Auth.
4. Copy the Postgres connection string into `DATABASE_URL`.
5. Copy the Neon Auth base URL into `NEON_AUTH_BASE_URL`.
6. Only set `NEON_AUTH_COOKIE_SECRET` if Neon explicitly provides that value during auth setup.

### 4. Apply database schema

Run the full contents of [`schema.sql`](./schema.sql) against your Neon database.

Tables created:

- `profiles`
- `statements`
- `transactions`
- `advisory_feed`

Indexes created:

- `idx_statements_user_created_at`
- `idx_transactions_user_statement`
- `idx_advisory_feed_user_created_at`

### 5. Run locally

```bash
npm run dev
```

Open `http://localhost:3000`.

Success looks like:

- landing page loads
- `/login` can send an email OTP
- successful sign-in reaches onboarding
- dashboard loads after uploading a PDF

First-run failure looks like:

- `Error: DATABASE_URL is required.`
- `Error: NEON_AUTH_BASE_URL is required`

## API

### `POST /api/onboarding/complete`

Persists the monthly income, money health score, and perceived spend for the authenticated user.

**Auth**: Neon Auth session cookie required.

**Body**:

```json
{
  "monthly_income_paisa": 6000000,
  "money_health_score": 62,
  "perceived_spend_paisa": 2500000
}
```

**Returns**:

```json
{ "ok": true }
```

### `POST /api/statement/parse`

Accepts `multipart/form-data` with a `file` field and optional `statement_type`, extracts statement data, categorizes transactions, and persists a processed statement.

**Auth**: Neon Auth session cookie required.

**Returns**:

```json
{
  "statement_id": "uuid",
  "institution_name": "Kotak Mahindra Bank",
  "statement_type": "bank_account",
  "period_start": "2026-03-01",
  "period_end": "2026-03-31",
  "transaction_count": 47,
  "summary": {
    "needs_paisa": 850000,
    "wants_paisa": 1200000,
    "investment_paisa": 400000,
    "debt_paisa": 300000,
    "other_paisa": 50000,
    "total_debits_paisa": 2800000,
    "total_credits_paisa": 5000000
  }
}
```

### `GET /api/dashboard?statement_id=<statement_id>`

Returns the full dashboard state for the authenticated user.

**Auth**: Neon Auth session cookie required.

### `GET /api/dashboard/advisories?statement_id=<statement_id>`

Returns the advisory subset for the authenticated user and statement.

**Auth**: Neon Auth session cookie required.

### `GET /api/cron/weekly-recap`

Fan-out master route that finds all users with processed statements and triggers worker jobs. This is the scheduled entrypoint configured in [`vercel.json`](./vercel.json).

**Auth**: `authorization: Bearer <CRON_SECRET>` from Vercel Cron. Local/manual triggering may also use `x-cron-secret: <CRON_SECRET>`.

### `POST /api/cron/weekly-recap/worker`

Sends one recap email for one user.

**Auth**: `x-cron-secret: <CRON_SECRET>`

**Body**:

```json
{ "userId": "user-id" }
```

## Analytics

| Event | Where | Properties |
| ------------------------------ | ---------------------------------------------- | ------------------------------------------------------------------------------------- |
| `onboarding_completed` | `/api/onboarding/complete` | `monthly_income_paisa`, `money_health_score`, `perceived_spend_paisa` |
| `statement_parse_started` | `/api/statement/parse` | `pdf_text_length` |
| `statement_parse_rate_limited` | `/api/statement/parse` | `uploads_today`, `limit` |
| `statement_parse_success` | `/api/statement/parse` | `latency_ms`, `transaction_count`, `period_start`, `period_end`, `total_debits_paisa` |
| `statement_parse_timeout` | `/api/statement/parse` | `timeout_ms` |
| `statement_parse_failed` | `/api/statement/parse` and persistence helpers | `error_type`, optional context |
| `weekly_recap_triggered` | `/api/cron/weekly-recap` | `user_count` |
| `weekly_recap_completed` | `/api/cron/weekly-recap` | `total`, `succeeded`, `failed` |
| `weekly_recap_email_sent` | `/api/cron/weekly-recap/worker` | `period_start`, `period_end`, `total_debits_paisa` |
| `weekly_recap_email_failed` | `/api/cron/weekly-recap/worker` | `error` |

## Key design decisions

- **Neon-only boundary**: auth and database infrastructure now live on Neon so the app does not depend on Supabase-specific auth, RLS, or admin APIs.
- **Server-enforced ownership**: row ownership is checked in route handlers by authenticated `user.id`, replacing Postgres RLS.
- **Email-based identity**: user email is persisted in `profiles` so recap jobs do not need auth-admin lookups at send time.
+- **Single-transaction statement persistence**: statement row creation, transaction inserts, and status finalization run in one Neon transaction to avoid partial writes. +- **Zero-retention PDF parsing**: uploaded PDFs are processed in memory and nulled immediately after text extraction. + +## Current scope + +- Shipped now: email OTP sign-in, onboarding score, multi-bank bank-account parsing, credit-card parsing, dashboard rehydration, 5 advisory triggers, weekly recap email. +- Not shipped in the current app: inbox ingestion from email, WhatsApp/WATI delivery, gamification, Warikoo Priority Ladder goal gating. diff --git a/apps/money-mirror/__tests__/api/onboarding-complete.test.ts b/apps/money-mirror/__tests__/api/onboarding-complete.test.ts new file mode 100644 index 0000000..3fd1630 --- /dev/null +++ b/apps/money-mirror/__tests__/api/onboarding-complete.test.ts @@ -0,0 +1,101 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest'; +import { NextRequest } from 'next/server'; + +const mockGetSessionUser = vi.fn(); +const mockUpsertProfileOnboarding = vi.fn(); +const mockCaptureServerEvent = vi.fn(); + +vi.mock('@/lib/auth/session', () => ({ + getSessionUser: mockGetSessionUser, +})); + +vi.mock('@/lib/db', () => ({ + upsertProfileOnboarding: mockUpsertProfileOnboarding, +})); + +vi.mock('@/lib/posthog', () => ({ + captureServerEvent: mockCaptureServerEvent, +})); + +async function getRoute() { + const mod = await import('@/app/api/onboarding/complete/route'); + return mod.POST; +} + +function makeRequest(body: Record) { + return new NextRequest('http://localhost/api/onboarding/complete', { + method: 'POST', + body: JSON.stringify(body), + headers: { + 'content-type': 'application/json', + }, + }); +} + +describe('POST /api/onboarding/complete', () => { + beforeEach(() => { + vi.clearAllMocks(); + mockGetSessionUser.mockResolvedValue({ + id: 'user-123', + email: 'vijay@example.com', + name: 'Vijay', + }); + 
mockUpsertProfileOnboarding.mockResolvedValue(undefined); + mockCaptureServerEvent.mockResolvedValue(undefined); + }); + + it('returns 200 and emits telemetry after a successful profile write', async () => { + const POST = await getRoute(); + const res = await POST( + makeRequest({ + monthly_income_paisa: 6000000, + money_health_score: 62, + perceived_spend_paisa: 2500000, + }) + ); + const body = await res.json(); + + expect(res.status).toBe(200); + expect(body).toEqual({ ok: true }); + expect(mockUpsertProfileOnboarding).toHaveBeenCalledOnce(); + expect(mockCaptureServerEvent).toHaveBeenCalledWith('user-123', 'onboarding_completed', { + monthly_income_paisa: 6000000, + money_health_score: 62, + perceived_spend_paisa: 2500000, + }); + }); + + it('returns 500 and does not emit success telemetry when persistence fails', async () => { + mockUpsertProfileOnboarding.mockRejectedValueOnce(new Error('db down')); + + const POST = await getRoute(); + const res = await POST( + makeRequest({ + monthly_income_paisa: 5000000, + money_health_score: 55, + perceived_spend_paisa: 1800000, + }) + ); + const body = await res.json(); + + expect(res.status).toBe(500); + expect(body).toEqual({ error: 'Failed to save onboarding progress.' 
}); + expect(mockCaptureServerEvent).not.toHaveBeenCalled(); + }); + + it('returns 401 when the user is not authenticated', async () => { + mockGetSessionUser.mockResolvedValueOnce(null); + + const POST = await getRoute(); + const res = await POST( + makeRequest({ + monthly_income_paisa: 5000000, + money_health_score: 55, + perceived_spend_paisa: 0, + }) + ); + + expect(res.status).toBe(401); + expect(mockUpsertProfileOnboarding).not.toHaveBeenCalled(); + }); +}); diff --git a/apps/money-mirror/__tests__/api/parse.test.ts b/apps/money-mirror/__tests__/api/parse.test.ts new file mode 100644 index 0000000..d224c93 --- /dev/null +++ b/apps/money-mirror/__tests__/api/parse.test.ts @@ -0,0 +1,242 @@ +/** + * Tests for POST /api/statement/parse + * + * Strategy: mock Neon auth session, Neon DB helpers, Gemini, pdf-parser, and PostHog. + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { NextRequest } from 'next/server'; + +vi.mock('@/lib/pdf-parser', () => ({ + extractPdfText: vi.fn().mockResolvedValue({ + text: '01/03/2026 SWIGGY 450.00 Dr\n02/03/2026 SALARY 50000.00 Cr', + pageCount: 1, + }), + PdfExtractionError: class PdfExtractionError extends Error { + code: string; + constructor(message: string, code: string) { + super(message); + this.code = code; + } + }, +})); + +vi.mock('@/lib/posthog', () => ({ + captureServerEvent: vi.fn().mockResolvedValue(undefined), +})); + +const mockGetSessionUser = vi.fn(); +vi.mock('@/lib/auth/session', () => ({ + getSessionUser: mockGetSessionUser, +})); + +const mockCountUserStatementsSince = vi.fn(); +const mockEnsureProfile = vi.fn(); +vi.mock('@/lib/db', () => ({ + countUserStatementsSince: mockCountUserStatementsSince, + ensureProfile: mockEnsureProfile, +})); + +const mockPersistStatement = vi.fn(); +vi.mock('@/app/api/statement/parse/persist-statement', () => ({ + persistStatement: mockPersistStatement, +})); + +const mockGenerateContent = vi.fn(); +vi.mock('@google/genai', () => ({ + GoogleGenAI: 
vi.fn(function () { + return { + models: { + generateContent: mockGenerateContent, + }, + }; + }), +})); + +async function getRoute() { + const mod = await import('@/app/api/statement/parse/route'); + return mod.POST; +} + +function makeRequest(file: File) { + const formData = new FormData(); + formData.append('file', file); + formData.append('statement_type', 'bank_account'); + return new NextRequest('http://localhost/api/statement/parse', { + method: 'POST', + body: formData, + }); +} + +function makeRequestWithStatementType(file: File, statementType: 'bank_account' | 'credit_card') { + const formData = new FormData(); + formData.append('file', file); + formData.append('statement_type', statementType); + return new NextRequest('http://localhost/api/statement/parse', { + method: 'POST', + body: formData, + }); +} + +function makePdfFile(name: string, size: number) { + return new File([new Uint8Array(size)], name, { type: 'application/pdf' }); +} + +describe('POST /api/statement/parse', () => { + beforeEach(() => { + vi.clearAllMocks(); + + mockGetSessionUser.mockResolvedValue({ + id: 'user-123', + email: 'vijay@example.com', + name: 'Vijay', + }); + mockEnsureProfile.mockResolvedValue(undefined); + mockCountUserStatementsSince.mockResolvedValue(0); + mockPersistStatement.mockResolvedValue({ + statement_id: 'stmt-abc', + }); + mockGenerateContent.mockResolvedValue({ + candidates: [ + { + content: { + parts: [ + { + text: JSON.stringify({ + institution_name: 'HDFC Bank', + transactions: [ + { date: '2026-03-01', description: 'SWIGGY', amount: 450.0, type: 'debit' }, + { + date: '2026-03-01', + description: 'SALARY CREDIT', + amount: 50000.0, + type: 'credit', + }, + ], + period_start: '2026-03-01', + period_end: '2026-03-31', + }), + }, + ], + }, + }, + ], + }); + }); + + it('returns 200 with summary on valid PDF upload', async () => { + const POST = await getRoute(); + const req = makeRequest(makePdfFile('statement.pdf', 1024)); + const res = await POST(req); + 
const body = await res.json(); + + expect(res.status).toBe(200); + expect(body).toMatchObject({ + statement_id: 'stmt-abc', + institution_name: 'HDFC Bank', + statement_type: 'bank_account', + period_start: '2026-03-01', + period_end: '2026-03-31', + transaction_count: 2, + summary: expect.objectContaining({ + total_debits_paisa: expect.any(Number), + total_credits_paisa: expect.any(Number), + }), + }); + }); + + it('returns 401 when there is no authenticated Neon session', async () => { + mockGetSessionUser.mockResolvedValueOnce(null); + + const POST = await getRoute(); + const res = await POST(makeRequest(makePdfFile('statement.pdf', 1024))); + + expect(res.status).toBe(401); + }); + + it('returns 400 for non-PDF MIME type', async () => { + const POST = await getRoute(); + const txtFile = new File(['hello'], 'statement.txt', { type: 'text/plain' }); + const res = await POST(makeRequest(txtFile)); + expect(res.status).toBe(400); + }); + + it('returns 504 when Gemini times out', async () => { + const POST = await getRoute(); + mockGenerateContent.mockRejectedValueOnce(new Error('GEMINI_TIMEOUT')); + const res = await POST(makeRequest(makePdfFile('statement.pdf', 1024))); + expect(res.status).toBe(504); + }); + + it('returns 500 when persistence fails', async () => { + mockPersistStatement.mockResolvedValueOnce({ + statement_id: '', + error: 'Failed to save statement data.', + }); + + const POST = await getRoute(); + const res = await POST(makeRequest(makePdfFile('statement.pdf', 1024))); + const body = await res.json(); + + expect(res.status).toBe(500); + expect(body).toMatchObject({ + error: 'Failed to save statement data.', + }); + }); + + it('returns 200 for a credit-card statement and exposes card metadata', async () => { + mockGenerateContent.mockResolvedValueOnce({ + candidates: [ + { + content: { + parts: [ + { + text: JSON.stringify({ + institution_name: 'SBI Card', + period_start: '2026-03-01', + period_end: '2026-03-31', + due_date: '2026-04-18', + 
payment_due: 12999.0, + minimum_due: 1200.0, + credit_limit: 150000.0, + transactions: [ + { + date: '2026-03-02', + description: 'SWIGGY', + amount: 450.0, + type: 'debit', + entry_kind: 'purchase', + }, + { + date: '2026-03-05', + description: 'PAYMENT RECEIVED', + amount: 5000.0, + type: 'credit', + entry_kind: 'payment', + }, + ], + }), + }, + ], + }, + }, + ], + }); + + const POST = await getRoute(); + const res = await POST( + makeRequestWithStatementType(makePdfFile('credit-card.pdf', 1024), 'credit_card') + ); + const body = await res.json(); + + expect(res.status).toBe(200); + expect(body).toMatchObject({ + institution_name: 'SBI Card', + statement_type: 'credit_card', + due_date: '2026-04-18', + payment_due_paisa: 1299900, + minimum_due_paisa: 120000, + credit_limit_paisa: 15000000, + }); + }); +}); diff --git a/apps/money-mirror/__tests__/api/weekly-recap.test.ts b/apps/money-mirror/__tests__/api/weekly-recap.test.ts new file mode 100644 index 0000000..e97ddc6 --- /dev/null +++ b/apps/money-mirror/__tests__/api/weekly-recap.test.ts @@ -0,0 +1,100 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest'; +import { NextRequest } from 'next/server'; + +const mockListEligibleWeeklyRecapUsers = vi.fn(); +const mockCaptureServerEvent = vi.fn(); + +vi.mock('@/lib/db', () => ({ + listEligibleWeeklyRecapUsers: mockListEligibleWeeklyRecapUsers, +})); + +vi.mock('@/lib/posthog', () => ({ + captureServerEvent: mockCaptureServerEvent, +})); + +async function getRoute() { + const mod = await import('@/app/api/cron/weekly-recap/route'); + return { GET: mod.GET, POST: mod.POST }; +} + +function makeGetRequest(headers: Record = {}) { + return new NextRequest('http://localhost/api/cron/weekly-recap', { + method: 'GET', + headers, + }); +} + +describe('/api/cron/weekly-recap', () => { + beforeEach(() => { + vi.clearAllMocks(); + mockListEligibleWeeklyRecapUsers.mockReset(); + mockCaptureServerEvent.mockReset(); + vi.stubEnv('CRON_SECRET', 'test-secret'); + 
mockListEligibleWeeklyRecapUsers + .mockResolvedValueOnce(['user-1', 'user-2']) + .mockResolvedValueOnce([]); + mockCaptureServerEvent.mockResolvedValue(undefined); + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + json: vi.fn().mockResolvedValue({ ok: true }), + }) + ); + }); + + it('returns 401 when the cron request is unauthorized', async () => { + const { GET } = await getRoute(); + const res = await GET(makeGetRequest()); + + expect(res.status).toBe(401); + }); + + it('accepts the Vercel cron GET contract with bearer auth', async () => { + const { GET } = await getRoute(); + const res = await GET(makeGetRequest({ authorization: 'Bearer test-secret' })); + const body = await res.json(); + + expect(res.status).toBe(200); + expect(body).toEqual({ ok: true, total: 2, succeeded: 2, failed: 0 }); + expect(fetch).toHaveBeenCalledTimes(2); + }); + + it('counts worker failures correctly during fan-out', async () => { + vi.stubGlobal( + 'fetch', + vi + .fn() + .mockResolvedValueOnce({ + ok: true, + status: 200, + json: vi.fn().mockResolvedValue({ ok: true }), + }) + .mockResolvedValueOnce({ + ok: false, + status: 502, + json: vi.fn().mockResolvedValue({ ok: false }), + }) + ); + + const { POST } = await getRoute(); + const res = await POST( + new NextRequest('http://localhost/api/cron/weekly-recap', { + method: 'POST', + headers: { + 'x-cron-secret': 'test-secret', + }, + }) + ); + const body = await res.json(); + + expect(res.status).toBe(200); + expect(body).toEqual({ ok: true, total: 2, succeeded: 1, failed: 1 }); + expect(mockCaptureServerEvent).toHaveBeenLastCalledWith('system', 'weekly_recap_completed', { + total: 2, + succeeded: 1, + failed: 1, + }); + }); +}); diff --git a/apps/money-mirror/__tests__/lib/categorizer.test.ts b/apps/money-mirror/__tests__/lib/categorizer.test.ts new file mode 100644 index 0000000..a7d78c3 --- /dev/null +++ b/apps/money-mirror/__tests__/lib/categorizer.test.ts @@ -0,0 +1,175 @@ +/** + * Tests 
for categorizer.ts + */ + +import { describe, it, expect } from 'vitest'; +import { + categorizeCreditCardTransaction, + categorizeTransaction, + summarizeByCategory, + type CategorizedTransaction, +} from '@/lib/categorizer'; + +const debit = (desc: string, amount = 50000) => + categorizeTransaction(desc, amount, '2026-03-15', 'debit'); + +const credit = (desc: string, amount = 100000) => + categorizeTransaction(desc, amount, '2026-03-01', 'credit'); + +describe('categorizeTransaction', () => { + // ── Needs ────────────────────────────────────────────────── + it('categorizes Zepto as needs', () => { + expect(debit('ZEPTO').category).toBe('needs'); + }); + + it('categorizes Blinkit as needs', () => { + expect(debit('BLINKIT').category).toBe('needs'); + }); + + it('categorizes Uber as needs', () => { + expect(debit('UBER TRIP').category).toBe('needs'); + }); + + it('categorizes rent payment as needs', () => { + expect(debit('RENT PAYMENT APR').category).toBe('needs'); + }); + + // ── Wants ────────────────────────────────────────────────── + it('categorizes Swiggy as wants', () => { + expect(debit('SWIGGY').category).toBe('wants'); + }); + + it('categorizes Netflix as wants', () => { + expect(debit('NETFLIX SUBSCRIPTION').category).toBe('wants'); + }); + + it('categorizes Myntra as wants', () => { + expect(debit('MYNTRA FASHION').category).toBe('wants'); + }); + + // ── Investment ───────────────────────────────────────────── + it('categorizes SIP as investment', () => { + expect(debit('SIP AXIS BLUECHIP').category).toBe('investment'); + }); + + it('categorizes Groww as investment', () => { + expect(debit('GROWW MUTUAL FUND').category).toBe('investment'); + }); + + // ── Debt ─────────────────────────────────────────────────── + it('categorizes EMI as debt', () => { + expect(debit('EMI HDFC BANK').category).toBe('debt'); + }); + + it('categorizes BNPL as debt', () => { + expect(debit('LAZYPAY REPAYMENT').category).toBe('debt'); + }); + + // ── Credits always other 
──────────────────────────────────── + it('marks salary credit as other regardless of keyword', () => { + const result = credit('SALARY JANUARY'); + expect(result.category).toBe('other'); + expect(result.type).toBe('credit'); + }); + + // ── Recurring detection ───────────────────────────────────── + it('marks SIP as recurring', () => { + expect(debit('SIP NAVI MUTUAL').is_recurring).toBe(true); + }); + + it('does not mark one-off spend as recurring', () => { + expect(debit('SWIGGY ORDER').is_recurring).toBe(false); + }); + + // ── Priority: investment wins over needs keyword clash ────── + it('prioritizes investment over wants when both match (e.g. Groww)', () => { + // Groww could be considered digital but is investment + const result = debit('GROWW SIP'); + expect(result.category).toBe('investment'); + }); +}); + +describe('summarizeByCategory', () => { + const txns: CategorizedTransaction[] = [ + { + description: 'SWIGGY', + amount_paisa: 50000, + date: '2026-03-15', + type: 'debit', + category: 'wants', + is_recurring: false, + }, + { + description: 'RENT', + amount_paisa: 1500000, + date: '2026-03-01', + type: 'debit', + category: 'needs', + is_recurring: false, + }, + { + description: 'SIP', + amount_paisa: 500000, + date: '2026-03-05', + type: 'debit', + category: 'investment', + is_recurring: true, + }, + { + description: 'SALARY', + amount_paisa: 8000000, + date: '2026-03-01', + type: 'credit', + category: 'other', + is_recurring: false, + }, + ]; + + it('correctly sums debit buckets', () => { + const s = summarizeByCategory(txns); + expect(s.wants).toBe(50000); + expect(s.needs).toBe(1500000); + expect(s.investment).toBe(500000); + expect(s.total_debits).toBe(2050000); + }); + + it('correctly sums credits', () => { + const s = summarizeByCategory(txns); + expect(s.total_credits).toBe(8000000); + }); + + it('credits do not count toward debit buckets', () => { + const s = summarizeByCategory(txns); + expect(s.other).toBe(0); // credit "other" doesn't count 
toward debit total + }); +}); + +describe('categorizeCreditCardTransaction', () => { + it('treats card payments as credit without counting them as income', () => { + const result = categorizeCreditCardTransaction( + 'PAYMENT RECEIVED', + 500000, + '2026-03-10', + 'payment' + ); + expect(result.type).toBe('credit'); + expect(result.category).toBe('debt'); + }); + + it('treats refunds as non-income credits', () => { + const result = categorizeCreditCardTransaction('AMAZON REFUND', 120000, '2026-03-11', 'refund'); + expect(result.type).toBe('credit'); + expect(result.category).toBe('other'); + }); + + it('treats interest as debt', () => { + const result = categorizeCreditCardTransaction( + 'FINANCE CHARGES', + 25000, + '2026-03-12', + 'interest' + ); + expect(result.type).toBe('debit'); + expect(result.category).toBe('debt'); + }); +}); diff --git a/apps/money-mirror/__tests__/lib/pdf-parser.test.ts b/apps/money-mirror/__tests__/lib/pdf-parser.test.ts new file mode 100644 index 0000000..001f75d --- /dev/null +++ b/apps/money-mirror/__tests__/lib/pdf-parser.test.ts @@ -0,0 +1,74 @@ +/** + * T5a — [GREEN] Tests for pdf-parser.ts + * + * Mocks PDFParse class — we test OUR service contract. + * Uses vi.hoisted() to avoid TDZ reference errors from vi.mock hoisting. 
+ */ + +// @vitest-environment node +import { describe, it, expect, vi, beforeEach } from 'vitest'; + +// ─── Hoisted mock fns ───────────────────────────────────────── +const { mockGetText, mockDestroy, MockPDFParse } = vi.hoisted(() => { + const mockGetText = vi.fn(); + const mockDestroy = vi.fn().mockResolvedValue(undefined); + function MockPDFParse(opts: unknown) { + void opts; + return { getText: mockGetText, destroy: mockDestroy }; + } + return { mockGetText, mockDestroy, MockPDFParse }; +}); + +vi.mock('pdf-parse', () => ({ + PDFParse: MockPDFParse, +})); + +import { extractPdfText, PdfExtractionError } from '@/lib/pdf-parser'; + +describe('extractPdfText', () => { + beforeEach(() => { + vi.clearAllMocks(); + mockDestroy.mockResolvedValue(undefined); + }); + + it('throws EMPTY_FILE when buffer is empty', async () => { + await expect(extractPdfText(Buffer.alloc(0))).rejects.toMatchObject({ + code: 'EMPTY_FILE', + }); + }); + + it('throws PARSE_FAILED when getText throws', async () => { + mockGetText.mockRejectedValue(new Error('Invalid PDF structure')); + const garbage = Buffer.from('not a pdf', 'utf-8'); + await expect(extractPdfText(garbage)).rejects.toMatchObject({ + code: 'PARSE_FAILED', + }); + }); + + it('throws EMPTY_TEXT when PDF yields blank text', async () => { + mockGetText.mockResolvedValue({ text: ' ', total: 1 }); + const fakeBuffer = Buffer.from('fake pdf content'); + await expect(extractPdfText(fakeBuffer)).rejects.toMatchObject({ + code: 'EMPTY_TEXT', + }); + }); + + it('returns text and pageCount on success', async () => { + const fakeText = '01/03/26 SWIGGY 500.00 49500.00\n'; + mockGetText.mockResolvedValue({ text: fakeText, total: 3 }); + const fakeBuffer = Buffer.from('fake pdf content'); + + const result = await extractPdfText(fakeBuffer); + + expect(result.text).toBe(fakeText.trim()); + expect(result.pageCount).toBe(3); + }); + + it('returns a PdfExtractionError instance on failure', async () => { + try { + await 
extractPdfText(Buffer.alloc(0)); + } catch (e) { + expect(e).toBeInstanceOf(PdfExtractionError); + } + }); +}); diff --git a/apps/money-mirror/eslint.config.mjs b/apps/money-mirror/eslint.config.mjs new file mode 100644 index 0000000..05e726d --- /dev/null +++ b/apps/money-mirror/eslint.config.mjs @@ -0,0 +1,18 @@ +import { defineConfig, globalIgnores } from "eslint/config"; +import nextVitals from "eslint-config-next/core-web-vitals"; +import nextTs from "eslint-config-next/typescript"; + +const eslintConfig = defineConfig([ + ...nextVitals, + ...nextTs, + // Override default ignores of eslint-config-next. + globalIgnores([ + // Default ignores of eslint-config-next: + ".next/**", + "out/**", + "build/**", + "next-env.d.ts", + ]), +]); + +export default eslintConfig; diff --git a/apps/money-mirror/next.config.ts b/apps/money-mirror/next.config.ts new file mode 100644 index 0000000..544407d --- /dev/null +++ b/apps/money-mirror/next.config.ts @@ -0,0 +1,55 @@ +import type { NextConfig } from 'next'; +import { withSentryConfig } from '@sentry/nextjs'; + +const nextConfig: NextConfig = { + serverExternalPackages: ['pdf-parse'], + // Enable PWA headers + headers: async () => [ + { + source: '/(.*)', + headers: [ + { key: 'X-Content-Type-Options', value: 'nosniff' }, + { key: 'X-Frame-Options', value: 'DENY' }, + { key: 'X-XSS-Protection', value: '1; mode=block' }, + ], + }, + ], +}; + +export default withSentryConfig(nextConfig, { + // For all available options, see: + // https://www.npmjs.com/package/@sentry/webpack-plugin#options + + org: 'ai-product-os', + + project: 'javascript-nextjs', + + // Only print logs for uploading source maps in CI + silent: !process.env.CI, + + // For all available options, see: + // https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/ + + // Upload a larger set of source maps for prettier stack traces (increases build time) + widenClientFileUpload: true, + + // Uncomment to route browser requests to Sentry through a 
Next.js rewrite to circumvent ad-blockers. + // This can increase your server load as well as your hosting bill. + // Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client- + // side errors will fail. + // tunnelRoute: "/monitoring", + + webpack: { + // Enables automatic instrumentation of Vercel Cron Monitors. (Does not yet work with App Router route handlers.) + // See the following for more information: + // https://docs.sentry.io/product/crons/ + // https://vercel.com/docs/cron-jobs + automaticVercelMonitors: true, + + // Tree-shaking options for reducing bundle size + treeshake: { + // Automatically tree-shake Sentry logger statements to reduce bundle size + removeDebugLogging: true, + }, + }, +}); diff --git a/apps/money-mirror/package.json b/apps/money-mirror/package.json new file mode 100644 index 0000000..7f1a7e5 --- /dev/null +++ b/apps/money-mirror/package.json @@ -0,0 +1,47 @@ +{ + "name": "money-mirror", + "version": "0.1.0", + "private": true, + "scripts": { + "dev": "next dev", + "build": "next build", + "start": "next start", + "lint": "eslint", + "test": "vitest run", + "test:watch": "vitest", + "test:coverage": "vitest run --coverage" + }, + "dependencies": { + "@google/genai": "^1.48.0", + "@neondatabase/auth": "^0.1.0-beta.21", + "@neondatabase/serverless": "^1.0.2", + "@sentry/nextjs": "^10.47.0", + "clsx": "^2.1.1", + "framer-motion": "^12.38.0", + "lucide-react": "^1.7.0", + "next": "16.2.2", + "pdf-parse": "^2.4.5", + "posthog-js": "^1.364.5", + "posthog-node": "^5.21.2", + "react": "19.2.4", + "react-dom": "19.2.4", + "resend": "^6.10.0", + "tailwind-merge": "^3.5.0" + }, + "devDependencies": { + "@tailwindcss/postcss": "^4", + "@testing-library/jest-dom": "^6.9.1", + "@testing-library/react": "^16.3.2", + "@types/node": "^20", + "@types/pdf-parse": "^1.1.5", + "@types/react": "^19", + "@types/react-dom": "^19", + "@vitest/coverage-v8": "^4.1.2", + "eslint": "^9", + 
"eslint-config-next": "16.2.2", + "jsdom": "^29.0.1", + "tailwindcss": "^4", + "typescript": "^5", + "vitest": "^4.1.2" + } +} diff --git a/apps/money-mirror/postcss.config.mjs b/apps/money-mirror/postcss.config.mjs new file mode 100644 index 0000000..61e3684 --- /dev/null +++ b/apps/money-mirror/postcss.config.mjs @@ -0,0 +1,7 @@ +const config = { + plugins: { + "@tailwindcss/postcss": {}, + }, +}; + +export default config; diff --git a/apps/money-mirror/proxy.ts b/apps/money-mirror/proxy.ts new file mode 100644 index 0000000..44a7b11 --- /dev/null +++ b/apps/money-mirror/proxy.ts @@ -0,0 +1,9 @@ +import { neonAuthMiddleware } from '@neondatabase/auth/next/server'; + +export default neonAuthMiddleware({ + loginUrl: '/login', +}); + +export const config = { + matcher: ['/dashboard/:path*', '/onboarding/:path*', '/score/:path*'], +}; diff --git a/apps/money-mirror/public/file.svg b/apps/money-mirror/public/file.svg new file mode 100644 index 0000000..004145c --- /dev/null +++ b/apps/money-mirror/public/file.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/apps/money-mirror/public/globe.svg b/apps/money-mirror/public/globe.svg new file mode 100644 index 0000000..567f17b --- /dev/null +++ b/apps/money-mirror/public/globe.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/apps/money-mirror/public/manifest.json b/apps/money-mirror/public/manifest.json new file mode 100644 index 0000000..612229f --- /dev/null +++ b/apps/money-mirror/public/manifest.json @@ -0,0 +1,13 @@ +{ + "name": "MoneyMirror", + "short_name": "MoneyMirror", + "description": "See the truth about your money. 
No sugar-coating.", + "start_url": "/", + "display": "standalone", + "background_color": "#080c10", + "theme_color": "#080c10", + "icons": [ + { "src": "/icon-192.png", "sizes": "192x192", "type": "image/png" }, + { "src": "/icon-512.png", "sizes": "512x512", "type": "image/png" } + ] +} diff --git a/apps/money-mirror/public/next.svg b/apps/money-mirror/public/next.svg new file mode 100644 index 0000000..5174b28 --- /dev/null +++ b/apps/money-mirror/public/next.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/apps/money-mirror/public/vercel.svg b/apps/money-mirror/public/vercel.svg new file mode 100644 index 0000000..7705396 --- /dev/null +++ b/apps/money-mirror/public/vercel.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/apps/money-mirror/public/window.svg b/apps/money-mirror/public/window.svg new file mode 100644 index 0000000..b2b2a44 --- /dev/null +++ b/apps/money-mirror/public/window.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/apps/money-mirror/schema.sql b/apps/money-mirror/schema.sql new file mode 100644 index 0000000..63749b0 --- /dev/null +++ b/apps/money-mirror/schema.sql @@ -0,0 +1,68 @@ +-- MoneyMirror Database Schema +-- Target: Neon Postgres + +CREATE EXTENSION IF NOT EXISTS pgcrypto; + +CREATE TABLE IF NOT EXISTS public.profiles ( + id TEXT PRIMARY KEY, + email TEXT NOT NULL UNIQUE, + monthly_income_paisa BIGINT, + perceived_spend_paisa BIGINT NOT NULL DEFAULT 0, + target_savings_rate INT NOT NULL DEFAULT 20, + money_health_score INT, + onboarded_at TIMESTAMPTZ +); + +CREATE TABLE IF NOT EXISTS public.statements ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + user_id TEXT NOT NULL REFERENCES public.profiles(id) ON DELETE CASCADE, + bank_name TEXT NOT NULL DEFAULT 'HDFC', + institution_name TEXT NOT NULL DEFAULT 'Unknown', + statement_type TEXT NOT NULL CHECK (statement_type IN ('bank_account', 'credit_card')) DEFAULT 'bank_account', + period_start DATE, + period_end DATE, + due_date DATE, + 
total_debits_paisa BIGINT NOT NULL DEFAULT 0, + total_credits_paisa BIGINT NOT NULL DEFAULT 0, + perceived_spend_paisa BIGINT NOT NULL DEFAULT 0, + payment_due_paisa BIGINT, + minimum_due_paisa BIGINT, + credit_limit_paisa BIGINT, + status TEXT NOT NULL CHECK (status IN ('processing', 'processed', 'failed')) DEFAULT 'processing', + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE TABLE IF NOT EXISTS public.transactions ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + statement_id UUID NOT NULL REFERENCES public.statements(id) ON DELETE CASCADE, + user_id TEXT NOT NULL REFERENCES public.profiles(id) ON DELETE CASCADE, + date DATE NOT NULL, + description TEXT NOT NULL, + amount_paisa BIGINT NOT NULL, + type TEXT NOT NULL CHECK (type IN ('debit', 'credit')), + category TEXT NOT NULL CHECK (category IN ('needs', 'wants', 'investment', 'debt', 'other')), + is_recurring BOOLEAN NOT NULL DEFAULT false, + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE TABLE IF NOT EXISTS public.advisory_feed ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + user_id TEXT NOT NULL REFERENCES public.profiles(id) ON DELETE CASCADE, + statement_id UUID REFERENCES public.statements(id) ON DELETE CASCADE, + trigger TEXT NOT NULL, + headline TEXT NOT NULL, + message TEXT NOT NULL, + severity TEXT NOT NULL CHECK (severity IN ('info', 'warning', 'critical')) DEFAULT 'info', + amount_paisa BIGINT, + is_read BOOLEAN NOT NULL DEFAULT false, + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_statements_user_created_at + ON public.statements(user_id, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_transactions_user_statement + ON public.transactions(user_id, statement_id); + +CREATE INDEX IF NOT EXISTS idx_advisory_feed_user_created_at + ON public.advisory_feed(user_id, created_at DESC); diff --git a/apps/money-mirror/sentry.client.config.ts b/apps/money-mirror/sentry.client.config.ts new file mode 100644 index 0000000..e2f3894 --- 
/dev/null +++ b/apps/money-mirror/sentry.client.config.ts @@ -0,0 +1,15 @@ +import * as Sentry from '@sentry/nextjs'; + +Sentry.init({ + dsn: process.env.NEXT_PUBLIC_SENTRY_DSN, + tracesSampleRate: 1.0, + debug: false, + replaysOnErrorSampleRate: 1.0, + replaysSessionSampleRate: 0.1, + integrations: [ + Sentry.replayIntegration({ + maskAllText: true, + blockAllMedia: true, + }), + ], +}); diff --git a/apps/money-mirror/sentry.edge.config.ts b/apps/money-mirror/sentry.edge.config.ts new file mode 100644 index 0000000..c6eec25 --- /dev/null +++ b/apps/money-mirror/sentry.edge.config.ts @@ -0,0 +1,20 @@ +// This file configures the initialization of Sentry for edge features (middleware, edge routes, and so on). +// The config you add here will be used whenever one of the edge features is loaded. +// Note that this config is unrelated to the Vercel Edge Runtime and is also required when running locally. +// https://docs.sentry.io/platforms/javascript/guides/nextjs/ + +import * as Sentry from '@sentry/nextjs'; + +Sentry.init({ + dsn: 'https://de65ad90374bba0827a8a24959f308fe@o4511154725060608.ingest.us.sentry.io/4511154738626560', + + // Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control. + tracesSampleRate: 1, + + // Enable logs to be sent to Sentry + enableLogs: true, + + // Enable sending user PII (Personally Identifiable Information) + // https://docs.sentry.io/platforms/javascript/guides/nextjs/configuration/options/#sendDefaultPii + sendDefaultPii: true, +}); diff --git a/apps/money-mirror/sentry.server.config.ts b/apps/money-mirror/sentry.server.config.ts new file mode 100644 index 0000000..9e7444a --- /dev/null +++ b/apps/money-mirror/sentry.server.config.ts @@ -0,0 +1,19 @@ +// This file configures the initialization of Sentry on the server. +// The config you add here will be used whenever the server handles a request. 
+// https://docs.sentry.io/platforms/javascript/guides/nextjs/ + +import * as Sentry from '@sentry/nextjs'; + +Sentry.init({ + dsn: 'https://de65ad90374bba0827a8a24959f308fe@o4511154725060608.ingest.us.sentry.io/4511154738626560', + + // Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control. + tracesSampleRate: 1, + + // Enable logs to be sent to Sentry + enableLogs: true, + + // Enable sending user PII (Personally Identifiable Information) + // https://docs.sentry.io/platforms/javascript/guides/nextjs/configuration/options/#sendDefaultPii + sendDefaultPii: true, +}); diff --git a/apps/money-mirror/src/app/api/auth/[...path]/route.ts b/apps/money-mirror/src/app/api/auth/[...path]/route.ts new file mode 100644 index 0000000..eb870d9 --- /dev/null +++ b/apps/money-mirror/src/app/api/auth/[...path]/route.ts @@ -0,0 +1,25 @@ +import { authApiHandler } from '@neondatabase/auth/next/server'; + +function getHandler() { + return authApiHandler(); +} + +export async function GET(request: Request, context: { params: Promise<{ path: string[] }> }) { + return getHandler().GET(request, context); +} + +export async function POST(request: Request, context: { params: Promise<{ path: string[] }> }) { + return getHandler().POST(request, context); +} + +export async function PUT(request: Request, context: { params: Promise<{ path: string[] }> }) { + return getHandler().PUT(request, context); +} + +export async function PATCH(request: Request, context: { params: Promise<{ path: string[] }> }) { + return getHandler().PATCH(request, context); +} + +export async function DELETE(request: Request, context: { params: Promise<{ path: string[] }> }) { + return getHandler().DELETE(request, context); +} diff --git a/apps/money-mirror/src/app/api/cron/weekly-recap/route.ts b/apps/money-mirror/src/app/api/cron/weekly-recap/route.ts new file mode 100644 index 0000000..6adf31a --- /dev/null +++ 
b/apps/money-mirror/src/app/api/cron/weekly-recap/route.ts @@ -0,0 +1,121 @@ +/** + * GET /api/cron/weekly-recap + * + * T11 — Weekly Recap Email (Fan-Out Pattern) + * + * Triggered by Vercel Cron every Monday at 8:00 AM IST. + * Fetches all users who have uploaded at least one statement, + * then fans out individual recap emails via Resend. + * + * Auth: Accepts either: + * - `authorization: Bearer ` from Vercel Cron + * - `x-cron-secret: ` for local/manual triggering + * + * Fan-out architecture (engineering lesson §2): + * - Master route: fetches user list, triggers per-user worker calls + * - Worker: /api/cron/weekly-recap/worker (handles single user email) + * + * PostHog telemetry — single emission source: server-side here. + * Events fired: + * - weekly_recap_triggered (master cron run) + * - weekly_recap_completed (after Promise.allSettled) + */ + +import { NextRequest, NextResponse } from 'next/server'; +import { listEligibleWeeklyRecapUsers } from '@/lib/db'; +import { captureServerEvent } from '@/lib/posthog'; + +const STATEMENT_BATCH_SIZE = 1000; + +function isAuthorizedCronRequest(req: NextRequest): boolean { + const expectedSecret = process.env.CRON_SECRET; + if (!expectedSecret) { + return false; + } + + const bearerToken = req.headers.get('authorization'); + if (bearerToken === `Bearer ${expectedSecret}`) { + return true; + } + + const sharedSecret = req.headers.get('x-cron-secret'); + return sharedSecret === expectedSecret; +} + +async function handleCron(req: NextRequest): Promise { + if (!isAuthorizedCronRequest(req)) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); + } + + const uniqueUserIds = new Set(); + let offset = 0; + + while (true) { + let users: string[]; + try { + users = await listEligibleWeeklyRecapUsers(STATEMENT_BATCH_SIZE, offset); + } catch (error) { + console.error('[weekly-recap] failed to fetch users:', error); + return NextResponse.json({ error: 'Failed to fetch users' }, { status: 500 }); + } + + for 
(const userId of users) { + uniqueUserIds.add(userId); + } + + if (users.length < STATEMENT_BATCH_SIZE) { + break; + } + + offset += STATEMENT_BATCH_SIZE; + } + const eligibleUserIds = [...uniqueUserIds]; + + await captureServerEvent('system', 'weekly_recap_triggered', { + user_count: eligibleUserIds.length, + }).catch((e) => console.error('[weekly-recap] posthog triggered failed:', e)); + + // ── Fan-out: trigger per-user worker ───────────────────────────── + const workerUrl = new URL('/api/cron/weekly-recap/worker', req.url).toString(); + + const results = await Promise.allSettled( + eligibleUserIds.map((userId) => + fetch(workerUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-cron-secret': process.env.CRON_SECRET!, + }, + body: JSON.stringify({ userId }), + }).then(async (res) => { + if (!res.ok) { + throw new Error(`worker returned ${res.status}`); + } + + const body = await res.json().catch(() => null); + if (!body?.ok) { + throw new Error('worker returned unsuccessful result'); + } + }) + ) + ); + + const succeeded = results.filter((r) => r.status === 'fulfilled').length; + const failed = results.filter((r) => r.status === 'rejected').length; + + await captureServerEvent('system', 'weekly_recap_completed', { + total: eligibleUserIds.length, + succeeded, + failed, + }).catch((e) => console.error('[weekly-recap] posthog completed failed:', e)); + + return NextResponse.json({ ok: true, total: eligibleUserIds.length, succeeded, failed }); +} + +export async function GET(req: NextRequest): Promise { + return handleCron(req); +} + +export async function POST(req: NextRequest): Promise { + return handleCron(req); +} diff --git a/apps/money-mirror/src/app/api/cron/weekly-recap/worker/route.ts b/apps/money-mirror/src/app/api/cron/weekly-recap/worker/route.ts new file mode 100644 index 0000000..eebee71 --- /dev/null +++ b/apps/money-mirror/src/app/api/cron/weekly-recap/worker/route.ts @@ -0,0 +1,119 @@ +/** + * POST 
/api/cron/weekly-recap/worker + * + * Per-user weekly recap email worker. + * Called by /api/cron/weekly-recap (fan-out master). + * + * Fetches the user's most recent processed statement, builds a + * summary, and sends the recap email via Resend. + * + * Auth: Requires `x-cron-secret` header (internal only). + * + * PostHog telemetry: + * - weekly_recap_email_sent (success) + * - weekly_recap_email_failed (failure — non-fatal, logged only) + */ + +import { NextRequest, NextResponse } from 'next/server'; +import { Resend } from 'resend'; +import { + getLatestProcessedStatementForUser, + getProfileEmail, + getTopCategoryTotalsForStatement, +} from '@/lib/db'; +import { captureServerEvent } from '@/lib/posthog'; + +export async function POST(req: NextRequest): Promise { + // ── Auth ────────────────────────────────────────────────────────── + const cronSecret = req.headers.get('x-cron-secret'); + if (!cronSecret || cronSecret !== process.env.CRON_SECRET) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); + } + + const body = await req.json().catch(() => null); + if (!body?.userId) { + return NextResponse.json({ error: 'userId required' }, { status: 400 }); + } + + const { userId } = body as { userId: string }; + const resend = new Resend(process.env.RESEND_API_KEY!); + + // ── Fetch most recent processed statement ───────────────────────── + const email = await getProfileEmail(userId); + if (!email) { + return NextResponse.json({ error: 'User not found' }, { status: 404 }); + } + + const statement = await getLatestProcessedStatementForUser(userId); + if (!statement) { + // No statement yet — skip silently, not an error + return NextResponse.json({ ok: true, skipped: true }); + } + + // ── Fetch top spending categories ───────────────────────────────── + const categoryTotals: Record = {}; + const topCategories = await getTopCategoryTotalsForStatement(statement.id, userId, 5); + + for (const tx of topCategories) { + categoryTotals[tx.category] = 
(categoryTotals[tx.category] ?? 0) + tx.amount_paisa; + } + const topCategory = Object.entries(categoryTotals).sort((a, b) => b[1] - a[1])[0]; + + const totalSpent = Math.round(statement.total_debits_paisa / 100).toLocaleString('en-IN'); + const periodLabel = statement.period_start + ? `${statement.period_start} → ${statement.period_end}` + : 'last month'; + const topCatLabel = topCategory + ? `${topCategory[0]} (₹${Math.round(topCategory[1] / 100).toLocaleString('en-IN')})` + : '—'; + + // ── Send email via Resend ───────────────────────────────────────── + try { + await resend.emails.send({ + from: 'MoneyMirror ', + to: email, + subject: `Your MoneyMirror weekly recap 🪞`, + html: ` +
+

MoneyMirror Weekly Recap

+

${periodLabel}

+ +
+
Total Spent
+
₹${totalSpent}
+
+ +
+
Biggest Category
+
${topCatLabel}
+
+ + + See Your Full Mirror → + + +

+ Your data is private and never shared. Unsubscribe +

+
+ `, + }); + + await captureServerEvent(userId, 'weekly_recap_email_sent', { + period_start: statement.period_start, + period_end: statement.period_end, + total_debits_paisa: statement.total_debits_paisa, + }).catch((e) => console.error('[worker] posthog sent failed:', e)); + + return NextResponse.json({ ok: true }); + } catch (err) { + console.error(`[worker] resend failed for ${userId}:`, err); + + await captureServerEvent(userId, 'weekly_recap_email_failed', { + error: err instanceof Error ? err.message : 'unknown', + }).catch((e) => console.error('[worker] posthog failed failed:', e)); + + return NextResponse.json({ ok: false, error: 'email send failed' }, { status: 502 }); + } +} diff --git a/apps/money-mirror/src/app/api/dashboard/advisories/route.ts b/apps/money-mirror/src/app/api/dashboard/advisories/route.ts new file mode 100644 index 0000000..f24f8d6 --- /dev/null +++ b/apps/money-mirror/src/app/api/dashboard/advisories/route.ts @@ -0,0 +1,36 @@ +/** + * GET /api/dashboard/advisories?statement_id= + * + * Fetches parsed transaction data from the DB and generates + * advisory feed items using the advisory engine. 
+ */ + +import { NextRequest, NextResponse } from 'next/server'; +import { getSessionUser } from '@/lib/auth/session'; +import { ensureProfile } from '@/lib/db'; +import { fetchDashboardData } from '@/lib/dashboard'; + +export async function GET(req: NextRequest): Promise { + const statementId = req.nextUrl.searchParams.get('statement_id'); + + if (!statementId) { + return NextResponse.json({ error: 'statement_id is required' }, { status: 400 }); + } + + const user = await getSessionUser(); + if (!user) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); + } + + try { + await ensureProfile({ id: user.id, email: user.email }); + const dashboard = await fetchDashboardData(user.id, statementId); + if (!dashboard) { + return NextResponse.json({ error: 'Statement not found' }, { status: 404 }); + } + + return NextResponse.json({ advisories: dashboard.advisories }); + } catch { + return NextResponse.json({ error: 'Failed to fetch transactions' }, { status: 500 }); + } +} diff --git a/apps/money-mirror/src/app/api/dashboard/route.ts b/apps/money-mirror/src/app/api/dashboard/route.ts new file mode 100644 index 0000000..f243eb0 --- /dev/null +++ b/apps/money-mirror/src/app/api/dashboard/route.ts @@ -0,0 +1,24 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { getSessionUser } from '@/lib/auth/session'; +import { ensureProfile } from '@/lib/db'; +import { fetchDashboardData } from '@/lib/dashboard'; + +export async function GET(req: NextRequest): Promise { + const statementId = req.nextUrl.searchParams.get('statement_id'); + const user = await getSessionUser(); + if (!user) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); + } + + try { + await ensureProfile({ id: user.id, email: user.email }); + const dashboard = await fetchDashboardData(user.id, statementId); + if (!dashboard) { + return NextResponse.json({ error: 'Dashboard not found' }, { status: 404 }); + } + + return NextResponse.json(dashboard); + } 
catch { + return NextResponse.json({ error: 'Failed to load dashboard data' }, { status: 500 }); + } +} diff --git a/apps/money-mirror/src/app/api/onboarding/complete/route.ts b/apps/money-mirror/src/app/api/onboarding/complete/route.ts new file mode 100644 index 0000000..b375af5 --- /dev/null +++ b/apps/money-mirror/src/app/api/onboarding/complete/route.ts @@ -0,0 +1,54 @@ +/** + * POST /api/onboarding/complete + * + * Called at the end of the onboarding flow. + * Persists the money health score to the user's profile and fires + * the onboarding_completed PostHog event (single emission source: server). + * + * Body: { monthly_income_paisa: number, money_health_score: number, perceived_spend_paisa: number } + */ + +import { NextRequest, NextResponse } from 'next/server'; +import { getSessionUser } from '@/lib/auth/session'; +import { upsertProfileOnboarding } from '@/lib/db'; +import { captureServerEvent } from '@/lib/posthog'; + +export async function POST(req: NextRequest): Promise { + const user = await getSessionUser(); + if (!user) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); + } + + const body = await req.json().catch(() => null); + if ( + !body || + typeof body.monthly_income_paisa !== 'number' || + typeof body.money_health_score !== 'number' + ) { + return NextResponse.json({ error: 'Invalid request body' }, { status: 400 }); + } + + const { monthly_income_paisa, money_health_score, perceived_spend_paisa = 0 } = body; + + try { + await upsertProfileOnboarding( + { id: user.id, email: user.email }, + monthly_income_paisa, + money_health_score, + perceived_spend_paisa, + new Date().toISOString() + ); + } catch (error) { + console.error('[onboarding/complete] upsert failed:', error); + return NextResponse.json({ error: 'Failed to save onboarding progress.' 
}, { status: 500 }); + } + + // Single emission source: server-side only + await captureServerEvent(user.id, 'onboarding_completed', { + monthly_income_paisa, + money_health_score, + perceived_spend_paisa, + }).catch((e) => console.error('[onboarding/complete] posthog failed:', e)); + + return NextResponse.json({ ok: true }); +} diff --git a/apps/money-mirror/src/app/api/sentry-example-api/route.ts b/apps/money-mirror/src/app/api/sentry-example-api/route.ts new file mode 100644 index 0000000..6515080 --- /dev/null +++ b/apps/money-mirror/src/app/api/sentry-example-api/route.ts @@ -0,0 +1,17 @@ +import * as Sentry from '@sentry/nextjs'; +export const dynamic = 'force-dynamic'; + +class SentryExampleAPIError extends Error { + constructor(message: string | undefined) { + super(message); + this.name = 'SentryExampleAPIError'; + } +} + +// A faulty API route to test Sentry's error monitoring +export function GET() { + Sentry.logger.info('Sentry example API called'); + throw new SentryExampleAPIError( + 'This error is raised on the backend called by the example page.' 
+ ); +} diff --git a/apps/money-mirror/src/app/api/statement/parse/persist-statement.ts b/apps/money-mirror/src/app/api/statement/parse/persist-statement.ts new file mode 100644 index 0000000..c454568 --- /dev/null +++ b/apps/money-mirror/src/app/api/statement/parse/persist-statement.ts @@ -0,0 +1,139 @@ +import { randomUUID } from 'node:crypto'; +import { getDb, getProfileFinancialSnapshot } from '@/lib/db'; +import { captureServerEvent } from '@/lib/posthog'; +import type { StatementType } from '@/lib/statements'; + +interface CategorizedTransaction { + date: string; + description: string; + amount_paisa: number; + type: 'debit' | 'credit'; + category: string; + is_recurring: boolean; +} + +interface StatementSummary { + total_debits: number; + total_credits: number; + needs: number; + wants: number; + investment: number; + debt: number; + other: number; +} + +interface PeriodInfo { + institution_name: string; + statement_type: StatementType; + period_start: string; + period_end: string; + due_date: string | null; + payment_due_paisa: number | null; + minimum_due_paisa: number | null; + credit_limit_paisa: number | null; +} + +interface PersistResult { + statement_id: string; + error?: string; +} + +/** + * Persists a parsed statement and its transactions to Neon Postgres. + * Returns the statement ID on success, or an error string on failure. + * Uses a single transaction so partial writes are rolled back. 
+ */ +export async function persistStatement( + userId: string, + categorized: CategorizedTransaction[], + summary: StatementSummary, + period: PeriodInfo +): Promise { + const sql = getDb(); + const statementId = randomUUID(); + + try { + const profile = await getProfileFinancialSnapshot(userId); + const transactionQueries = categorized.map( + (tx) => + sql` + INSERT INTO transactions ( + id, + statement_id, + user_id, + date, + description, + amount_paisa, + type, + category, + is_recurring + ) + VALUES ( + ${randomUUID()}, + ${statementId}, + ${userId}, + ${tx.date}::date, + ${tx.description}, + ${tx.amount_paisa}, + ${tx.type}, + ${tx.category}, + ${tx.is_recurring} + ) + ` + ); + + await sql.transaction([ + sql` + INSERT INTO statements ( + id, + user_id, + bank_name, + institution_name, + statement_type, + period_start, + period_end, + due_date, + total_debits_paisa, + total_credits_paisa, + perceived_spend_paisa, + payment_due_paisa, + minimum_due_paisa, + credit_limit_paisa, + status + ) + VALUES ( + ${statementId}, + ${userId}, + ${period.institution_name}, + ${period.institution_name}, + ${period.statement_type}, + ${period.period_start}::date, + ${period.period_end}::date, + ${period.due_date}::date, + ${summary.total_debits}, + ${summary.total_credits}, + ${profile.perceived_spend_paisa}, + ${period.payment_due_paisa}, + ${period.minimum_due_paisa}, + ${period.credit_limit_paisa}, + 'processing' + ) + `, + ...transactionQueries, + sql` + UPDATE statements + SET status = 'processed' + WHERE id = ${statementId} + AND user_id = ${userId} + `, + ]); + } catch (error) { + await captureServerEvent(userId, 'statement_parse_failed', { + error_type: 'DB_TRANSACTION_FAILED', + }); + console.error('[persist-statement] transaction failed:', error); + return { statement_id: '', error: 'Failed to save statement data.' 
}; + } + + return { statement_id: statementId }; +} diff --git a/apps/money-mirror/src/app/api/statement/parse/route.ts b/apps/money-mirror/src/app/api/statement/parse/route.ts new file mode 100644 index 0000000..fc003a5 --- /dev/null +++ b/apps/money-mirror/src/app/api/statement/parse/route.ts @@ -0,0 +1,296 @@ +/** + * POST /api/statement/parse + * + * Accepts a multipart/form-data PDF upload, extracts transactions + * using Gemini Flash, categorizes them, and saves to the database. + * + * Privacy: The PDF is processed entirely in-memory (Option A). + * The buffer reference is explicitly nulled after text extraction. + * (T7 — Zero-retention delete) + * + * Rate limit: 3 uploads per user per day (enforced via DB count). + * + * Auth: Requires an authenticated Neon Auth session cookie. + * + * Telemetry (PostHog) — Single emission source: server-side here. + * Events fired: + * - statement_parse_started + * - statement_parse_rate_limited + * - statement_parse_success + * - statement_parse_timeout + * - statement_parse_failed + */ + +import { NextRequest, NextResponse } from 'next/server'; +import { GoogleGenAI } from '@google/genai'; +import { getSessionUser } from '@/lib/auth/session'; +import { countUserStatementsSince, ensureProfile } from '@/lib/db'; +import { extractPdfText, PdfExtractionError } from '@/lib/pdf-parser'; +import { + categorizeCreditCardTransaction, + categorizeTransaction, + summarizeByCategory, +} from '@/lib/categorizer'; +import { captureServerEvent } from '@/lib/posthog'; +import { + buildStatementParserPrompt, + parseStatementType, + type ParsedStatementResult, + validateParsedStatement, +} from '@/lib/statements'; +import { persistStatement } from './persist-statement'; + +const TIMEOUT_MS = 25_000; +const MAX_UPLOADS_PER_DAY = 3; +const MAX_FILE_SIZE_BYTES = 10 * 1024 * 1024; // 10 MB + +// ─── Gemini schema for structured output ───────────────────────────────── + +// ─── Route Handler 
──────────────────────────────────────────────────────── + +export async function POST(req: NextRequest): Promise { + const startTime = Date.now(); + + const user = await getSessionUser(); + if (!user) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); + } + + const userId = user.id; + await ensureProfile({ id: user.id, email: user.email }); + + // ── 2. Rate limiting: max 3 uploads/day ────────────────────────── + const today = new Date().toISOString().split('T')[0]; + const count = await countUserStatementsSince(userId, `${today}T00:00:00Z`); + + if ((count ?? 0) >= MAX_UPLOADS_PER_DAY) { + // Single emission source: server-side — fire-and-forget, must not block response + captureServerEvent(userId, 'statement_parse_rate_limited', { + uploads_today: count, + limit: MAX_UPLOADS_PER_DAY, + }).catch(() => {}); + return NextResponse.json( + { + error: 'Upload limit reached', + detail: `You can upload up to ${MAX_UPLOADS_PER_DAY} statements per day.`, + }, + { status: 429 } + ); + } + + // ── 3. Parse multipart form ─────────────────────────────────────── + let fileBuffer: Buffer | null = null; + let fileName = 'statement.pdf'; + let statementType = parseStatementType(null); + + try { + const formData = await req.formData(); + const file = formData.get('file'); + statementType = parseStatementType(formData.get('statement_type')); + + if (!file || typeof file === 'string') { + return NextResponse.json({ error: 'No file uploaded' }, { status: 400 }); + } + + if (file.size > MAX_FILE_SIZE_BYTES) { + return NextResponse.json( + { error: 'File too large. Maximum size is 10 MB.' }, + { status: 400 } + ); + } + + // Check MIME type (basic check — content-type from browser) + const mimeType = file.type; + if (mimeType && mimeType !== 'application/pdf') { + return NextResponse.json({ error: 'Only PDF files are accepted.' }, { status: 400 }); + } + + fileName = file.name ?? 
fileName; + const arrayBuffer = await file.arrayBuffer(); + fileBuffer = Buffer.from(arrayBuffer); + } catch { + return NextResponse.json({ error: 'Failed to read uploaded file.' }, { status: 400 }); + } + + // ── 4. Extract PDF text ─────────────────────────────────────────── + let pdfText: string; + + try { + const extracted = await extractPdfText(fileBuffer); + pdfText = extracted.text; + } catch (err) { + // T7 — Null buffer immediately after use (zero-retention) + fileBuffer = null; + + const code = err instanceof PdfExtractionError ? err.code : 'PARSE_FAILED'; + captureServerEvent(userId, 'statement_parse_failed', { + error_type: code, + file_name: fileName, + }).catch(() => {}); + const errorMessage = + code === 'EMPTY_TEXT' + ? 'This PDF appears to be a scanned image. Please upload a digitally generated bank statement.' + : code === 'PASSWORD_PROTECTED' + ? 'Your PDF is password-protected. Please remove the password and re-upload.' + : 'Failed to read the PDF. Please ensure it is a valid bank statement.'; + return NextResponse.json({ error: errorMessage }, { status: 422 }); + } + + // T7 — Null the buffer immediately after text extraction + fileBuffer = null; + + // ── 5. Emit telemetry: parse started — fire-and-forget ─────────── + captureServerEvent(userId, 'statement_parse_started', { + pdf_text_length: pdfText.length, + }).catch(() => {}); + + // ── 6. Gemini extraction with timeout ──────────────────────────── + const genai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY! 
}); + let parsedStatement: ParsedStatementResult; + + try { + const geminiPromise = genai.models + .generateContent({ + model: 'gemini-2.5-flash', + config: { + // Disable thinking — structured data extraction needs speed, not reasoning + thinkingConfig: { thinkingBudget: 0 }, + }, + contents: [ + { + role: 'user', + parts: [ + { text: buildStatementParserPrompt(statementType) }, + { + text: `Parse this statement and return JSON:\n\n${pdfText.slice(0, 30_000)}`, + }, + ], + }, + ], + }) + .then((res) => { + // Find the first part with text (thinking models may have thought parts first) + const parts = res.candidates?.[0]?.content?.parts ?? []; + const raw = + parts.find( + (p) => (p.text && p.text.trim().startsWith('{')) || p.text?.includes('"transactions"') + )?.text ?? + parts.find((p) => p.text)?.text ?? + ''; + // Strip markdown code fences if present + const json = raw + .replace(/^```(?:json)?\s*/i, '') + .replace(/```\s*$/i, '') + .trim(); + return validateParsedStatement(JSON.parse(json), statementType); + }); + + const timeoutPromise = new Promise((_, reject) => + setTimeout(() => reject(new Error('GEMINI_TIMEOUT')), TIMEOUT_MS) + ); + + parsedStatement = await Promise.race([geminiPromise, timeoutPromise]); + } catch (err) { + const isTimeout = err instanceof Error && err.message === 'GEMINI_TIMEOUT'; + + if (isTimeout) { + captureServerEvent(userId, 'statement_parse_timeout', { + timeout_ms: TIMEOUT_MS, + }).catch(() => {}); + return NextResponse.json( + { error: 'Processing took too long. Please try again.' }, + { status: 504 } + ); + } + + const geminiErrMsg = err instanceof Error ? err.message : String(err); + captureServerEvent(userId, 'statement_parse_failed', { + error_type: 'GEMINI_ERROR', + detail: geminiErrMsg.slice(0, 200), + }).catch(() => {}); + return NextResponse.json( + { error: 'Failed to extract transactions from the PDF.' }, + { status: 500 } + ); + } + + // ── 7. 
Categorize transactions ──────────────────────────────────── + const categorized = parsedStatement.transactions.map((tx) => { + const amountPaisa = Math.round(tx.amount * 100); + + if (statementType === 'credit_card') { + return categorizeCreditCardTransaction( + tx.description, + amountPaisa, + tx.date, + tx.entry_kind ?? 'other' + ); + } + + return categorizeTransaction(tx.description, amountPaisa, tx.date, tx.type); + }); + + const summary = summarizeByCategory(categorized); + + // ── 8. Persist to database ──────────────────────────────────────── + const { statement_id, error: persistError } = await persistStatement( + userId, + categorized, + summary, + { + institution_name: parsedStatement.institution_name, + statement_type: parsedStatement.statement_type, + period_start: parsedStatement.period_start, + period_end: parsedStatement.period_end, + due_date: parsedStatement.due_date, + payment_due_paisa: parsedStatement.payment_due_paisa, + minimum_due_paisa: parsedStatement.minimum_due_paisa, + credit_limit_paisa: parsedStatement.credit_limit_paisa, + } + ); + + if (persistError) { + return NextResponse.json({ error: persistError }, { status: 500 }); + } + + // ── 9. Success telemetry — single emission source, fire-and-forget ── + const latencyMs = Date.now() - startTime; + + captureServerEvent(userId, 'statement_parse_success', { + latency_ms: latencyMs, + transaction_count: categorized.length, + period_start: parsedStatement.period_start, + period_end: parsedStatement.period_end, + institution_name: parsedStatement.institution_name, + statement_type: parsedStatement.statement_type, + total_debits_paisa: summary.total_debits, + needs_pct: + summary.total_debits > 0 ? Math.round((summary.needs / summary.total_debits) * 100) : 0, + wants_pct: + summary.total_debits > 0 ? Math.round((summary.wants / summary.total_debits) * 100) : 0, + investment_pct: + summary.total_debits > 0 ? 
Math.round((summary.investment / summary.total_debits) * 100) : 0, + }).catch(() => {}); + + return NextResponse.json({ + statement_id, + institution_name: parsedStatement.institution_name, + statement_type: parsedStatement.statement_type, + period_start: parsedStatement.period_start, + period_end: parsedStatement.period_end, + due_date: parsedStatement.due_date, + payment_due_paisa: parsedStatement.payment_due_paisa, + minimum_due_paisa: parsedStatement.minimum_due_paisa, + credit_limit_paisa: parsedStatement.credit_limit_paisa, + transaction_count: categorized.length, + summary: { + needs_paisa: summary.needs, + wants_paisa: summary.wants, + investment_paisa: summary.investment, + debt_paisa: summary.debt, + other_paisa: summary.other, + total_debits_paisa: summary.total_debits, + total_credits_paisa: summary.total_credits, + }, + }); +} diff --git a/apps/money-mirror/src/app/dashboard/ParsingPanel.tsx b/apps/money-mirror/src/app/dashboard/ParsingPanel.tsx new file mode 100644 index 0000000..7aaf4ae --- /dev/null +++ b/apps/money-mirror/src/app/dashboard/ParsingPanel.tsx @@ -0,0 +1,36 @@ +'use client'; + +export function ParsingPanel() { + return ( +
+
+
+

+ Processing your statement... +

+

+ This takes about 5–10 seconds. +

+
+
+ ); +} diff --git a/apps/money-mirror/src/app/dashboard/ResultsPanel.tsx b/apps/money-mirror/src/app/dashboard/ResultsPanel.tsx new file mode 100644 index 0000000..e8ec66b --- /dev/null +++ b/apps/money-mirror/src/app/dashboard/ResultsPanel.tsx @@ -0,0 +1,239 @@ +'use client'; + +import { MirrorCard } from '@/components/MirrorCard'; +import { AdvisoryFeed } from '@/components/AdvisoryFeed'; +import type { Advisory } from '@/lib/advisory-engine'; +import { getCreditsLabel, getStatementTypeLabel, type StatementType } from '@/lib/statements'; + +interface ResultSummary { + needs_paisa: number; + wants_paisa: number; + investment_paisa: number; + debt_paisa: number; + other_paisa: number; + total_debits_paisa: number; + total_credits_paisa: number; +} + +interface ResultsPanelProps { + institution_name: string; + statement_type: StatementType; + period_start: string | null; + period_end: string | null; + due_date: string | null; + payment_due_paisa: number | null; + minimum_due_paisa: number | null; + credit_limit_paisa: number | null; + transaction_count: number; + summary: ResultSummary; + advisories: Advisory[]; +} + +const CATEGORY_META = [ + { key: 'needs_paisa' as const, label: 'Needs', color: 'var(--accent)', icon: '🏠' }, + { key: 'wants_paisa' as const, label: 'Wants', color: 'var(--warning)', icon: '🛍️' }, + { key: 'investment_paisa' as const, label: 'Investments', color: 'var(--success)', icon: '📈' }, + { key: 'debt_paisa' as const, label: 'Debt & EMIs', color: 'var(--danger)', icon: '💳' }, + { key: 'other_paisa' as const, label: 'Other', color: 'var(--text-muted)', icon: '📦' }, +]; + +export function ResultsPanel({ + institution_name, + statement_type, + period_start, + period_end, + due_date, + payment_due_paisa, + minimum_due_paisa, + credit_limit_paisa, + transaction_count, + summary, + advisories, +}: ResultsPanelProps) { + const totalSpent = Math.round(summary.total_debits_paisa / 100).toLocaleString('en-IN'); + const totalIncome = 
Math.round(summary.total_credits_paisa / 100).toLocaleString('en-IN'); + const creditsLabel = getCreditsLabel(statement_type); + const statementTypeLabel = getStatementTypeLabel(statement_type); + + return ( +
+
+

+ Your Money Mirror 🪞 +

+

+ {institution_name} • {statementTypeLabel} • {period_start ?? 'Unknown start'} →{' '} + {period_end ?? 'Unknown end'} • {transaction_count} transactions +

+
+ +
+
+
+ Total Spent +
+
+ ₹{totalSpent} +
+
+
+
+ {creditsLabel} +
+
+ ₹{totalIncome} +
+
+
+ + {statement_type === 'credit_card' && ( +
+ {payment_due_paisa !== null && ( +
+
+ Payment Due +
+
+ ₹{Math.round(payment_due_paisa / 100).toLocaleString('en-IN')} +
+
+ )} + {minimum_due_paisa !== null && ( +
+
+ Minimum Due +
+
+ ₹{Math.round(minimum_due_paisa / 100).toLocaleString('en-IN')} +
+
+ )} + {due_date && ( +
+
+ Due Date +
+
{due_date}
+
+ )} + {credit_limit_paisa !== null && ( +
+
+ Credit Limit +
+
+ ₹{Math.round(credit_limit_paisa / 100).toLocaleString('en-IN')} +
+
+ )} +
+ )} + +
+

+ Where it went +

+
+ {CATEGORY_META.map((cat) => ( + + ))} +
+
+ + {advisories.length > 0 && ( +
+

+ Truth Bombs 💣 +

+ +
+ )} + +
+ {typeof navigator !== 'undefined' && navigator.share && ( + + )} +

+ Share anonymously — your data stays private. +

+
+
+ ); +} diff --git a/apps/money-mirror/src/app/dashboard/UploadPanel.tsx b/apps/money-mirror/src/app/dashboard/UploadPanel.tsx new file mode 100644 index 0000000..2869b45 --- /dev/null +++ b/apps/money-mirror/src/app/dashboard/UploadPanel.tsx @@ -0,0 +1,138 @@ +'use client'; + +import { useRef } from 'react'; +import type { StatementType } from '@/lib/statements'; + +interface UploadPanelProps { + error: string | null; + statementType: StatementType; + onStatementTypeChange: (statementType: StatementType) => void; + onUpload: (file: File, statementType: StatementType) => void; +} + +export function UploadPanel({ + error, + statementType, + onStatementTypeChange, + onUpload, +}: UploadPanelProps) { + const fileRef = useRef(null); + + const handleDrop = (e: React.DragEvent) => { + e.preventDefault(); + const file = e.dataTransfer.files[0]; + if (file) onUpload(file, statementType); + }; + + const handleFileChange = (e: React.ChangeEvent) => { + const file = e.target.files?.[0]; + if (file) onUpload(file, statementType); + }; + + return ( +
+
+

+ Upload your statement +

+

+ Upload a password-free PDF from your bank account or credit card. We'll show you + exactly where your money is going. +

+
+ +
+ + +
+ +
e.preventDefault()} + onDrop={handleDrop} + onClick={() => fileRef.current?.click()} + onKeyDown={(e) => { + if (e.key === 'Enter' || e.key === ' ') fileRef.current?.click(); + }} + style={{ + border: '2px dashed var(--border)', + borderRadius: '16px', + padding: '48px 24px', + textAlign: 'center', + cursor: 'pointer', + background: 'var(--bg-card)', + transition: 'all 0.2s ease', + }} + > +
📄
+

+ Drag & drop your PDF, or tap to browse +

+

+ PDF only • Password removed • Max 10 MB +

+ +
+ + {error && ( +
+ {error} +
+ )} + +
+ 🔒 +

+ Your PDF is processed in memory and{' '} + deleted immediately after + parsing. We never store your statement file. +

+
+
+ ); +} diff --git a/apps/money-mirror/src/app/dashboard/page.tsx b/apps/money-mirror/src/app/dashboard/page.tsx new file mode 100644 index 0000000..1baffa5 --- /dev/null +++ b/apps/money-mirror/src/app/dashboard/page.tsx @@ -0,0 +1,222 @@ +'use client'; + +import { useState, useCallback, useEffect } from 'react'; +import type { Advisory } from '@/lib/advisory-engine'; +import type { StatementType } from '@/lib/statements'; +import { UploadPanel } from './UploadPanel'; +import { ParsingPanel } from './ParsingPanel'; +import { ResultsPanel } from './ResultsPanel'; + +/** + * T8a — Dashboard Page + * + * Three states: + * 1. UPLOAD → drag-and-drop PDF upload zone + * 2. PARSING → loading skeleton with progress text + * 3. RESULTS → MirrorCards breakdown + AdvisoryFeed + */ + +type DashboardState = 'upload' | 'parsing' | 'results'; + +interface DashboardResult { + statement_id: string; + institution_name: string; + statement_type: StatementType; + period_start: string | null; + period_end: string | null; + due_date: string | null; + payment_due_paisa: number | null; + minimum_due_paisa: number | null; + credit_limit_paisa: number | null; + transaction_count: number; + summary: { + needs_paisa: number; + wants_paisa: number; + investment_paisa: number; + debt_paisa: number; + other_paisa: number; + total_debits_paisa: number; + total_credits_paisa: number; + }; +} + +export default function DashboardPage() { + const [state, setState] = useState('upload'); + const [result, setResult] = useState(null); + const [advisories, setAdvisories] = useState([]); + const [error, setError] = useState(null); + const [isLoadingDashboard, setIsLoadingDashboard] = useState(true); + const [statementType, setStatementType] = useState('bank_account'); + + const loadDashboard = useCallback(async (statementId: string | null) => { + setIsLoadingDashboard(true); + setError(null); + + try { + const query = statementId ? 
`?statement_id=${encodeURIComponent(statementId)}` : ''; + const resp = await fetch(`/api/dashboard${query}`); + + if (resp.status === 404) { + setState('upload'); + setResult(null); + setAdvisories([]); + return; + } + + if (!resp.ok) { + const body = await resp.json().catch(() => ({})); + throw new Error(body.error ?? body.detail ?? `Dashboard load failed (${resp.status})`); + } + + const data: DashboardResult & { advisories: Advisory[] } = await resp.json(); + setResult(data); + setAdvisories(data.advisories); + setState('results'); + } catch (err) { + setError(err instanceof Error ? err.message : 'Failed to load your dashboard.'); + setState('upload'); + setResult(null); + setAdvisories([]); + } finally { + setIsLoadingDashboard(false); + } + }, []); + + useEffect(() => { + loadDashboard(null).catch(() => { + // Error already reflected in state. + }); + }, [loadDashboard]); + + const handleUpload = useCallback( + async (file: File, nextStatementType: StatementType) => { + setError(null); + + if (file.type && file.type !== 'application/pdf') { + setError('Please upload a PDF file.'); + return; + } + if (file.size > 10 * 1024 * 1024) { + setError('File is too large. Maximum 10 MB.'); + return; + } + + setState('parsing'); + + try { + const formData = new FormData(); + formData.append('file', file); + formData.append('statement_type', nextStatementType); + + const resp = await fetch('/api/statement/parse', { method: 'POST', body: formData }); + + if (!resp.ok) { + const body = await resp.json().catch(() => ({})); + throw new Error(body.error ?? body.detail ?? `Upload failed (${resp.status})`); + } + + const data: DashboardResult = await resp.json(); + await loadDashboard(data.statement_id); + } catch (err) { + setError(err instanceof Error ? 
err.message : 'Something went wrong.'); + setState('upload'); + } + }, + [loadDashboard] + ); + + const resetToUpload = useCallback(() => { + setState('upload'); + setResult(null); + setAdvisories([]); + setError(null); + }, []); + + return ( +
+ {/* Header */} +
+ + MoneyMirror + + {state === 'results' && ( + + )} +
+ + {/* Loading skeleton */} + {isLoadingDashboard && state !== 'parsing' && ( +
+
+
+
+ )} + + {state === 'upload' && !isLoadingDashboard && ( + + )} + + {state === 'parsing' && } + + {state === 'results' && result && !isLoadingDashboard && ( + + )} + + +
+ ); +} diff --git a/apps/money-mirror/src/app/favicon.ico b/apps/money-mirror/src/app/favicon.ico new file mode 100644 index 0000000..718d6fe Binary files /dev/null and b/apps/money-mirror/src/app/favicon.ico differ diff --git a/apps/money-mirror/src/app/global-error.tsx b/apps/money-mirror/src/app/global-error.tsx new file mode 100644 index 0000000..ad4654c --- /dev/null +++ b/apps/money-mirror/src/app/global-error.tsx @@ -0,0 +1,23 @@ +'use client'; + +import * as Sentry from '@sentry/nextjs'; +import NextError from 'next/error'; +import { useEffect } from 'react'; + +export default function GlobalError({ error }: { error: Error & { digest?: string } }) { + useEffect(() => { + Sentry.captureException(error); + }, [error]); + + return ( + + + {/* `NextError` is the default Next.js error page component. Its type + definition requires a `statusCode` prop. However, since the App Router + does not expose status codes for errors, we simply pass 0 to render a + generic error message. */} + + + + ); +} diff --git a/apps/money-mirror/src/app/globals.css b/apps/money-mirror/src/app/globals.css new file mode 100644 index 0000000..7723c6c --- /dev/null +++ b/apps/money-mirror/src/app/globals.css @@ -0,0 +1,119 @@ +@import url("https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800;900&family=Space+Grotesk:wght@400;500;600;700&display=swap"); +@import "tailwindcss"; + +:root { + --bg-base: #080c10; + --bg-card: #0f1520; + --bg-elevated: #161e2e; + --bg-subtle: #1c2436; + --text-primary: #f0f4ff; + --text-secondary: #8b9cbf; + --text-muted: #4a5568; + --accent: #00e5c3; + --accent-dim: rgba(0, 229, 195, 0.12); + --accent-hover: #00ffd5; + --danger: #ff4d6d; + --danger-dim: rgba(255, 77, 109, 0.12); + --warning: #ffb547; + --warning-dim: rgba(255, 181, 71, 0.12); + --success: #22d3a0; + --border: rgba(255, 255, 255, 0.06); + --border-accent: rgba(0, 229, 195, 0.2); + --shadow-card: 0 4px 24px rgba(0, 0, 0, 0.4); +} + +* { box-sizing: border-box; 
-webkit-font-smoothing: antialiased; } + +html, body { + margin: 0; + padding: 0; + background: var(--bg-base); + color: var(--text-primary); + font-family: "Inter", system-ui, sans-serif; + min-height: 100dvh; + overflow-x: hidden; +} + +h1, h2, h3 { font-family: "Space Grotesk", "Inter", sans-serif; } + +.card { + background: var(--bg-card); + border: 1px solid var(--border); + border-radius: 16px; + padding: 24px; + box-shadow: var(--shadow-card); +} + +.btn-primary { + background: var(--accent); + color: #000; + font-weight: 700; + font-size: 1rem; + padding: 14px 28px; + border-radius: 12px; + border: none; + cursor: pointer; + transition: all 0.2s ease; + width: 100%; +} +.btn-primary:hover { background: var(--accent-hover); transform: translateY(-1px); box-shadow: 0 8px 24px rgba(0,229,195,0.25); } +.btn-primary:active { transform: translateY(0); } + +.btn-ghost { + background: transparent; + color: var(--text-secondary); + font-weight: 500; + font-size: 0.9rem; + padding: 12px 20px; + border-radius: 10px; + border: 1px solid var(--border); + cursor: pointer; + transition: all 0.2s ease; + width: 100%; +} +.btn-ghost:hover { border-color: var(--border-accent); color: var(--text-primary); } + +@keyframes fadeInUp { from { opacity: 0; transform: translateY(16px); } to { opacity: 1; transform: translateY(0); } } +@keyframes pulse-glow { 0%, 100% { box-shadow: 0 0 20px rgba(0,229,195,0.1); } 50% { box-shadow: 0 0 40px rgba(0,229,195,0.3); } } +@keyframes shimmer { 0% { background-position: -200% 0; } 100% { background-position: 200% 0; } } + +.animate-fade-up { animation: fadeInUp 0.45s ease both; } + +.skeleton { + background: linear-gradient(90deg, var(--bg-elevated) 25%, var(--bg-subtle) 50%, var(--bg-elevated) 75%); + background-size: 200% 100%; + animation: shimmer 1.5s infinite; + border-radius: 8px; +} + +.page-container { + max-width: 430px; + margin: 0 auto; + min-height: 100dvh; + padding: 0 20px; + display: flex; + flex-direction: column; +} + 
+.content-center { flex: 1; display: flex; flex-direction: column; justify-content: center; } + +.progress-bar { height: 3px; background: var(--border); border-radius: 99px; overflow: hidden; } +.progress-fill { height: 100%; background: var(--accent); border-radius: 99px; transition: width 0.4s cubic-bezier(0.4,0,0.2,1); } + +input[type="tel"], input[type="number"], input[type="text"] { + background: var(--bg-elevated); + border: 1px solid var(--border); + border-radius: 12px; + color: var(--text-primary); + font-size: 1rem; + font-family: "Inter", sans-serif; + padding: 14px 16px; + width: 100%; + outline: none; + transition: border-color 0.2s ease; +} +input:focus { border-color: var(--border-accent); } + +.badge-danger { display: inline-flex; align-items: center; gap: 6px; background: var(--danger-dim); color: var(--danger); border: 1px solid rgba(255,77,109,0.2); border-radius: 99px; padding: 4px 12px; font-size: 0.78rem; font-weight: 600; } +.badge-warning { display: inline-flex; align-items: center; gap: 6px; background: var(--warning-dim); color: var(--warning); border: 1px solid rgba(255,181,71,0.2); border-radius: 99px; padding: 4px 12px; font-size: 0.78rem; font-weight: 600; } + diff --git a/apps/money-mirror/src/app/layout.tsx b/apps/money-mirror/src/app/layout.tsx new file mode 100644 index 0000000..1c05d05 --- /dev/null +++ b/apps/money-mirror/src/app/layout.tsx @@ -0,0 +1,34 @@ +import type { Metadata, Viewport } from 'next'; +import './globals.css'; + +export const metadata: Metadata = { + title: 'MoneyMirror — See the truth about your money', + description: + 'Upload your bank statement and discover where your money actually goes. No sugar-coating. For Gen Z Indians.', + keywords: ['personal finance', 'bank statement', 'budget', 'India', 'Gen Z'], + openGraph: { + title: 'MoneyMirror — The truth about your money', + description: 'See exactly where your money goes. 
No sugar-coating.', + type: 'website', + }, + manifest: '/manifest.json', +}; + +export const viewport: Viewport = { + themeColor: '#080c10', + width: 'device-width', + initialScale: 1, + maximumScale: 1, +}; + +export default function RootLayout({ + children, +}: Readonly<{ + children: React.ReactNode; +}>) { + return ( + + {children} + + ); +} diff --git a/apps/money-mirror/src/app/login/page.tsx b/apps/money-mirror/src/app/login/page.tsx new file mode 100644 index 0000000..1da5254 --- /dev/null +++ b/apps/money-mirror/src/app/login/page.tsx @@ -0,0 +1,206 @@ +'use client'; + +import { useEffect, useState } from 'react'; +import Link from 'next/link'; +import { useRouter } from 'next/navigation'; +import { authClient } from '@/lib/auth/client'; + +type LoginStep = 'email' | 'otp'; + +export default function LoginPage() { + const router = useRouter(); + const { data: session, isPending } = authClient.useSession(); + const [step, setStep] = useState('email'); + const [email, setEmail] = useState(''); + const [otp, setOtp] = useState(''); + const [error, setError] = useState(null); + const [isSubmitting, setIsSubmitting] = useState(false); + + function getNextPath(fallbackPath: string): string { + if (typeof window === 'undefined') { + return fallbackPath; + } + + return new URLSearchParams(window.location.search).get('next') ?? fallbackPath; + } + + useEffect(() => { + if (!isPending && session?.user) { + router.replace(getNextPath('/dashboard')); + } + }, [isPending, router, session]); + + async function handleSendOtp(): Promise { + setIsSubmitting(true); + setError(null); + + const result = await authClient.emailOtp.sendVerificationOtp({ + email, + type: 'sign-in', + }); + + setIsSubmitting(false); + + if (result.error) { + setError(result.error.message ?? 
'Failed to send sign-in code.'); + return; + } + + setStep('otp'); + } + + async function handleVerifyOtp(): Promise { + setIsSubmitting(true); + setError(null); + + const result = await authClient.signIn.emailOtp({ + email, + otp, + }); + + setIsSubmitting(false); + + if (result.error) { + setError(result.error.message ?? 'Invalid verification code.'); + return; + } + + router.replace(getNextPath('/onboarding')); + router.refresh(); + } + + return ( +
+
+
+ + MoneyMirror + + +
+

+ Sign in to see your money clearly +

+

+ We use Neon Auth email OTP for private dashboard access, statement uploads, and weekly + recap ownership. +

+
+
+ +
+ + + {step === 'otp' && ( + + )} + + {error && ( +
+ {error} +
+ )} + + {step === 'email' ? ( + + ) : ( +
+ + +
+ )} +
+ +

+ By continuing, you agree to receive a one-time sign-in code. Your statement data stays + private and account access is tied to this email address. +

+ + + ← Back to home + +
+
+ ); +} diff --git a/apps/money-mirror/src/app/onboarding/page.tsx b/apps/money-mirror/src/app/onboarding/page.tsx new file mode 100644 index 0000000..e9058c5 --- /dev/null +++ b/apps/money-mirror/src/app/onboarding/page.tsx @@ -0,0 +1,284 @@ +'use client'; + +import { useState, useCallback } from 'react'; +import { useRouter } from 'next/navigation'; +import { calculateMoneyHealthScore, type OnboardingAnswers } from '@/lib/scoring'; + +// ─── Question Config ───────────────────────────────────────────────────── + +type QuestionId = 'income' | 'perceived_spend' | 'emergency_fund' | 'invests_sip' | 'has_emi'; + +interface Question { + id: QuestionId; + step: number; + label: string; + sublabel: string; + type: 'number' | 'boolean'; +} + +const QUESTIONS: Question[] = [ + { + id: 'income', + step: 1, + label: 'What is your monthly take-home salary?', + sublabel: 'After taxes and PF deductions.', + type: 'number', + }, + { + id: 'perceived_spend', + step: 2, + label: 'How much do you think you spend per month?', + sublabel: 'Your gut feeling — before you see the bank statement.', + type: 'number', + }, + { + id: 'emergency_fund', + step: 3, + label: 'Do you have 3+ months of expenses saved as an emergency fund?', + sublabel: 'Liquid money you can access in 24 hours.', + type: 'boolean', + }, + { + id: 'invests_sip', + step: 4, + label: 'Do you invest in a SIP or mutual fund every month?', + sublabel: 'Even ₹500/month counts.', + type: 'boolean', + }, + { + id: 'has_emi', + step: 5, + label: 'Do you have any active EMIs, BNPL, or credit card dues?', + sublabel: 'Phone EMI, Zest Money, Lazy Pay, etc.', + type: 'boolean', + }, +]; + +// ─── Component ──────────────────────────────────────────────────────────── + +export default function OnboardingPage() { + const router = useRouter(); + const [step, setStep] = useState(0); + const [answers, setAnswers] = useState>>({}); + const [inputVal, setInputVal] = useState(''); + const [isAnimating, setIsAnimating] = 
useState(false); + + const current = QUESTIONS[step]; + const progressPct = (step / QUESTIONS.length) * 100; + + const handleNext = useCallback( + async (value: string | boolean) => { + if (isAnimating) return; + setIsAnimating(true); + + const updated = { ...answers, [current.id]: value }; + setAnswers(updated); + + if (step < QUESTIONS.length - 1) { + // Brief pause for animation, then advance + setTimeout(() => { + setStep((s) => s + 1); + setInputVal(''); + setIsAnimating(false); + }, 200); + } else { + // Last answer — calculate score and redirect + const finalAnswers: OnboardingAnswers = { + monthly_income_paisa: Math.round(Number(updated.income ?? 0) * 100), + perceived_spend_paisa: Math.round(Number(updated.perceived_spend ?? 0) * 100), + has_emergency_fund: Boolean(updated.emergency_fund), + invests_in_sip: Boolean(updated.invests_sip), + has_emi_or_bnpl: Boolean(updated.has_emi), + }; + + const result = calculateMoneyHealthScore(finalAnswers); + + // Store for score reveal page (survives navigation) + try { + sessionStorage.setItem('mm_score', JSON.stringify(result)); + sessionStorage.setItem('mm_perceived_spend', String(updated.perceived_spend ?? 0)); + } catch { + // sessionStorage unavailable (private/incognito fallback) + console.warn('[MoneyMirror] sessionStorage unavailable'); + } + + // Fire onboarding_completed server-side (single emission source) + fetch('/api/onboarding/complete', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + monthly_income_paisa: finalAnswers.monthly_income_paisa, + money_health_score: result.score, + perceived_spend_paisa: finalAnswers.perceived_spend_paisa, + }), + }).catch(() => { + // Non-fatal — score reveal must not be blocked + }); + + // Navigate to score reveal + router.push('/score'); + } + }, + [step, answers, current, isAnimating, router] + ); + + const handleBack = () => { + if (step > 0) { + setStep((s) => s - 1); + setInputVal(''); + } + }; + + return ( +
+ {/* Progress bar */} +
+
+
+
+

+ {step + 1} of {QUESTIONS.length} +

+
+ + {/* Question */} +
+
+ {/* Logo */} + + MoneyMirror + + +
+

+ {current.label} +

+

+ {current.sublabel} +

+
+ + {/* Input */} + {current.type === 'number' ? ( +
+
+ + ₹ + + setInputVal(e.target.value)} + onKeyDown={(e) => { + if (e.key === 'Enter' && Number(inputVal) > 0) { + handleNext(inputVal); + } + }} + min={0} + style={{ paddingLeft: '36px' }} + autoFocus + /> +
+ +
+ ) : ( +
+ + +
+ )} +
+
+ + {/* Back button */} + {step > 0 && ( +
+ +
+ )} +
+ ); +} diff --git a/apps/money-mirror/src/app/page.tsx b/apps/money-mirror/src/app/page.tsx new file mode 100644 index 0000000..6ec89b3 --- /dev/null +++ b/apps/money-mirror/src/app/page.tsx @@ -0,0 +1,246 @@ +import Link from 'next/link'; + +/** + * T11 — Landing Page + * + * The first thing a user sees. Warikoo-style brutal honesty positioning. + * No login required — landing page pushes to sign-in. + */ + +export default function LandingPage() { + return ( +
+
+ {/* Hero */} +
+ {/* Logo */} +

+ Money +
+ Mirror +

+ +

+ See the truth about +
+ where your money goes. +

+ +

+ Upload your bank statement. Get a{' '} + + brutally honest breakdown + {' '} + of your spending — needs vs wants vs leaks. Works with Indian bank account and credit + card PDFs. No sugar‑coating. No judgement. Just facts. +

+
+ + {/* Stats Strip */} +
+
+
+ 67% +
+
+ underestimate +
+ their spending +
+
+
+
+ ₹12k +
+
+ avg hidden leaks +
+ per month +
+
+
+
+ 2 min +
+
+ to see +
+ the truth +
+
+
+ + {/* How it works */} +
+

+ How it works +

+ + {[ + { step: '01', text: 'Answer 5 quick questions about your money habits' }, + { step: '02', text: 'Upload your bank account or credit card statement (PDF)' }, + { step: '03', text: 'See exactly where every rupee went' }, + ].map((item) => ( +
+ + {item.step} + + + {item.text} + +
+ ))} +
+ + {/* CTA */} +
+ + + +

+ 🔒 Your data never leaves your browser until you upload. +
+ PDFs are deleted immediately after processing. +

+
+ + {/* Footer */} +
+

+ Inspired by Ankur Warikoo's Money Matters series. +
+ Built for Gen Z Indians. +

+
+
+
+ ); +} diff --git a/apps/money-mirror/src/app/score/page.tsx b/apps/money-mirror/src/app/score/page.tsx new file mode 100644 index 0000000..aac952a --- /dev/null +++ b/apps/money-mirror/src/app/score/page.tsx @@ -0,0 +1,230 @@ +'use client'; + +import { useEffect, useState } from 'react'; +import { useRouter } from 'next/navigation'; +import type { MoneyHealthScore } from '@/lib/scoring'; + +/** + * T10 — Score Reveal Page + * + * The "mirror moment" — shows their Money Health Score with a dramatic + * reveal animation. This is the payoff after the 5-question onboarding. + * The CTA pushes them to upload their bank statement for the real truth. + */ + +const GRADE_CONFIG: Record< + MoneyHealthScore['grade'], + { color: string; emoji: string; message: string } +> = { + A: { + color: 'var(--success)', + emoji: '🪞', + message: "Your perception is pretty close to reality. Let's verify.", + }, + B: { + color: 'var(--accent)', + emoji: '🪞', + message: "Not bad — but there's probably a gap. Want to see it?", + }, + C: { + color: 'var(--warning)', + emoji: '⚠️', + message: "Most people in your bracket spend 30–40% more than they think. Let's find out.", + }, + D: { + color: 'var(--danger)', + emoji: '🔥', + message: "You're likely bleeding money you don't know about.", + }, + F: { + color: 'var(--danger)', + emoji: '🚨', + message: 'Financial blind spot detected. 
You need to see your bank statement — now.', + }, +}; + +export default function ScoreRevealPage() { + const router = useRouter(); + const [score] = useState(() => { + if (typeof window === 'undefined') { + return null; + } + + try { + const raw = sessionStorage.getItem('mm_score'); + if (!raw) { + return null; + } + + return JSON.parse(raw) as MoneyHealthScore; + } catch { + return null; + } + }); + const [revealed, setRevealed] = useState(false); + + useEffect(() => { + if (!score) { + router.replace('/onboarding'); + return; + } + + const timer = setTimeout(() => setRevealed(true), 600); + return () => clearTimeout(timer); + }, [router, score]); + + if (!score) { + return ( +
+
+
+
+
+ ); + } + + const config = GRADE_CONFIG[score.grade]; + + return ( +
+
+ {/* Logo */} + + MoneyMirror + + + {/* Score Circle */} +
+
+ + {score.score} + + + out of 100 + +
+ + {/* Grade Badge */} +
+ {config.emoji} {score.label} +
+
+ + {/* Message */} +
+

+ {config.message} +

+ + {score.perceived_gap_pct > 0 && ( +

+ Estimated perception gap: ~{score.perceived_gap_pct}% +

+ )} +
+ + {/* CTA */} +
+ + +
+
+
+ ); +} diff --git a/apps/money-mirror/src/app/sentry-example-page/page.tsx b/apps/money-mirror/src/app/sentry-example-page/page.tsx new file mode 100644 index 0000000..80f449d --- /dev/null +++ b/apps/money-mirror/src/app/sentry-example-page/page.tsx @@ -0,0 +1,236 @@ +'use client'; + +import * as Sentry from '@sentry/nextjs'; +import Head from 'next/head'; +import { useEffect, useState } from 'react'; + +class SentryExampleFrontendError extends Error { + constructor(message: string | undefined) { + super(message); + this.name = 'SentryExampleFrontendError'; + } +} + +export default function Page() { + const [hasSentError, setHasSentError] = useState(false); + const [isConnected, setIsConnected] = useState(true); + + useEffect(() => { + Sentry.logger.info('Sentry example page loaded'); + async function checkConnectivity() { + const result = await Sentry.diagnoseSdkConnectivity(); + setIsConnected(result !== 'sentry-unreachable'); + } + checkConnectivity(); + }, []); + + return ( +
+ + sentry-example-page + + + +
+
+ + + +

sentry-example-page

+ +

+ Click the button below, and view the sample error on the Sentry{' '} + + Issues Page + + . For more details about setting up Sentry,{' '} + + read our docs + + . +

+ + + + {hasSentError ? ( +

Error sent to Sentry.

+ ) : !isConnected ? ( +
+

+ It looks like network requests to Sentry are being blocked, which will prevent errors + from being captured. Try disabling your ad-blocker to complete the test. +

+
+ ) : ( +
+ )} + +
+
+ + +
+ ); +} diff --git a/apps/money-mirror/src/components/AdvisoryFeed.tsx b/apps/money-mirror/src/components/AdvisoryFeed.tsx new file mode 100644 index 0000000..4f6c911 --- /dev/null +++ b/apps/money-mirror/src/components/AdvisoryFeed.tsx @@ -0,0 +1,112 @@ +/** + * AdvisoryFeed — Displays advisory cards from the advisory engine + * + * Each advisory is styled by severity (critical = red, warning = amber, info = teal). + */ + +import type { Advisory } from '@/lib/advisory-engine'; + +interface AdvisoryFeedProps { + advisories: Advisory[]; +} + +const SEVERITY_STYLES: Record< + Advisory['severity'], + { bg: string; border: string; color: string; icon: string } +> = { + critical: { + bg: 'var(--danger-dim)', + border: 'rgba(255,77,109,0.25)', + color: 'var(--danger)', + icon: '🚨', + }, + warning: { + bg: 'var(--warning-dim)', + border: 'rgba(255,181,71,0.25)', + color: 'var(--warning)', + icon: '⚠️', + }, + info: { + bg: 'var(--accent-dim)', + border: 'rgba(0,229,195,0.25)', + color: 'var(--accent)', + icon: '💡', + }, +}; + +export function AdvisoryFeed({ advisories }: AdvisoryFeedProps) { + if (advisories.length === 0) { + return ( +
+ +

+ No red flags found. Your spending looks healthy! +

+
+ ); + } + + return ( +
+ {advisories.map((adv, i) => { + const style = SEVERITY_STYLES[adv.severity]; + return ( +
+
+ {style.icon} + + {adv.headline} + +
+

+ {adv.message} +

+
+ ); + })} +
+ ); +} diff --git a/apps/money-mirror/src/components/MirrorCard.tsx b/apps/money-mirror/src/components/MirrorCard.tsx new file mode 100644 index 0000000..4348e1a --- /dev/null +++ b/apps/money-mirror/src/components/MirrorCard.tsx @@ -0,0 +1,84 @@ +/** + * MirrorCard — A breakdown card showing spending by category + * + * Displays: category name, amount in ₹, percentage of total, + * and a visual bar chart indicator. + */ + +interface MirrorCardProps { + label: string; + amount_paisa: number; + total_paisa: number; + color: string; + icon: string; +} + +export function MirrorCard({ label, amount_paisa, total_paisa, color, icon }: MirrorCardProps) { + const rupees = Math.round(amount_paisa / 100); + const pct = total_paisa > 0 ? Math.round((amount_paisa / total_paisa) * 100) : 0; + + return ( +
+
+
+ {icon} + + {label} + +
+
+ + ₹{rupees.toLocaleString('en-IN')} + + + {pct}% + +
+
+
+
+
+
+ ); +} diff --git a/apps/money-mirror/src/instrumentation-client.ts b/apps/money-mirror/src/instrumentation-client.ts new file mode 100644 index 0000000..fb70726 --- /dev/null +++ b/apps/money-mirror/src/instrumentation-client.ts @@ -0,0 +1,31 @@ +// This file configures the initialization of Sentry on the client. +// The added config here will be used whenever a users loads a page in their browser. +// https://docs.sentry.io/platforms/javascript/guides/nextjs/ + +import * as Sentry from '@sentry/nextjs'; + +Sentry.init({ + dsn: 'https://de65ad90374bba0827a8a24959f308fe@o4511154725060608.ingest.us.sentry.io/4511154738626560', + + // Add optional integrations for additional features + integrations: [Sentry.replayIntegration()], + + // Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control. + tracesSampleRate: 1, + // Enable logs to be sent to Sentry + enableLogs: true, + + // Define how likely Replay events are sampled. + // This sets the sample rate to be 10%. You may want this to be 100% while + // in development and sample at a lower rate in production + replaysSessionSampleRate: 0.1, + + // Define how likely Replay events are sampled when an error occurs. 
+ replaysOnErrorSampleRate: 1.0, + + // Enable sending user PII (Personally Identifiable Information) + // https://docs.sentry.io/platforms/javascript/guides/nextjs/configuration/options/#sendDefaultPii + sendDefaultPii: true, +}); + +export const onRouterTransitionStart = Sentry.captureRouterTransitionStart; diff --git a/apps/money-mirror/src/instrumentation.ts b/apps/money-mirror/src/instrumentation.ts new file mode 100644 index 0000000..8aff09f --- /dev/null +++ b/apps/money-mirror/src/instrumentation.ts @@ -0,0 +1,13 @@ +import * as Sentry from '@sentry/nextjs'; + +export async function register() { + if (process.env.NEXT_RUNTIME === 'nodejs') { + await import('../sentry.server.config'); + } + + if (process.env.NEXT_RUNTIME === 'edge') { + await import('../sentry.edge.config'); + } +} + +export const onRequestError = Sentry.captureRequestError; diff --git a/apps/money-mirror/src/lib/advisory-engine.test.ts b/apps/money-mirror/src/lib/advisory-engine.test.ts new file mode 100644 index 0000000..1539f35 --- /dev/null +++ b/apps/money-mirror/src/lib/advisory-engine.test.ts @@ -0,0 +1,48 @@ +import { describe, expect, it } from 'vitest'; +import { generateAdvisories } from '@/lib/advisory-engine'; + +describe('generateAdvisories', () => { + it('skips no-investment on credit-card statements', () => { + const advisories = generateAdvisories({ + statement_type: 'credit_card', + summary: { + needs: 100000, + wants: 200000, + investment: 0, + debt: 0, + other: 0, + total_debits: 300000, + total_credits: 500000, + }, + perceived_spend_paisa: 250000, + monthly_income_paisa: 6000000, + debt_load_paisa: 0, + food_delivery_paisa: 0, + subscription_paisa: 0, + }); + + expect(advisories.some((advisory) => advisory.trigger === 'NO_INVESTMENT')).toBe(false); + }); + + it('uses debt_load_paisa for high-debt advisory math', () => { + const advisories = generateAdvisories({ + statement_type: 'credit_card', + summary: { + needs: 0, + wants: 300000, + investment: 0, + debt: 0, + 
other: 0, + total_debits: 300000, + total_credits: 1500000, + }, + perceived_spend_paisa: 250000, + monthly_income_paisa: 1000000, + debt_load_paisa: 500000, + food_delivery_paisa: 0, + subscription_paisa: 0, + }); + + expect(advisories.some((advisory) => advisory.trigger === 'HIGH_DEBT_RATIO')).toBe(true); + }); +}); diff --git a/apps/money-mirror/src/lib/advisory-engine.ts b/apps/money-mirror/src/lib/advisory-engine.ts new file mode 100644 index 0000000..7c8accb --- /dev/null +++ b/apps/money-mirror/src/lib/advisory-engine.ts @@ -0,0 +1,143 @@ +/** + * T9 — Advisory Engine + * + * Generates 5 types of financial advisory cards based on the + * user's actual transaction data (post bank-statement-parse). + * + * Triggers: + * 1. PERCEPTION_GAP — comparing perceived vs actual spend + * 2. SUBSCRIPTION_LEAK — recurring wants > ₹2,000/month + * 3. FOOD_DELIVERY — dining/food delivery > 15% of debits + * 4. NO_INVESTMENT — zero SIP/MF transactions detected + * 5. HIGH_DEBT_RATIO — debt payments > 40% of income + * + * Each advisory has a severity (info | warning | critical), + * a headline, and a detailed message. + */ + +import type { CategorySummary } from './categorizer'; +import type { StatementType } from './statements'; + +export type AdvisorySeverity = 'info' | 'warning' | 'critical'; + +export interface Advisory { + id: string; + trigger: string; + severity: AdvisorySeverity; + headline: string; + message: string; + amount_paisa?: number; +} + +interface AdvisoryInput { + statement_type: StatementType; + summary: CategorySummary; + perceived_spend_paisa: number; + monthly_income_paisa: number; + debt_load_paisa: number; + food_delivery_paisa: number; + subscription_paisa: number; +} + +/** + * Generate advisory feed items from parsed statement data. + * Returns advisories sorted by severity (critical first). 
+ */ +export function generateAdvisories(input: AdvisoryInput): Advisory[] { + const advisories: Advisory[] = []; + const { summary, perceived_spend_paisa, monthly_income_paisa } = input; + + // ── 1. PERCEPTION_GAP ──────────────────────────────────────────── + if (perceived_spend_paisa > 0 && summary.total_debits > 0) { + const gap = summary.total_debits - perceived_spend_paisa; + const gapPct = Math.round((gap / perceived_spend_paisa) * 100); + + if (gapPct > 10) { + const severity: AdvisorySeverity = + gapPct > 40 ? 'critical' : gapPct > 20 ? 'warning' : 'info'; + + advisories.push({ + id: 'perception-gap', + trigger: 'PERCEPTION_GAP', + severity, + headline: `You spent ${gapPct}% more than you thought`, + message: `You estimated ₹${formatRupees(perceived_spend_paisa)} per month, but your bank statement shows ₹${formatRupees(summary.total_debits)}. That's ₹${formatRupees(gap)} you didn't account for.`, + amount_paisa: gap, + }); + } + } + + // ── 2. SUBSCRIPTION_LEAK ───────────────────────────────────────── + if (input.subscription_paisa > 200000) { + // > ₹2,000/month + advisories.push({ + id: 'subscription-leak', + trigger: 'SUBSCRIPTION_LEAK', + severity: input.subscription_paisa > 500000 ? 'warning' : 'info', + headline: `₹${formatRupees(input.subscription_paisa)}/mo in subscriptions`, + message: + 'Netflix, Spotify, YouTube Premium, gym membership — they feel small individually but compound fast. Are you actively using all of them?', + amount_paisa: input.subscription_paisa, + }); + } + + // ── 3. FOOD_DELIVERY ───────────────────────────────────────────── + if (summary.total_debits > 0) { + const foodPct = (input.food_delivery_paisa / summary.total_debits) * 100; + if (foodPct > 15) { + advisories.push({ + id: 'food-delivery', + trigger: 'FOOD_DELIVERY', + severity: foodPct > 25 ? 
'critical' : 'warning', + headline: `${Math.round(foodPct)}% of your spending is food delivery`, + message: `You spent ₹${formatRupees(input.food_delivery_paisa)} on Swiggy, Zomato, and restaurants. That's ₹${formatRupees(input.food_delivery_paisa * 12)} per year just on convenience.`, + amount_paisa: input.food_delivery_paisa, + }); + } + } + + // ── 4. NO_INVESTMENT ───────────────────────────────────────────── + if (input.statement_type === 'bank_account' && summary.investment === 0) { + advisories.push({ + id: 'no-investment', + trigger: 'NO_INVESTMENT', + severity: 'warning', + headline: 'No investments detected this month', + message: + 'Your statement shows zero SIP, mutual fund, or recurring investment transactions. Even ₹500/month in an index fund compounds significantly over 10 years.', + }); + } + + // ── 5. HIGH_DEBT_RATIO ─────────────────────────────────────────── + if (monthly_income_paisa > 0) { + const debtRatio = (input.debt_load_paisa / monthly_income_paisa) * 100; + if (debtRatio > 40) { + advisories.push({ + id: 'high-debt', + trigger: 'HIGH_DEBT_RATIO', + severity: 'critical', + headline: `${Math.round(debtRatio)}% of income goes to debt & EMIs`, + message: `You're paying ₹${formatRupees(input.debt_load_paisa)} in EMIs, BNPL, and credit card dues. Anything above 40% of income is a red flag — consider consolidating or paying off high-interest debt first.`, + amount_paisa: input.debt_load_paisa, + }); + } + } + + // Sort: critical → warning → info + const severityOrder: Record = { + critical: 0, + warning: 1, + info: 2, + }; + + return advisories.sort((a, b) => severityOrder[a.severity] - severityOrder[b.severity]); +} + +/** + * Convert paisa to formatted rupees string (no currency symbol). + * E.g. 
1234500 → "12,345" + */ +function formatRupees(paisa: number): string { + const rupees = Math.round(paisa / 100); + return rupees.toLocaleString('en-IN'); +} diff --git a/apps/money-mirror/src/lib/auth/client.ts b/apps/money-mirror/src/lib/auth/client.ts new file mode 100644 index 0000000..b784d3d --- /dev/null +++ b/apps/money-mirror/src/lib/auth/client.ts @@ -0,0 +1,5 @@ +'use client'; + +import { createAuthClient } from '@neondatabase/auth/next'; + +export const authClient = createAuthClient(); diff --git a/apps/money-mirror/src/lib/auth/server.ts b/apps/money-mirror/src/lib/auth/server.ts new file mode 100644 index 0000000..2f9a943 --- /dev/null +++ b/apps/money-mirror/src/lib/auth/server.ts @@ -0,0 +1,13 @@ +import { createAuthServer } from '@neondatabase/auth/next/server'; + +type AuthServer = ReturnType; + +let authServer: AuthServer | null = null; + +export function getAuthServer(): AuthServer { + if (!authServer) { + authServer = createAuthServer(); + } + + return authServer; +} diff --git a/apps/money-mirror/src/lib/auth/session.ts b/apps/money-mirror/src/lib/auth/session.ts new file mode 100644 index 0000000..fe20aa5 --- /dev/null +++ b/apps/money-mirror/src/lib/auth/session.ts @@ -0,0 +1,22 @@ +import { getAuthServer } from '@/lib/auth/server'; + +export interface SessionUser { + id: string; + email: string; + name: string; +} + +export async function getSessionUser(): Promise { + const authServer = getAuthServer(); + const { data, error } = await authServer.getSession(); + + if (error || !data?.user?.id || !data.user.email) { + return null; + } + + return { + id: data.user.id, + email: data.user.email, + name: data.user.name, + }; +} diff --git a/apps/money-mirror/src/lib/categorizer.ts b/apps/money-mirror/src/lib/categorizer.ts new file mode 100644 index 0000000..10f3185 --- /dev/null +++ b/apps/money-mirror/src/lib/categorizer.ts @@ -0,0 +1,192 @@ +/** + * T6 — Transaction Categorization Engine + * + * Classifies a parsed transaction description into 
one of four buckets: + * - needs → rent, utilities, groceries, transport, insurance + * - wants → dining, entertainment, shopping, subscriptions + * - investment → SIP, mutual funds, stocks, gold + * - debt → EMI, BNPL, credit card payment, loan + * - other → catch-all (transfers, ATM withdrawals, etc.) + * + * Approach: rule-based keyword matching (fast, zero API calls). + * The Gemini parser step handles the initial description cleanup, + * so inputs here are normalized merchant names. + */ + +export type TransactionCategory = 'needs' | 'wants' | 'investment' | 'debt' | 'other'; + +export interface CategorizedTransaction { + description: string; + amount_paisa: number; + date: string; + type: 'debit' | 'credit'; + category: TransactionCategory; + is_recurring: boolean; +} + +export type CreditCardEntryKind = + | 'purchase' + | 'payment' + | 'refund' + | 'reversal' + | 'fee' + | 'interest' + | 'cash_advance' + | 'other'; + +// ─── Keyword rule sets ───────────────────────────────────────────────────── + +const RULES: Record = { + needs: + /\b(rent|electricity|water|gas|broadband|internet|grocery|grocer|zepto|blinkit|bigbasket|d-mart|dmart|reliance smart|metro|more supermarket|uber|ola|rapido|namma yatri|petrol|fuel|insurance|mediclaim|lic|health|pharmacy|chemist|doctor|hospital|clinic)\b/i, + + wants: + /\b(swiggy|zomato|eatsure|dunzo|barbeque|starbucks|cafe|restaurant|hotel|bar|pub|food|pizza|burger|kfc|mcdonald|domino|netflix|hotstar|prime video|amazon prime|spotify|gaana|youtube premium|jiocinema|zee5|sonyliv|bookmyshow|pvr|inox|myntra|ajio|nykaa|meesho|flipkart|amazon|shopping|fashion|cosmetic|saloon|salon|parlour|gaming|ludo|steam|play store|app store|travel|makemytrip|goibibo|ixigo|cleartrip|irctc|redbus|yolo)\b/i, + + investment: + /\b(sip|mutual fund|groww|zerodha|kuvera|paytm money|coin by zerodha|iifl|hdfc mutual|sbi mutual|axis mutual|icici pru|nippon|mirae|navi mutual|smallcase|stock|nse|bse|gold|sovereign gold|ppf|nps|elss|fd|fixed deposit|recurring 
deposit|rd)\b/i, + + debt: /\b(emi|equated monthly|loan|credit card|payment due|minimum due|bnpl|buy now pay later|bajaj finserv|zestmoney|zest money|lazypay|lazy pay|simpl|slice|uni card|onecard|stashfin|moneyview|creditmantri|freecharge credit|paytm postpaid|flipkart pay later|amazon pay later|hdfc card|sbi card|icici card|axis card|kotak card)\b/i, + + // "other" is the fallback — not matched by keyword + other: /^$/, +}; + +const RECURRING_SIGNALS: RegExp = + /\b(sip|subscription|emi|lic|insurance|broadband|internet|rent|netflix|hotstar|prime|spotify|mutual fund|monthly|auto.?debit)\b/i; + +// ─── Categorization Function ─────────────────────────────────────────────── + +/** + * Categorize a single transaction. + * + * @param description - Normalized merchant/narration string from Gemini + * @param amount_paisa - Transaction amount in paisa (always positive) + * @param date - ISO date string (YYYY-MM-DD) + * @param type - "debit" or "credit" + */ +export function categorizeTransaction( + description: string, + amount_paisa: number, + date: string, + type: 'debit' | 'credit' +): CategorizedTransaction { + const d = description.trim(); + let category: TransactionCategory = 'other'; + + // Credits are always "other" (salary, transfers, refunds) + if (type === 'credit') { + return { + description: d, + amount_paisa, + date, + type, + category: 'other', + is_recurring: false, + }; + } + + // Priority order: investment > debt > needs > wants > other + for (const key of ['investment', 'debt', 'needs', 'wants'] as TransactionCategory[]) { + if (RULES[key].test(d)) { + category = key; + break; + } + } + + const is_recurring = RECURRING_SIGNALS.test(d); + + return { + description: d, + amount_paisa, + date, + type, + category, + is_recurring, + }; +} + +export function categorizeCreditCardTransaction( + description: string, + amount_paisa: number, + date: string, + entryKind: CreditCardEntryKind +): CategorizedTransaction { + const normalizedEntryKind = entryKind; + + if 
(normalizedEntryKind === 'payment') { + return { + description: description.trim(), + amount_paisa, + date, + type: 'credit', + category: 'debt', + is_recurring: false, + }; + } + + if (normalizedEntryKind === 'refund' || normalizedEntryKind === 'reversal') { + return { + description: description.trim(), + amount_paisa, + date, + type: 'credit', + category: 'other', + is_recurring: false, + }; + } + + if ( + normalizedEntryKind === 'fee' || + normalizedEntryKind === 'interest' || + normalizedEntryKind === 'cash_advance' + ) { + return { + description: description.trim(), + amount_paisa, + date, + type: 'debit', + category: 'debt', + is_recurring: false, + }; + } + + return categorizeTransaction(description, amount_paisa, date, 'debit'); +} + +/** + * Summarize a list of categorized transactions into bucket totals. + * Returns totals in paisa. + */ +export interface CategorySummary { + needs: number; + wants: number; + investment: number; + debt: number; + other: number; + total_debits: number; + total_credits: number; +} + +export function summarizeByCategory(transactions: CategorizedTransaction[]): CategorySummary { + const summary: CategorySummary = { + needs: 0, + wants: 0, + investment: 0, + debt: 0, + other: 0, + total_debits: 0, + total_credits: 0, + }; + + for (const tx of transactions) { + if (tx.type === 'credit') { + summary.total_credits += tx.amount_paisa; + } else { + summary.total_debits += tx.amount_paisa; + summary[tx.category] += tx.amount_paisa; + } + } + + return summary; +} diff --git a/apps/money-mirror/src/lib/dashboard.ts b/apps/money-mirror/src/lib/dashboard.ts new file mode 100644 index 0000000..55c5c09 --- /dev/null +++ b/apps/money-mirror/src/lib/dashboard.ts @@ -0,0 +1,261 @@ +import { generateAdvisories, type Advisory } from '@/lib/advisory-engine'; +import type { CategorySummary } from '@/lib/categorizer'; +import { getDb, toNumber } from '@/lib/db'; +import type { StatementType } from '@/lib/statements'; + +const FOOD_REGEX = + 
/\b(swiggy|zomato|eatsure|dunzo|barbeque|starbucks|cafe|restaurant|food|pizza|burger|kfc|mcdonald|domino)\b/i; +const SUBSCRIPTION_REGEX = + /\b(netflix|hotstar|prime|spotify|youtube premium|jiocinema|zee5|sonyliv|subscription|gym|membership)\b/i; + +export interface DashboardSummary { + needs_paisa: number; + wants_paisa: number; + investment_paisa: number; + debt_paisa: number; + other_paisa: number; + total_debits_paisa: number; + total_credits_paisa: number; +} + +export interface DashboardData { + statement_id: string; + institution_name: string; + statement_type: StatementType; + period_start: string | null; + period_end: string | null; + due_date: string | null; + payment_due_paisa: number | null; + minimum_due_paisa: number | null; + credit_limit_paisa: number | null; + transaction_count: number; + summary: DashboardSummary; + advisories: Advisory[]; +} + +interface StatementRow { + id: string; + institution_name: string; + statement_type: StatementType; + period_start: string | null; + period_end: string | null; + due_date: string | null; + payment_due_paisa: number | null; + minimum_due_paisa: number | null; + credit_limit_paisa: number | null; + perceived_spend_paisa: number; + monthly_income_paisa: number; +} + +interface TransactionRow { + category: string; + amount_paisa: number; + type: 'debit' | 'credit'; + description: string; + is_recurring: boolean; +} + +export async function fetchDashboardData( + userId: string, + statementId: string | null +): Promise { + const sql = getDb(); + + const statementRows = statementId + ? 
((await sql` + SELECT s.id, s.institution_name, s.statement_type, s.period_start, s.period_end, s.due_date, s.payment_due_paisa, s.minimum_due_paisa, s.credit_limit_paisa, s.perceived_spend_paisa, p.monthly_income_paisa + FROM statements s + LEFT JOIN profiles p ON p.id = s.user_id + WHERE s.user_id = ${userId} + AND s.status = 'processed' + AND s.id = ${statementId} + LIMIT 1 + `) as { + id: string; + institution_name: string; + statement_type: StatementType; + period_start: string | null; + period_end: string | null; + due_date: string | null; + payment_due_paisa: number | string | bigint | null; + minimum_due_paisa: number | string | bigint | null; + credit_limit_paisa: number | string | bigint | null; + perceived_spend_paisa: number | string | bigint; + monthly_income_paisa: number | string | bigint | null; + }[]) + : ((await sql` + SELECT s.id, s.institution_name, s.statement_type, s.period_start, s.period_end, s.due_date, s.payment_due_paisa, s.minimum_due_paisa, s.credit_limit_paisa, s.perceived_spend_paisa, p.monthly_income_paisa + FROM statements s + LEFT JOIN profiles p ON p.id = s.user_id + WHERE s.user_id = ${userId} + AND s.status = 'processed' + ORDER BY s.created_at DESC + LIMIT 1 + `) as { + id: string; + institution_name: string; + statement_type: StatementType; + period_start: string | null; + period_end: string | null; + due_date: string | null; + payment_due_paisa: number | string | bigint | null; + minimum_due_paisa: number | string | bigint | null; + credit_limit_paisa: number | string | bigint | null; + perceived_spend_paisa: number | string | bigint; + monthly_income_paisa: number | string | bigint | null; + }[]); + + const row = statementRows[0]; + if (!row) { + return null; + } + + const statement: StatementRow = { + id: row.id, + institution_name: row.institution_name, + statement_type: row.statement_type, + period_start: row.period_start, + period_end: row.period_end, + due_date: row.due_date, + payment_due_paisa: row.payment_due_paisa 
=== null ? null : toNumber(row.payment_due_paisa), + minimum_due_paisa: row.minimum_due_paisa === null ? null : toNumber(row.minimum_due_paisa), + credit_limit_paisa: row.credit_limit_paisa === null ? null : toNumber(row.credit_limit_paisa), + perceived_spend_paisa: toNumber(row.perceived_spend_paisa), + monthly_income_paisa: + row.monthly_income_paisa === null ? 0 : toNumber(row.monthly_income_paisa), + }; + + const transactionRows = (await sql` + SELECT category, amount_paisa, type, description, is_recurring + FROM transactions + WHERE statement_id = ${statement.id} + AND user_id = ${userId} + ORDER BY date DESC + LIMIT 1000 + `) as { + category: string; + amount_paisa: number | string | bigint; + type: 'debit' | 'credit'; + description: string; + is_recurring: boolean; + }[]; + + const transactions: TransactionRow[] = transactionRows.map((transactionRow) => ({ + category: transactionRow.category, + amount_paisa: toNumber(transactionRow.amount_paisa), + type: transactionRow.type, + description: transactionRow.description, + is_recurring: transactionRow.is_recurring, + })); + + const summary = buildDashboardSummary(transactions); + const advisories = buildAdvisories(summary, statement, transactions); + + return { + statement_id: statement.id, + institution_name: statement.institution_name, + statement_type: statement.statement_type, + period_start: statement.period_start, + period_end: statement.period_end, + due_date: statement.due_date, + payment_due_paisa: statement.payment_due_paisa, + minimum_due_paisa: statement.minimum_due_paisa, + credit_limit_paisa: statement.credit_limit_paisa, + transaction_count: transactions.length, + summary, + advisories, + }; +} + +function buildDashboardSummary(transactions: TransactionRow[]): DashboardSummary { + const summary: CategorySummary = { + needs: 0, + wants: 0, + investment: 0, + debt: 0, + other: 0, + total_debits: 0, + total_credits: 0, + }; + + for (const tx of transactions) { + if (tx.type === 'credit') { + 
summary.total_credits += tx.amount_paisa; + continue; + } + + summary.total_debits += tx.amount_paisa; + + if (tx.category === 'needs') { + summary.needs += tx.amount_paisa; + continue; + } + if (tx.category === 'wants') { + summary.wants += tx.amount_paisa; + continue; + } + if (tx.category === 'investment') { + summary.investment += tx.amount_paisa; + continue; + } + if (tx.category === 'debt') { + summary.debt += tx.amount_paisa; + continue; + } + + summary.other += tx.amount_paisa; + } + + return { + needs_paisa: summary.needs, + wants_paisa: summary.wants, + investment_paisa: summary.investment, + debt_paisa: summary.debt, + other_paisa: summary.other, + total_debits_paisa: summary.total_debits, + total_credits_paisa: summary.total_credits, + }; +} + +function buildAdvisories( + summary: DashboardSummary, + statement: StatementRow, + transactions: TransactionRow[] +): Advisory[] { + let foodDeliveryPaisa = 0; + let subscriptionPaisa = 0; + + for (const tx of transactions) { + if (tx.type !== 'debit') { + continue; + } + + if (FOOD_REGEX.test(tx.description)) { + foodDeliveryPaisa += tx.amount_paisa; + } + if (SUBSCRIPTION_REGEX.test(tx.description)) { + subscriptionPaisa += tx.amount_paisa; + } + } + + return generateAdvisories({ + statement_type: statement.statement_type, + summary: { + needs: summary.needs_paisa, + wants: summary.wants_paisa, + investment: summary.investment_paisa, + debt: summary.debt_paisa, + other: summary.other_paisa, + total_debits: summary.total_debits_paisa, + total_credits: summary.total_credits_paisa, + }, + perceived_spend_paisa: statement.perceived_spend_paisa, + monthly_income_paisa: statement.monthly_income_paisa, + debt_load_paisa: + statement.statement_type === 'credit_card' + ? (statement.payment_due_paisa ?? statement.minimum_due_paisa ?? 
summary.debt_paisa) + : summary.debt_paisa, + food_delivery_paisa: foodDeliveryPaisa, + subscription_paisa: subscriptionPaisa, + }); +} diff --git a/apps/money-mirror/src/lib/db.ts b/apps/money-mirror/src/lib/db.ts new file mode 100644 index 0000000..edd4288 --- /dev/null +++ b/apps/money-mirror/src/lib/db.ts @@ -0,0 +1,233 @@ +import { neon } from '@neondatabase/serverless'; + +export interface ProfileIdentity { + id: string; + email: string; +} + +export interface ProfileFinancialSnapshot { + monthly_income_paisa: number; + perceived_spend_paisa: number; +} + +export interface WeeklyRecapStatementRow { + id: string; + period_start: string | null; + period_end: string | null; + total_debits_paisa: number; + total_credits_paisa: number; +} + +export interface CategoryTotalRow { + category: string; + amount_paisa: number; +} + +type SqlClient = ReturnType; + +let sqlClient: SqlClient | null = null; + +function readRequiredEnv(name: string): string { + const value = process.env[name]; + if (!value) { + throw new Error(`${name} is required.`); + } + return value; +} + +function getSqlClient(): SqlClient { + if (!sqlClient) { + sqlClient = neon(readRequiredEnv('DATABASE_URL')); + } + return sqlClient; +} + +function toNumber(value: number | string | bigint | null | undefined): number { + if (typeof value === 'number') { + return value; + } + if (typeof value === 'bigint') { + return Number(value); + } + if (typeof value === 'string') { + return Number(value); + } + return 0; +} + +export function getDb(): SqlClient { + return getSqlClient(); +} + +export async function ensureProfile(identity: ProfileIdentity): Promise { + const sql = getSqlClient(); + await sql` + INSERT INTO profiles (id, email) + VALUES (${identity.id}, ${identity.email}) + ON CONFLICT (id) DO UPDATE + SET email = EXCLUDED.email + `; +} + +export async function upsertProfileOnboarding( + identity: ProfileIdentity, + monthlyIncomePaisa: number, + moneyHealthScore: number, + perceivedSpendPaisa: 
number, + onboardedAtIso: string +): Promise { + const sql = getSqlClient(); + await sql` + INSERT INTO profiles ( + id, + email, + monthly_income_paisa, + money_health_score, + perceived_spend_paisa, + onboarded_at + ) + VALUES ( + ${identity.id}, + ${identity.email}, + ${monthlyIncomePaisa}, + ${moneyHealthScore}, + ${perceivedSpendPaisa}, + ${onboardedAtIso}::timestamptz + ) + ON CONFLICT (id) DO UPDATE + SET + email = EXCLUDED.email, + monthly_income_paisa = EXCLUDED.monthly_income_paisa, + money_health_score = EXCLUDED.money_health_score, + perceived_spend_paisa = EXCLUDED.perceived_spend_paisa, + onboarded_at = EXCLUDED.onboarded_at + `; +} + +export async function getProfileFinancialSnapshot( + userId: string +): Promise { + const sql = getSqlClient(); + const rows = (await sql` + SELECT monthly_income_paisa, perceived_spend_paisa + FROM profiles + WHERE id = ${userId} + LIMIT 1 + `) as { + monthly_income_paisa: number | string | bigint | null; + perceived_spend_paisa: number | string | bigint; + }[]; + + const row = rows[0]; + if (!row) { + return { + monthly_income_paisa: 0, + perceived_spend_paisa: 0, + }; + } + + return { + monthly_income_paisa: toNumber(row.monthly_income_paisa), + perceived_spend_paisa: toNumber(row.perceived_spend_paisa), + }; +} + +export async function countUserStatementsSince(userId: string, fromIso: string): Promise { + const sql = getSqlClient(); + const rows = (await sql` + SELECT COUNT(*)::int AS count + FROM statements + WHERE user_id = ${userId} + AND created_at >= ${fromIso}::timestamptz + `) as { count: number | string }[]; + + return toNumber(rows[0]?.count); +} + +export async function listEligibleWeeklyRecapUsers( + limit: number, + offset: number +): Promise { + const sql = getSqlClient(); + const rows = (await sql` + SELECT user_id + FROM statements + WHERE status = 'processed' + GROUP BY user_id + ORDER BY MAX(created_at) DESC + LIMIT ${limit} + OFFSET ${offset} + `) as { user_id: string }[]; + + return rows.map((row) 
=> row.user_id); +} + +export async function getProfileEmail(userId: string): Promise { + const sql = getSqlClient(); + const rows = (await sql` + SELECT email + FROM profiles + WHERE id = ${userId} + LIMIT 1 + `) as { email: string }[]; + + return rows[0]?.email ?? null; +} + +export async function getLatestProcessedStatementForUser( + userId: string +): Promise { + const sql = getSqlClient(); + const rows = (await sql` + SELECT id, period_start, period_end, total_debits_paisa, total_credits_paisa + FROM statements + WHERE user_id = ${userId} + AND status = 'processed' + ORDER BY created_at DESC + LIMIT 1 + `) as { + id: string; + period_start: string | null; + period_end: string | null; + total_debits_paisa: number | string | bigint; + total_credits_paisa: number | string | bigint; + }[]; + + const row = rows[0]; + if (!row) { + return null; + } + + return { + id: row.id, + period_start: row.period_start, + period_end: row.period_end, + total_debits_paisa: toNumber(row.total_debits_paisa), + total_credits_paisa: toNumber(row.total_credits_paisa), + }; +} + +export async function getTopCategoryTotalsForStatement( + statementId: string, + userId: string, + limit: number +): Promise { + const sql = getSqlClient(); + const rows = (await sql` + SELECT category, SUM(amount_paisa)::bigint AS amount_paisa + FROM transactions + WHERE statement_id = ${statementId} + AND user_id = ${userId} + AND type = 'debit' + GROUP BY category + ORDER BY SUM(amount_paisa) DESC + LIMIT ${limit} + `) as { category: string; amount_paisa: number | string | bigint }[]; + + return rows.map((row) => ({ + category: row.category, + amount_paisa: toNumber(row.amount_paisa), + })); +} + +export { toNumber }; diff --git a/apps/money-mirror/src/lib/error-handler.ts b/apps/money-mirror/src/lib/error-handler.ts new file mode 100644 index 0000000..0ec6fcb --- /dev/null +++ b/apps/money-mirror/src/lib/error-handler.ts @@ -0,0 +1,122 @@ +import * as Sentry from '@sentry/nextjs'; +import { NextResponse } 
from 'next/server'; + +// ─── API Error Responses ────────────────────────────────────────────────── + +export function apiError(message: string, status: number, context?: Record) { + if (status >= 500) { + // Log server errors with context for debugging + console.error(`[API Error ${status}]`, message, context ?? {}); + } + return NextResponse.json({ error: message }, { status }); +} + +export const API_ERRORS = { + BAD_REQUEST: (msg: string) => apiError(msg, 400), + UNAUTHORIZED: () => apiError('Unauthorized', 401), + NOT_FOUND: (resource: string) => apiError(`${resource} not found`, 404), + RATE_LIMITED: () => apiError('Too many requests', 429), + SERVER_ERROR: (e: unknown, context?: Record) => { + Sentry.captureException(e, { extra: context }); + return apiError('Internal server error', 500, context); + }, + TIMEOUT: (timeoutMs: number) => apiError(`Request timed out after ${timeoutMs}ms`, 504), +} as const; + +// ─── AI Response Parsing ────────────────────────────────────────────────── + +/** + * Safely parse an LLM response that may be wrapped in markdown codeblocks. + * Returns the parsed object or a fallback if parsing fails. + * + * @example + * const result = parseAIResponse<{ category: string }>( + * aiResponse.text, + * { category: 'uncategorized' }, + * 'categorize-task' + * ); + */ +export function parseAIResponse(rawText: string, fallback: T, context: string): T { + const cleanText = rawText + .replace(/```json\n?/g, '') + .replace(/```\n?/g, '') + .trim(); + + try { + const parsed = JSON.parse(cleanText); + return parsed as T; + } catch (e) { + console.error(`[${context}] AI response parse failed:`, cleanText.substring(0, 200)); + Sentry.captureException(e, { extra: { context, rawText: cleanText.substring(0, 500) } }); + return fallback; + } +} + +// ─── Error Classification ───────────────────────────────────────────────── + +/** + * Determine if an error from a third-party API is transient (retry-able) + * or permanent (fail fast). 
+ * + * Transient: 429, 503, 504, network timeouts + * Permanent: 400, 401, 403, 404 + */ +export function isTransientError(statusCode: number): boolean { + return [429, 500, 502, 503, 504].includes(statusCode); +} + +// ─── Auth Header Validation ─────────────────────────────────────────────── + +/** + * Validate a shared secret header on worker/cron routes. + * Use for routes that are "internal" but externally reachable. + * + * @example + * const authError = validateWorkerAuth(request, process.env.CRON_SECRET!); + * if (authError) return authError; + */ +export function validateWorkerAuth( + request: Request, + expectedSecret: string, + headerName = 'x-worker-key' +): NextResponse | null { + const provided = request.headers.get(headerName); + if (!provided || provided !== expectedSecret) { + return API_ERRORS.UNAUTHORIZED(); + } + return null; +} + +// ─── Timeout Wrapper ────────────────────────────────────────────────────── + +/** + * Wrap an async operation with a timeout using AbortController. + * Required for all AI model calls on Vercel (stay under 9s). 
+ * + * @example + * const result = await withTimeout( + * (signal) => gemini.generateContent({ ..., signal }), + * 8500, + * 'gemini-categorize' + * ); + */ +export async function withTimeout( + fn: (signal: AbortSignal) => Promise, + timeoutMs: number, + context: string +): Promise { + const controller = new AbortController(); + const timer = setTimeout(() => controller.abort(), timeoutMs); + + try { + const result = await fn(controller.signal); + clearTimeout(timer); + return result; + } catch (e) { + clearTimeout(timer); + if (controller.signal.aborted) { + throw new Error(`[${context}] Timed out after ${timeoutMs}ms`); + } + throw e; + } +} diff --git a/apps/money-mirror/src/lib/pdf-parser.ts b/apps/money-mirror/src/lib/pdf-parser.ts new file mode 100644 index 0000000..a35c54f --- /dev/null +++ b/apps/money-mirror/src/lib/pdf-parser.ts @@ -0,0 +1,72 @@ +/** + * T4 — PDF Text Extraction Service + * + * Accepts a raw PDF Buffer, extracts plain text, and returns it. + * The buffer is consumed in-memory and never written to disk (Option A). + * + * Privacy guarantee: the caller is responsible for immediately nulling + * the buffer reference after calling extractPdfText() so it becomes + * eligible for GC. See /api/statement/parse/route.ts. + */ + +import { PDFParse } from 'pdf-parse'; + +export interface PdfExtractionResult { + text: string; + pageCount: number; +} + +export class PdfExtractionError extends Error { + constructor( + message: string, + public readonly code: 'EMPTY_FILE' | 'PARSE_FAILED' | 'EMPTY_TEXT' | 'PASSWORD_PROTECTED' + ) { + super(message); + this.name = 'PdfExtractionError'; + } +} + +/** + * Extract plain text from a PDF buffer. + * + * @param buffer - Raw PDF file bytes. Must be >0 bytes. + * @returns Extracted text and page count. + * @throws PdfExtractionError on empty input or parse failure. 
+ */ +export async function extractPdfText(buffer: Buffer): Promise { + if (!buffer || buffer.length === 0) { + throw new PdfExtractionError('PDF buffer is empty', 'EMPTY_FILE'); + } + + let text: string; + let pageCount: number; + + try { + const parser = new PDFParse({ data: buffer, verbosity: 0 }); + const result = await parser.getText(); + text = result.text ?? ''; + pageCount = result.total ?? 1; + await parser.destroy(); + } catch (err) { + if (err instanceof PdfExtractionError) throw err; + const msg = err instanceof Error ? err.message : String(err); + if (msg.includes('No password') || msg.includes('PasswordException')) { + throw new PdfExtractionError('PDF is password-protected', 'PASSWORD_PROTECTED'); + } + throw new PdfExtractionError(`Failed to parse PDF: ${msg}`, 'PARSE_FAILED'); + } + + const trimmed = text.trim(); + + if (!trimmed) { + throw new PdfExtractionError( + 'PDF appears to be a scanned image or has no extractable text', + 'EMPTY_TEXT' + ); + } + + return { + text: trimmed, + pageCount, + }; +} diff --git a/apps/money-mirror/src/lib/posthog.ts b/apps/money-mirror/src/lib/posthog.ts new file mode 100644 index 0000000..bef15b2 --- /dev/null +++ b/apps/money-mirror/src/lib/posthog.ts @@ -0,0 +1,50 @@ +// ─── Server-side (API Routes / Cron) ────────────────────────────────────── + +// Required packages: posthog-node + +import { PostHog } from 'posthog-node'; + +let _posthogServer: PostHog | null = null; + +/** + * Returns a singleton PostHog server client. + * Always call posthogServer.shutdown() at the end of serverless functions. + */ +export function getPostHogServer(): PostHog { + if (!_posthogServer) { + _posthogServer = new PostHog(process.env.POSTHOG_KEY!, { + host: process.env.POSTHOG_HOST ?? 'https://app.posthog.com', + flushAt: 1, // Flush immediately in serverless + flushInterval: 0, + }); + } + return _posthogServer; +} + +/** + * Convenience wrapper: capture a server-side event and flush. 
+ * Use this in API routes and cron workers. + * + * Single emission source rule: if this event is captured server-side, + * do NOT also capture it client-side via useEffect. + */ +export async function captureServerEvent( + distinctId: string, + event: string, + properties?: Record +): Promise { + const client = getPostHogServer(); + client.capture({ distinctId, event, properties }); + await client.shutdown(); + _posthogServer = null; +} + +// ─── Telemetry Verification Checklist ───────────────────────────────────── +// +// Before marking execute-plan complete, verify every event from metric-plan +// is present in the codebase. Run: +// +// grep -r "posthog.capture\|captureServerEvent" apps/[project]/src --include="*.ts" --include="*.tsx" +// +// All events listed in experiments/plans/manifest-NNN.json["posthog_events"] +// must appear in the grep output. diff --git a/apps/money-mirror/src/lib/scoring.test.ts b/apps/money-mirror/src/lib/scoring.test.ts new file mode 100644 index 0000000..a47d640 --- /dev/null +++ b/apps/money-mirror/src/lib/scoring.test.ts @@ -0,0 +1,84 @@ +import { describe, it, expect } from 'vitest'; +import { calculateMoneyHealthScore, type OnboardingAnswers } from './scoring'; + +// Helper: build answers in paisa +const income = (rupees: number) => rupees * 100; +const spend = (rupees: number) => rupees * 100; + +describe('calculateMoneyHealthScore', () => { + it('gives max score to a financially disciplined user', () => { + const answers: OnboardingAnswers = { + monthly_income_paisa: income(80000), + perceived_spend_paisa: spend(40000), // 50% savings rate + has_emergency_fund: true, + invests_in_sip: true, + has_emi_or_bnpl: false, + }; + const result = calculateMoneyHealthScore(answers); + expect(result.score).toBe(100); + expect(result.grade).toBe('A'); + expect(result.label).toBe('Financially Aware'); + }); + + it('gives low score to user spending more than they earn', () => { + const answers: OnboardingAnswers = { + 
monthly_income_paisa: income(30000), + perceived_spend_paisa: spend(35000), // negative savings + has_emergency_fund: false, + invests_in_sip: false, + has_emi_or_bnpl: true, + }; + const result = calculateMoneyHealthScore(answers); + expect(result.score).toBeLessThan(20); + expect(result.grade).toBe('F'); + expect(result.perceived_gap_pct).toBeGreaterThan(60); + }); + + it('awards 25 pts for emergency fund', () => { + const base: OnboardingAnswers = { + monthly_income_paisa: income(50000), + perceived_spend_paisa: spend(50000), // 0% savings = 5 pts + has_emergency_fund: false, + invests_in_sip: false, + has_emi_or_bnpl: true, // 0 pts + }; + const withFund = { + ...base, + has_emergency_fund: true, + }; + const without = calculateMoneyHealthScore(base); + const with_ = calculateMoneyHealthScore(withFund); + expect(with_.score - without.score).toBe(25); + }); + + it('handles zero income gracefully (no division by zero)', () => { + const answers: OnboardingAnswers = { + monthly_income_paisa: 0, + perceived_spend_paisa: spend(20000), + has_emergency_fund: false, + invests_in_sip: false, + has_emi_or_bnpl: false, + }; + const result = calculateMoneyHealthScore(answers); + expect(result.score).toBeGreaterThanOrEqual(0); + expect(result.score).toBeLessThanOrEqual(100); + }); + + it('returns correct grade boundaries — B grade (60–79)', () => { + // Spending 85% of income → savings rate ~15% → 18 pts + // + emergency fund: 25 pts → total 43 + // + no EMI: 15 pts → total 58 + // + SIP: 20 pts → total 78 → grade B + const bGradeAnswers: OnboardingAnswers = { + monthly_income_paisa: income(50000), + perceived_spend_paisa: spend(42500), // ~15% savings → 18 pts + has_emergency_fund: true, // 25 pts → 43 + invests_in_sip: true, // 20 pts → 63 + has_emi_or_bnpl: false, // 15 pts → 78 + }; + const result = calculateMoneyHealthScore(bGradeAnswers); + expect(result.grade).toBe('B'); + expect(result.score).toBeGreaterThanOrEqual(60); + expect(result.score).toBeLessThan(80); + }); 
+}); diff --git a/apps/money-mirror/src/lib/scoring.ts b/apps/money-mirror/src/lib/scoring.ts new file mode 100644 index 0000000..242f019 --- /dev/null +++ b/apps/money-mirror/src/lib/scoring.ts @@ -0,0 +1,97 @@ +/** + * MoneyMirror — Money Health Score Algorithm + * + * Produces a score (0–100) from the 5-question onboarding flow. + * The score is used as the "mirror trigger" — a low score motivates + * the user to upload their bank statement and see the reality. + * + * Scoring philosophy (Warikoo-style): + * - Savings rate is the single most predictive factor + * - Emergency fund absence is an immediate red flag + * - Investment behavior and debt awareness are secondary signals + * + * All monetary values are in PAISA to avoid floating-point errors. + */ + +export interface OnboardingAnswers { + /** Monthly take-home salary in paisa */ + monthly_income_paisa: number; + /** Estimated monthly spend in paisa (perceived) */ + perceived_spend_paisa: number; + /** Whether they have 3+ months of emergency fund */ + has_emergency_fund: boolean; + /** Whether they invest in mutual funds or SIP */ + invests_in_sip: boolean; + /** Whether they have any active EMI or BNPL */ + has_emi_or_bnpl: boolean; +} + +export interface MoneyHealthScore { + score: number; // 0–100 + grade: 'A' | 'B' | 'C' | 'D' | 'F'; + label: string; // Human-readable label + perceived_gap_pct: number; // Likely perception gap % (0–100) +} + +/** + * Calculate the Money Health Score. + * + * Breakdown (100 pts): + * 40 pts — Savings rate (perceived_spend vs income) + * 25 pts — Emergency fund presence + * 20 pts — Active SIP investment + * 15 pts — No active debt / BNPL + */ +export function calculateMoneyHealthScore(answers: OnboardingAnswers): MoneyHealthScore { + let score = 0; + + // ─── 1. Savings rate (40 points) ───────────────────────── + const savingsRate = + answers.monthly_income_paisa > 0 + ? 
Math.max(0, 1 - answers.perceived_spend_paisa / answers.monthly_income_paisa) + : 0; + + if (savingsRate >= 0.3) score += 40; + else if (savingsRate >= 0.2) score += 30; + else if (savingsRate >= 0.1) score += 18; + else score += 5; + + // ─── 2. Emergency fund (25 points) ─────────────────────── + if (answers.has_emergency_fund) score += 25; + + // ─── 3. SIP investment (20 points) ─────────────────────── + if (answers.invests_in_sip) score += 20; + + // ─── 4. No debt / BNPL (15 points) ─────────────────────── + if (!answers.has_emi_or_bnpl) score += 15; + + // ─── Grade ──────────────────────────────────────────────── + const grade = scoreToGrade(score); + const label = gradeToLabel(grade); + + // ─── Perception Gap Estimate ────────────────────────────── + // The lower the score, the higher the likely gap. + // This is an estimate to set expectations before the PDF upload. + const perceived_gap_pct = Math.round(Math.max(0, 80 - score * 0.6)); + + return { score, grade, label, perceived_gap_pct }; +} + +function scoreToGrade(score: number): MoneyHealthScore['grade'] { + if (score >= 80) return 'A'; + if (score >= 60) return 'B'; + if (score >= 40) return 'C'; + if (score >= 20) return 'D'; + return 'F'; +} + +function gradeToLabel(grade: MoneyHealthScore['grade']): string { + const labels: Record = { + A: 'Financially Aware', + B: 'Getting There', + C: 'Reality Check Needed', + D: 'Leaking Money', + F: 'Financial Blind Spot', + }; + return labels[grade]; +} diff --git a/apps/money-mirror/src/lib/statements.ts b/apps/money-mirror/src/lib/statements.ts new file mode 100644 index 0000000..e119137 --- /dev/null +++ b/apps/money-mirror/src/lib/statements.ts @@ -0,0 +1,216 @@ +export type StatementType = 'bank_account' | 'credit_card'; + +export type CreditCardEntryKind = + | 'purchase' + | 'payment' + | 'refund' + | 'reversal' + | 'fee' + | 'interest' + | 'cash_advance' + | 'other'; + +export interface ParsedStatementTransaction { + date: string; + description: 
string; + amount: number; + type: 'debit' | 'credit'; + entry_kind?: CreditCardEntryKind; +} + +export interface ParsedStatementMetadata { + institution_name: string; + statement_type: StatementType; + period_start: string; + period_end: string; + due_date: string | null; + payment_due_paisa: number | null; + minimum_due_paisa: number | null; + credit_limit_paisa: number | null; +} + +export interface ParsedStatementResult extends ParsedStatementMetadata { + transactions: ParsedStatementTransaction[]; +} + +const CREDIT_CARD_ENTRY_KINDS: CreditCardEntryKind[] = [ + 'purchase', + 'payment', + 'refund', + 'reversal', + 'fee', + 'interest', + 'cash_advance', + 'other', +]; + +function isObject(value: unknown): value is Record { + return typeof value === 'object' && value !== null; +} + +function readRequiredString(source: Record, key: string): string { + const value = source[key]; + if (typeof value !== 'string' || value.trim().length === 0) { + throw new Error(`Parsed statement is missing "${key}".`); + } + return value.trim(); +} + +function readOptionalString(source: Record, key: string): string | null { + const value = source[key]; + if (value === undefined || value === null || value === '') { + return null; + } + if (typeof value !== 'string') { + throw new Error(`Parsed statement field "${key}" must be a string.`); + } + const trimmed = value.trim(); + return trimmed.length > 0 ? 
trimmed : null; +} + +function readOptionalPaisa(source: Record, key: string): number | null { + const value = source[key]; + if (value === undefined || value === null || value === '') { + return null; + } + if (typeof value !== 'number' || !Number.isFinite(value) || value < 0) { + throw new Error(`Parsed statement field "${key}" must be a non-negative number.`); + } + return Math.round(value * 100); +} + +function normalizeInstitutionName(value: string): string { + return value.replace(/\s+/g, ' ').trim(); +} + +function validateTransaction( + value: unknown, + statementType: StatementType, + index: number +): ParsedStatementTransaction { + if (!isObject(value)) { + throw new Error(`Transaction ${index + 1} is not an object.`); + } + + const date = readRequiredString(value, 'date'); + const description = readRequiredString(value, 'description'); + const amount = value.amount; + const type = value.type; + + if (typeof amount !== 'number' || !Number.isFinite(amount) || amount <= 0) { + throw new Error(`Transaction ${index + 1} has an invalid amount.`); + } + if (type !== 'debit' && type !== 'credit') { + throw new Error(`Transaction ${index + 1} has an invalid type.`); + } + + const entryKindValue = value.entry_kind; + if (statementType === 'credit_card') { + if ( + typeof entryKindValue !== 'string' || + !CREDIT_CARD_ENTRY_KINDS.includes(entryKindValue as CreditCardEntryKind) + ) { + throw new Error(`Transaction ${index + 1} is missing a valid entry_kind.`); + } + } + + return { + date, + description, + amount, + type, + entry_kind: + typeof entryKindValue === 'string' ? (entryKindValue as CreditCardEntryKind) : undefined, + }; +} + +export function parseStatementType(value: FormDataEntryValue | null): StatementType { + if (value === 'credit_card') { + return 'credit_card'; + } + return 'bank_account'; +} + +export function getStatementTypeLabel(statementType: StatementType): string { + return statementType === 'credit_card' ? 
'Credit Card Statement' : 'Bank Account Statement'; +} + +export function getCreditsLabel(statementType: StatementType): string { + return statementType === 'credit_card' ? 'Credits & Payments' : 'Total Income'; +} + +export function buildStatementParserPrompt(statementType: StatementType): string { + if (statementType === 'credit_card') { + return `You are a parser for Indian credit card statement PDFs. +Extract statement metadata and every transaction into valid JSON only. +Return: +- institution_name: issuing bank or card institution +- period_start: YYYY-MM-DD +- period_end: YYYY-MM-DD +- due_date: YYYY-MM-DD or null +- payment_due: rupees as decimal or null +- minimum_due: rupees as decimal or null +- credit_limit: rupees as decimal or null +- transactions: array + +For each transaction return: +- date: YYYY-MM-DD +- description: normalized merchant or narration +- amount: positive rupees decimal +- type: "debit" for purchases, fees, interest, cash advances; "credit" for payments, refunds, reversals +- entry_kind: one of purchase, payment, refund, reversal, fee, interest, cash_advance, other + +Return JSON only. No markdown. No explanations.`; + } + + return `You are a parser for Indian bank account statement PDFs. +Extract statement metadata and every transaction into valid JSON only. +Return: +- institution_name: bank name +- period_start: YYYY-MM-DD +- period_end: YYYY-MM-DD +- transactions: array + +For each transaction return: +- date: YYYY-MM-DD +- description: normalized merchant or narration +- amount: positive rupees decimal +- type: "debit" or "credit" + +Return JSON only. No markdown. 
No explanations.`; +} + +export function validateParsedStatement( + parsed: unknown, + statementType: StatementType +): ParsedStatementResult { + if (!isObject(parsed)) { + throw new Error('Parsed statement payload is not an object.'); + } + + const transactionsValue = parsed.transactions; + if (!Array.isArray(transactionsValue) || transactionsValue.length === 0) { + throw new Error('Parsed statement must contain at least one transaction.'); + } + + const institutionName = normalizeInstitutionName(readRequiredString(parsed, 'institution_name')); + const periodStart = readRequiredString(parsed, 'period_start'); + const periodEnd = readRequiredString(parsed, 'period_end'); + const dueDate = readOptionalString(parsed, 'due_date'); + + const transactions = transactionsValue.map((tx, index) => + validateTransaction(tx, statementType, index) + ); + + return { + institution_name: institutionName, + statement_type: statementType, + period_start: periodStart, + period_end: periodEnd, + due_date: dueDate, + payment_due_paisa: readOptionalPaisa(parsed, 'payment_due'), + minimum_due_paisa: readOptionalPaisa(parsed, 'minimum_due'), + credit_limit_paisa: readOptionalPaisa(parsed, 'credit_limit'), + transactions, + }; +} diff --git a/apps/money-mirror/tsconfig.json b/apps/money-mirror/tsconfig.json new file mode 100644 index 0000000..cf9c65d --- /dev/null +++ b/apps/money-mirror/tsconfig.json @@ -0,0 +1,34 @@ +{ + "compilerOptions": { + "target": "ES2017", + "lib": ["dom", "dom.iterable", "esnext"], + "allowJs": true, + "skipLibCheck": true, + "strict": true, + "noEmit": true, + "esModuleInterop": true, + "module": "esnext", + "moduleResolution": "bundler", + "resolveJsonModule": true, + "isolatedModules": true, + "jsx": "react-jsx", + "incremental": true, + "plugins": [ + { + "name": "next" + } + ], + "paths": { + "@/*": ["./src/*"] + } + }, + "include": [ + "next-env.d.ts", + "**/*.ts", + "**/*.tsx", + ".next/types/**/*.ts", + ".next/dev/types/**/*.ts", + "**/*.mts" + ], + 
"exclude": ["node_modules"] +} diff --git a/apps/money-mirror/vercel.json b/apps/money-mirror/vercel.json new file mode 100644 index 0000000..d08f179 --- /dev/null +++ b/apps/money-mirror/vercel.json @@ -0,0 +1,8 @@ +{ + "crons": [ + { + "path": "/api/cron/weekly-recap", + "schedule": "30 2 * * 1" + } + ] +} diff --git a/apps/money-mirror/vitest.config.ts b/apps/money-mirror/vitest.config.ts new file mode 100644 index 0000000..802dce5 --- /dev/null +++ b/apps/money-mirror/vitest.config.ts @@ -0,0 +1,35 @@ +import { defineConfig } from 'vitest/config'; +import path from 'path'; + +/** + * Shared Vitest base config for AI Product OS apps. + * + * Usage in an app's vitest.config.ts: + * import { defineConfig, mergeConfig } from "vitest/config"; + * import baseConfig from "../../libs/shared/vitest.config"; + * export default mergeConfig(baseConfig, defineConfig({ ... })); + */ +export default defineConfig({ + test: { + environment: 'jsdom', + globals: true, + include: [ + 'src/**/*.test.ts', + 'src/**/*.test.tsx', + '__tests__/**/*.test.ts', + '__tests__/**/*.test.tsx', + ], + exclude: ['node_modules', '.next'], + setupFiles: [], + coverage: { + provider: 'v8', + include: ['src/lib/**', 'src/app/api/**'], + exclude: ['src/**/*.test.*', 'node_modules'], + }, + }, + resolve: { + alias: { + '@': path.resolve(process.cwd(), 'src'), + }, + }, +}); diff --git a/command-protocol.md b/command-protocol.md index 396c013..9fdf50c 100644 --- a/command-protocol.md +++ b/command-protocol.md @@ -289,14 +289,16 @@ Rules: 6. Child Linear tasks should be derived from `manifest-.json` when available. 7. If Linear is unavailable, raise an explicit error with operation context. Do not silently skip. 
-## Recommended Checkpoints +## Mandatory Checkpoints (not optional — execute after every pipeline run) -- After `create-issue`: `linear-bind`, then `linear-sync issue` +- After `create-issue`: `linear-bind` (auto), then `linear-sync issue` - After `create-plan`: `linear-sync plan` - After `review`, `peer-review`, `qa-test`: `linear-sync status` - After `deploy-check`: `linear-sync release` - After `learning`: `linear-close` +If a sync is skipped at any checkpoint, the next command must run the missed sync before proceeding. Never silently skip a Linear sync. + ## Default State Mapping - `create-issue`, `explore` -> discovery or triage @@ -433,3 +435,44 @@ Display: "💡 Consider running /compact before the next command to free context Before peer-review or postmortem: Display: "💡 Run /compact now — adversarial analysis needs maximum context headroom." + +--- + +# Real-Time Feedback Capture Protocol + +When the PM provides corrective feedback at any point during the pipeline, the system must act immediately — not defer to /learning. + +## Required actions (execute in order): + +1. Identify the agent or command file responsible for the failure +2. Open that file and add the new rule as a hard constraint (not a note) +3. Update CHANGELOG.md with a dated entry: what changed, why, which file +4. Update project-state.md Decisions Log with the correction + +## Rule + +Every mid-pipeline PM correction = immediate write to agent/command file + CHANGELOG entry. + +The /learning command reinforces these rules at end of cycle. It is not the first capture point. + +If feedback is not captured immediately, it will be lost if the cycle is abandoned, compacted, or restarted. 
+ +--- + +# CHANGELOG Discipline + +CHANGELOG.md must be updated whenever: + +- Any agent file is modified (agents/\*.md) +- Any command file is modified (commands/\*.md) +- Any knowledge file is modified (knowledge/\*.md) +- CLAUDE.md or command-protocol.md is modified +- Any system-level behavior is changed based on PM feedback + +Format: + +## YYYY-MM-DD — [Short title] + +**What:** One or two sentences describing the change. +**Why:** The PM feedback or postmortem finding that triggered it. +**Files:** List of files changed. diff --git a/commands/create-issue.md b/commands/create-issue.md index 78fed81..7b513dd 100644 --- a/commands/create-issue.md +++ b/commands/create-issue.md @@ -40,10 +40,32 @@ Follow this structure. --- +## 0 Pre-Flight Questions + +Before generating the issue, assess whether the raw idea is thin (missing user, behavior, or outcome). + +**If thin**: Ask 2–3 targeted questions in a single message. Wait for answers before proceeding. + +Questions to ask when needed: + +- What's the current behavior / pain point? +- What does the desired outcome look like? +- Who is the primary user? + +**If fully formed**: Skip questions and generate directly. + +Keep questions brief — one message, max 3 questions, no back-and-forth. + +--- + ## 1 Problem Statement Describe the problem clearly. +**Current State**: What is happening today / what pain exists. + +**Desired Outcome**: What should be true after this is solved. + --- ## 2 Target User @@ -74,6 +96,18 @@ Example: --- +## 6 Risks / Open Questions + +Call out anything that could block this or make it hard to build. + +- Dependencies on external systems, APIs, or other issues +- Regulatory, technical, or product unknowns +- Questions that `/explore` should resolve + +Omit this section if nothing notable applies. + +--- + # Output Format Return the result in this structure. @@ -82,7 +116,7 @@ Return the result in this structure. 
Issue Title -Problem +Problem (Current State → Desired Outcome) User @@ -92,6 +126,8 @@ Opportunity Hypothesis +Risks / Open Questions (omit if none) + --- # Post-Output Steps diff --git a/commands/deploy-check.md b/commands/deploy-check.md index 3c19922..3c35571 100644 --- a/commands/deploy-check.md +++ b/commands/deploy-check.md @@ -44,6 +44,29 @@ Follow this sequence. --- +## 0 Local Smoke Test (PM runs manually before triggering /deploy-check) + +**This gate must pass before running the command.** If any checkbox fails, fix the infra/env issue first — do not run `/deploy-check` against a broken local environment. + +``` +□ `npm run dev` starts without errors (port 3000 accessible) +□ /login loads and OTP is sent successfully (Neon Auth is provisioned + NEON_AUTH_BASE_URL filled) +□ Onboarding completes (DB write to profiles table succeeds) +□ Core feature works end-to-end (e.g., PDF upload parses, dashboard loads with data) +□ No 500 errors in browser console or terminal +□ All non-optional env vars have real values in .env.local (not empty strings) +``` + +If any checkbox fails → diagnose and fix before proceeding. Common causes: + +- `NEON_AUTH_BASE_URL` empty → provision Neon Auth on the project, copy the URL +- Missing API keys → get them from the relevant service dashboard +- Schema not applied → run `schema.sql` in Neon/Supabase SQL editor + +# Added: 2026-04-03 — Shift-left infra validation; catch env/auth gaps before PR creation + +--- + ## 1 Build Verification Ensure all components build successfully. @@ -68,6 +91,21 @@ AI model credentials Ensure secrets are not exposed. +**ENV Completeness Check (blocking)**: + +1. Scan `apps//src/` for all `process.env.*` references using grep. +2. Compare the full list against `.env.local.example` — report any variable present in code but missing from `.env.local.example` as a **BLOCKING violation**. +3. Read `.env.local` directly and check each variable's value. 
Classify each as: + - ✅ FILLED — has a real value + - ⚠️ EMPTY — present in file but value is blank (`VAR=` or `VAR=""`) + - ❌ MISSING — not in file at all +4. Report EMPTY variables as a **BLOCKING violation** — a variable that exists in the file with no value is just as broken as one that's missing. +5. Exception: variables explicitly marked `# Optional` in `.env.local.example` may be empty without blocking. + +# Added: 2026-04-02 — ENV completeness must be a gate, not a checklist item + +# Updated: 2026-04-03 — Distinguish EMPTY vs MISSING; empty values are a blocking violation + --- ## 3 Infrastructure Readiness @@ -82,6 +120,45 @@ API server environment ready --- +## 3a Database Schema Verification (blocking) + +Before proceeding, verify the database schema has been applied to the remote instance. + +**Step 1**: Read `apps//schema.sql` and extract all `CREATE TABLE` table names. + +**Step 2**: Attempt verification using MCP tools if available: + +- Query `information_schema.tables WHERE table_schema = 'public'` +- Check each expected table exists +- Report pass/fail per table + +**Step 3**: If MCP is unavailable, print a blocking prompt to the user: + +``` +⚠️ DATABASE SCHEMA CHECK REQUIRED + +The following tables must exist in your Supabase/Neon instance before deployment: + - [table_1] + - [table_2] + - ... + +To apply: + 1. Open Supabase SQL Editor (or Neon console) + 2. Run the full contents of apps//schema.sql + 3. Verify tables appear in the Table Editor + +Confirm tables are applied before proceeding. Do not continue until this is done. +``` + +**Block deployment if**: + +- MCP query shows any expected table is missing +- User has not confirmed tables are applied when MCP is unavailable + +# Added: 2026-04-02 — Schema verification must be a blocking gate, not a PR reviewer TODO + +--- + ## 4 Monitoring and Logging Verify monitoring exists. 
@@ -203,8 +280,8 @@ If all checks pass (Build, Environment, Infrastructure, Monitoring, README, Sent ## Test Plan - [ ] Run `npm test` — all tests pass - - [ ] Apply `schema.sql` to Supabase - - [ ] Set env vars from `.env.local.example` + - [x] Schema verified (all tables confirmed present before PR creation) + - [x] ENV verified (.env.local.example complete, all process.env.* vars accounted for) - [ ] Run `npm run dev` and verify user journey 🤖 Generated with AI Product OS @@ -230,9 +307,11 @@ Return output using this structure. --- +Local Smoke Test (Gate 0 — PM confirmed) + Build Status -Environment Configuration +Environment Configuration (FILLED / EMPTY / MISSING per var) Infrastructure Readiness diff --git a/commands/execute-plan.md b/commands/execute-plan.md index 1fa07a4..eea8a21 100644 --- a/commands/execute-plan.md +++ b/commands/execute-plan.md @@ -103,6 +103,15 @@ implement service logic integrate database operations handle errors and validation +**Sentry setup is a backend deliverable** — not a deploy-check task. During backend implementation: + +1. `npm install @sentry/nextjs` +2. `npx @sentry/wizard@latest -i nextjs` (creates `sentry.client.config.ts`, `sentry.server.config.ts`, updates `next.config.ts`) +3. Add `NEXT_PUBLIC_SENTRY_DSN`, `SENTRY_AUTH_TOKEN`, `SENTRY_ORG`, `SENTRY_PROJECT` to `.env.local.example` +4. 
Wrap at least one API error handler with `Sentry.captureException(e)` + +# Added: 2026-04-03 — Move Sentry setup to execute-plan; deploy-check is verification not first setup + --- ## 3 Database Setup @@ -129,6 +138,26 @@ core user journey works data flows correctly through system UI interactions behave correctly +**Read path / write path checkpoint** (required for every page in the plan): + +For every page that displays data, verify BOTH paths are implemented before marking it complete: + +- **Write path**: mutation fires (POST/upload) → result displayed in same request cycle +- **Read path**: page loads fresh (refresh, direct URL, email deep link) → same result hydrated from DB via authenticated GET endpoint + +If only the write path is implemented, the page is incomplete. Any page linked from an email CTA, push notification, or external URL that has no implemented read endpoint is a blocking gap. + +**Third-party library API verification** (required for every new npm integration): + +After wiring any npm package for the first time: + +1. Check the installed version in `package.json`. +2. Verify the generated call pattern against the package's TypeScript types or exported index — not against training knowledge. +3. Run `npm test` to confirm the integration behaves as expected. +4. Training knowledge of library APIs is not sufficient for version-sensitive properties (e.g., `result.total` vs `result.pages?.length`). + +# Added: 2026-04-03 — MoneyMirror (issue-009) + --- # Output Format @@ -151,6 +180,22 @@ Known Issues --- +## 5b File Size Budget Requirement + +The 300-line pre-commit limit must be applied **during code generation**, not discovered at commit time. + +**Rules**: + +- API route handlers: must stay under **200 lines**. If a route handles more than 2 logical phases (e.g., validate → AI call → DB write → telemetry), extract each phase into a named helper function in a separate file before writing the route past 150 lines. 
+- Page components: must stay under **250 lines**. If a page includes multiple UI states (loading, upload, result), extract each state into a named sub-component before writing the page past 200 lines. +- **Never write a large file and refactor later.** Identify extraction points upfront during task breakdown (Step 0). If a file is projected to exceed the limit, add an extraction task to the task list before writing any code. + +Violations discovered at deploy-check (pre-commit hook rejection) are execute-plan failures, not deploy-check tasks. + +# Added: 2026-04-03 — MoneyMirror (issue-009) + +--- + ## 6 Telemetry Completeness Requirement For every API route calling an external AI service, implement PostHog events in ALL branches: @@ -309,11 +354,24 @@ Before marking execute-plan complete, verify: - Key design decisions 2. **`.env.local.example`** lists every `process.env.*` reference in the codebase — including any variables added during peer-review or fix cycles. + - **Mandatory grep verification**: Run `grep -r 'process\.env\.' src/ | grep -oP 'process\.env\.\K[A-Z_]+' | sort -u` and compare against every key in `.env.local.example`. Any key in the grep output absent from `.env.local.example` is a blocking gap. Any key name that diverges (e.g., `NEXT_PUBLIC_` added or removed) is a deploy blocker. `.env.local.example` must be generated from source, never from memory. + + # Added: 2026-04-03 — MoneyMirror (issue-009) -If either is missing, execute-plan is **not complete**. A deploy-check README failure that originates here is an execute-plan prompt failure — flag it in the postmortem. +3. 
**Infrastructure provisioning is complete** (blocking — do not mark done until all pass): + - [ ] Neon/Supabase project created and `DATABASE_URL` filled in `.env.local` + - [ ] Database schema applied (`schema.sql` run in SQL editor; all tables verified) + - [ ] Auth provider provisioned (e.g., Neon Auth `NEON_AUTH_BASE_URL` obtained and filled) + - [ ] All non-optional env vars have real values in `.env.local` — no empty strings + - [ ] Sentry project created; `NEXT_PUBLIC_SENTRY_DSN`, `SENTRY_AUTH_TOKEN`, `SENTRY_ORG`, `SENTRY_PROJECT` filled in `.env.local` + - [ ] `npm run dev` boots without errors and the core user flow works end-to-end locally + +If any item above is incomplete, execute-plan is **not done** — it is blocked. Infra gaps discovered at deploy-check are execute-plan failures. # Added: 2026-03-21 — Ozi Reorder Experiment +# Updated: 2026-04-03 — Add infra provisioning checklist + Sentry setup as execute-plan hard deliverables (shift-left from deploy-check) + --- # Rules diff --git a/commands/review.md b/commands/review.md index 0d8e552..7f6cffc 100644 --- a/commands/review.md +++ b/commands/review.md @@ -41,6 +41,19 @@ Generated from the /execute-plan command. --- +# Severity Ladder + +Apply one of these four levels to every issue found: + +**CRITICAL** — Security vulnerabilities, data loss risk, auth bypass, PostHog dual-emission, crashes +**HIGH** — Logic bugs, broken user flows, performance degradation, missing RLS policies +**MEDIUM** — Code quality violations, missing edge case handling, maintainability problems +**LOW** — Style inconsistencies, minor naming issues, cosmetic improvements + +Do not approve any implementation with a CRITICAL or HIGH issue unresolved. + +--- + # Process Follow this sequence. @@ -49,9 +62,9 @@ Follow this sequence. ## 1 Architecture Check -Verify the implementation matches the system architecture defined earlier. +Verify the implementation matches the system architecture defined in `experiments/plans/plan-.md`. 
-Flag deviations or design violations. +Flag deviations or design violations. Generic "follows patterns" is not sufficient — diff against the actual plan. --- @@ -64,6 +77,13 @@ maintainability modularity reusability +Also check explicitly: + +- No `console.log` statements in production code (use structured logging) +- No TODO or FIXME comments left in submitted code +- No hardcoded secrets, API keys, or debug flags +- No `any` types in TypeScript; no `@ts-ignore` suppressions + --- ## 3 Bug Detection @@ -84,6 +104,13 @@ Check for issues such as: missing input validation unsafe API usage data exposure risks +missing RLS policies on user-scoped tables + +**PostHog dual-emission check (required — blocks approval if violated):** + +Search every `posthog.capture('event_name')` call in the codebase. For each event name, confirm it appears in exactly one place — either a server-side API route OR a client-side component, never both. Dual-emission corrupts funnel counts and makes North Star metrics unmeasurable. + +If found: severity is CRITICAL. Block approval. Require removal of the client-side re-fire. --- @@ -91,32 +118,61 @@ data exposure risks Identify issues such as: -inefficient queries +inefficient queries (missing `.limit()`, N+1 patterns) large payloads -blocking operations +blocking operations in API routes + +**Client-side performance (for `"use client"` files only):** + +- Unnecessary re-renders (state or props changes triggering heavy subtree re-renders) +- Expensive calculations not wrapped in `useMemo` +- Stable callbacks not wrapped in `useCallback` when passed as props + +--- + +## 6 React / Hooks Review + +Applies only to files with `"use client"` directive. 
+ +Check: + +- `useEffect` has a cleanup function where side effects persist (subscriptions, timers, event listeners) +- Dependency arrays are complete — no missing deps, no stale closures +- No patterns that cause infinite render loops (state update inside un-guarded effect) --- # Output Format -Return output using this structure. +Return output using this exact structure. --- -Critical Issues +## Looks Clean -Architecture Violations +List items verified as correct. Gives the PM signal on what was checked and passed. -Security Risks +- [item] -Performance Issues +--- + +## Issues Found + +For each issue: -Code Quality Improvements +**[SEVERITY]** `file:line` — description +Fix: specific suggested fix -Recommendation +--- + +## Summary -Approve -Request Changes +- Files reviewed: X +- CRITICAL issues: X +- HIGH issues: X +- MEDIUM issues: X +- LOW issues: X +- Recommendation: Approve / Request Changes --- @@ -124,6 +180,8 @@ Request Changes Be strict. -Do not approve implementations with critical issues. +Do not approve implementations with CRITICAL or HIGH issues unresolved. Prioritize system reliability and user safety. + +The "Looks Clean" section is not optional — it confirms the review was thorough, not just a defect list. diff --git a/experiments/exploration/exploration-009.md b/experiments/exploration/exploration-009.md new file mode 100644 index 0000000..e4eb406 --- /dev/null +++ b/experiments/exploration/exploration-009.md @@ -0,0 +1,153 @@ +# Exploration — Issue 009: MoneyMirror + +**Date:** 2026-04-02 +**Agent:** Research Agent +**Stage:** explore + +--- + +## Problem Analysis + +**Verified fact:** Gen Z Indians earning ₹20K–₹80K/month systematically underestimate their spending by 60–75%. The failure is structural — not ignorance, but the absence of a continuous, personalized accountability layer. + +Three compounding failures are well-documented: + +1. **Perception gap** — People believe they spend ₹30–50K. 
Actual is 1.6–1.75x that when subscriptions, BNPL instalments, food delivery, and convenience fees are added up. +2. **Invisible leaks** — Subscription creep (₹3K–5K/month), Zomato/Swiggy 50%+ markups, BNPL normalizing debt, minimum-payment credit card traps. These semi-conscious decisions aggregate into 15–25% of monthly income disappearing without a deliberate choice. +3. **Accountability vacuum** — Signal from 13 Warikoo transcripts (238,000+ chars) and 100+ Money Matters episodes: he recommends Zerodha, Coin, Ditto for investing and insurance. Zero recommendation for budgeting or behavioral coaching — not because it's not needed, but because nothing credible exists. + +--- + +## Market Scan + +| Product | Approach | Strength | Gap | +| ------------------ | ----------------------------------------- | ------------------------------ | ----------------------------------------------------------- | +| **Walnut** | Auto-reads SMS for UPI/bank transactions | Passive, automatic tracking | No coaching, no consequence framing, app abandoned/stagnant | +| **ET Money** | Investment-first: SIPs, mutual funds | Strong MF interface | Treats spending as secondary; not a coach | +| **CRED** | Rewards credit card bill payment | Massive India user base, brand | Actively rewards bad behavior — antithetical to coaching | +| **Jupiter / Fi** | Neo-bank with spend analytics | Clean UI, smart analytics | Bank-first not coach-first; requires account migration | +| **INDMoney** | Portfolio aggregator + net worth tracking | Multi-asset view | Aspirational dashboard — doesn't stop the leak | +| **Excel / Notion** | Manual budgeting | Total control | Requires discipline; no accountability; no proactive nudges | +| **ChatGPT** | Conversational finance advice | Highly capable | Stateless, reactive, no data access, no push mechanism | + +**Unserved gap:** No product in the Indian market provides: + +- Proactive, consequence-first behavioral nudges (push-model, not pull) +- India-specific 
pattern detection (BNPL stacking, UPI merchant names, Hinglish categories, credit card trap arithmetic) +- A persistent coach relationship tied to the user's own transaction data +- The "Mirror moment" — showing perceived vs actual spend side-by-side as an emotional hook + +Closest international analogs: **Monarch Money** and **Copilot** (both US). Neither is India-specific, neither operates via WhatsApp, neither uses consequence-first tone. + +--- + +## User Pain Intensity + +**Classification: Hair on fire — for the 22–30 segment.** + +| Segment | Pain Level | Reasoning | +| ---------------------------------------------- | ----------- | -------------------------------------------------------------------------------------------------------------- | +| 22–26, first job, ₹30–50K/month | 🔴 Critical | First real income, no financial habits formed, BNPL accessible for first time, spending expands to fill income | +| 26–30, lifestyle inflation trap, ₹50–80K/month | 🔴 Critical | Income grew, lifestyle grew faster, zero net worth despite 4–5 years of earning | +| 30–38, high income first-time debt | 🟡 Moderate | Higher income = higher inertia; less urgent but bigger absolute damage | + +The 22–30 segment is the sweet spot. Pain is real, frequent, and already culturally primed by Warikoo/Sharan Hegde content. They know they should fix it; they have never had a tool that makes fixing it frictionless. + +--- + +## Opportunity Assessment + +**Market:** 500M+ smartphone users under 35 in India. ₹299/month × 10,000 paying users = ₹2.99 crore MRR target at Month 12 is aggressive but directionally valid. The behavioral coaching layer that precedes investment is entirely unserved. + +**Willingness to adopt:** High with the right trigger. Warikoo's content has primed 10M+ subscribers to believe they have a spending problem — they just haven't seen their own number yet. The "Mirror moment" (onboarding score of 38/100 + breakdown) is the hook that converts awareness to action. 
+ +**Distribution assessment:** This is the primary risk. The Warikoo channel alignment is a realistic unlock — he has explicitly stated the gap and currently recommends nothing for it. + +**Hypothesis:** + +> If we build a mobile-first PWA that parses Indian bank statements, surfaces the perception gap as an emotional moment in onboarding, and delivers proactive consequence-first behavioral nudges via in-app feed + weekly email — for Gen Z Indians earning ₹20K–₹80K/month — then users will reduce avoidable discretionary spend by ≥30% within 60 days, second-month upload rate will exceed 60%, and ≥20% of non-investing users will initiate a first SIP. + +--- + +## Proposed MVP Experiment + +**North Star proxy:** Second-month statement upload rate (≥60%). Cleanest leading indicator of habit formation — if a user uploads again, the product worked. + +**Secondary signals:** + +- Day 7 Mirror Report share rate (viral coefficient proxy) +- Money Health Score improvement at Day 30 +- Notification feedback ("Helpful / Too harsh") ratio + +### What to build + +1. **Onboarding (5 questions):** Monthly take-home, rent/EMI, dependents, investing status, biggest financial worry → immediate Money Health Score (0–100) with breakdown. Value before PDF upload. +2. **Bank statement parse (HDFC only, Phase 1):** PDF upload → transaction extraction → Needs/Wants/Investments/Debt categorization → spend summary dashboard. +3. **Day 7 Mirror Report:** Perceived spend (from Q1) vs actual spend (from statement). Side-by-side. Single shareable card. Primary sharing trigger. +4. **Advisory feed (5 of 15 triggers):** Food delivery threshold, subscription pile-up, no investment in 30 days, BNPL detection, income received — covers 80% of behavioral impact. +5. **Weekly email digest:** Monday recap of last week's top 3 leaks + one specific action. 
+ +### What is intentionally excluded + +- Credit card statement parsing (validate bank parsing first) +- WhatsApp integration (validate email retention first; avoid Meta dependency before product/market fit) +- Gamification badges and streaks (engagement layer; validate core value first) +- Razorpay subscription billing (get 50 beta users; monetize after retention is proven) +- Goals system / Warikoo Ladder enforcement (Phase 2 after core coaching value is validated) +- All 15 advisory triggers (start with 5; expand after data shows which ones drive behavior change) + +### What this experiment should tell us + +1. Does the Mirror moment create enough surprise to drive statement upload? (Metric: onboarding → first upload conversion rate) +2. Does the advisory feed feel like a coach or like noise? (Metric: notification feedback rate, weekly email open rate) +3. Do users come back after month 1? (Metric: second-month upload rate — North Star) +4. Is PDF parsing reliable enough across real Indian PDFs? (Metric: parse success rate, support tickets) + +--- + +## Risk Identification + +### Technical Risks + +| Risk | Severity | Notes | +| ---------------------------------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| PDF parsing reliability | 🔴 High | Indian bank statement PDFs are inconsistent — HDFC has 3–4 format variants (net banking vs branch-printed vs iMobile). Expect 85% accuracy initially; need 95%+ for trust. **Start with HDFC only and validate accuracy before adding banks.** | +| Gemini/LLM merchant classification | 🟡 Medium | Hinglish merchant names ("SWGY*ORDERID", "PAYTM*12345") require custom mapping. Generic LLM will mis-categorize. Build India-specific merchant → category mapping table as first-class artifact. 
| +| No real-time transaction feed | 🟡 Medium | MVP depends on manual PDF upload — no live feed. Limits advisory timeliness. Accepted limitation; Phase 3 would explore RBI Account Aggregator framework. | +| Vercel timeout on large PDFs | 🟡 Medium | 3-month HDFC statement with 200+ transactions + AI categorization may exceed 10s serverless limit. Move PDF processing to background job (queue + async result polling). | + +### Market Risks + +| Risk | Severity | Notes | +| ---------------------------- | --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Trust with financial data | 🔴 High | Asking users to upload bank statements is a high-trust ask. Lead with privacy architecture in onboarding: PDF deleted after parse, only structured data retained, card masking, RLS on all tables. | +| Warikoo alignment dependency | 🟡 Medium | Distribution thesis partially depends on Warikoo channel. Build Mirror Report card as independent organic sharing loop that doesn't require any influencer relationship. | +| User motivation decay | 🟡 Medium | Finance apps see high Day-1 engagement and rapid falloff. Design second-month upload as a ritual (reminder + reward), not a task. | + +### Distribution Risks + +| Risk | Severity | Notes | +| ---------------------------- | --------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| Manual PDF upload friction | 🟡 Medium | Multi-step friction (find → download → upload). Mitigate with bank-specific in-product instructions. | +| CAC without organic flywheel | 🟡 Medium | ₹299/month requires CAC < ₹150. Day 7 Mirror Report shareable card is the primary k-factor investment. Design it as a social moment. 
| + +--- + +## Final Recommendation + +**Recommendation: Build.** + +Three factors drive this decision: + +1. **Gap confirmed from primary research** — 13 Warikoo transcripts, 238K characters, 100+ Money Matters episodes. Zero product recommendation for behavioral coaching. This is primary signal from the most trusted voice in Indian personal finance. +2. **Mirror moment is a testable, concrete hypothesis** — Onboarding → perceived vs actual spend gap is verifiable in the first product interaction. If users aren't surprised, the hypothesis fails cleanly and early. +3. **Distribution channel is pre-aligned** — Warikoo has created the demand. His 10M+ subscribers are primed, aware, and motivated. MoneyMirror closes the action gap his content creates. + +**MVP constraints:** + +- HDFC bank statement parsing only — validate reliability before adding banks +- In-app feed + weekly email only — no WhatsApp until email retention is validated +- 5 advisory triggers only — validate behavioral impact before building all 15 +- No paywall in beta — 50 real users uploading real statements; monetize after second-month upload rate is measured + +**Primary risk to monitor:** PDF parsing reliability. If HDFC parse accuracy is below 90% on diverse real-world statements, the entire advisory engine is built on noisy data. Validate this before shipping any other feature. 
diff --git a/experiments/ideas/issue-009.md b/experiments/ideas/issue-009.md new file mode 100644 index 0000000..ced4339 --- /dev/null +++ b/experiments/ideas/issue-009.md @@ -0,0 +1,302 @@ +# Issue 009 — MoneyMirror: AI-Powered Personal Finance Coach + +**Created:** 2026-04-01 +**Revised:** 2026-04-01 +**Stage:** create-issue +**Source:** 13 @warikoo YouTube video transcripts (238,000+ chars analyzed) + founder product brief + +--- + +## Issue Title + +MoneyMirror — AI-Powered Personal Finance Coach for Gen Z India + +--- + +## Problem + +Gen Z Indians earning ₹20K–₹80K/month consistently underestimate their actual spending by 60–75%. The root cause is not ignorance — it is the complete absence of a continuous, personalized accountability system between payday and zero. + +Three compounding failures drive this: + +**1. The Perception Gap** +People believe they spend ₹30–50K/month. When you add up subscriptions they forgot about, food delivery they normalized, convenience fees they never noticed, and EMIs they underestimate — they are spending 60–75% more than they think. No tool tells them this clearly, proactively, and without sugarcoating. + +**2. Invisible Leaks at Scale** +Subscription creep (₹3,000–5,000/month across OTT + food delivery + SaaS memberships), convenience premiums (Zomato markup 50%+, 80M orders/month Feb 2025), BNPL debt traps (Simpl, LazyPay, Slice), and the minimum payment credit card trap all drain 15–25% of monthly income without the user making a conscious spending decision. + +**3. The Accountability Vacuum** +Ankur Warikoo has spoken to over 100 real people about their finances across his Money Matters series. The pattern is the same every time. He recommends Zerodha for stocks, Coin for MFs, Ditto for insurance. He has never recommended anything for budgeting, expense tracking, or behavioral coaching — because it does not exist yet. Existing apps (Walnut, ET Money) show you what happened after the fact. 
You look, feel vague guilt, do nothing. CRED rewards bad behavior. ChatGPT answers if you remember to ask. Excel works if you are disciplined enough. None of these are a coach that shows up for you. + +The result: 75% of Indian families carry debt, less than 3% are adequately insured, and most have under 1 month of emergency savings. Financial nihilism ("₹1 crore isn't enough anyway, so why bother?") is the dominant mindset in the 22–30 cohort. + +--- + +## Target User + +**Primary:** Gen Z Indians, age 22–30, monthly take-home ₹20K–₹80K + +- Salaried professionals in Tier 1/2 cities +- UPI-native, mobile-first +- Finance-aware but not finance-practicing — they follow Warikoo, Sharan Hegde, Pranjal Kamra but do not consistently act on the advice +- Have never seen a clear picture of their own spending + +**Secondary:** Millennials age 30–38 in the lifestyle inflation trap — high income, zero net worth growth, first-time debt. + +--- + +## Why This Problem Matters + +**For the user:** The cost of inaction compounds. +₹5,000/month in avoidable food delivery + subscription spend = ₹60,000/year = ₹3.2L+ over 5 years if invested instead. A Gen Z Indian who builds the financial awareness habit at 24 vs 34 does not just save more — they retire with 3–4x more wealth due to compounding. + +**For the market:** India has 500M+ smartphone users under 35. The behavioral coaching layer of personal finance — the step that comes _before_ investing — is entirely unserved at scale. Warikoo's 10M+ subscriber base represents a primed, unmonetized demand signal. His content creates the problem awareness. MoneyMirror closes the action gap. + +**For society:** RBI's financial literacy mandate, 40%+ YoY growth in BNPL credit outstanding, and India's 2047 wealth aspiration make this the right problem at exactly the right time. 
+
+---
+
+## Opportunity
+
+If the problem is solved, the following outcome is achievable:
+
+- **Product:** A mobile-first PWA where users upload their bank and credit card statements and the AI tells them exactly what happened, what it will cost them over time if they continue, and the three specific things to fix — with no sugarcoating. A coach that comes to you via email (Phase 1) and WhatsApp (Phase 2), not a dashboard you have to remember to open.
+- **Market:** ₹299/month × 10,000 paying users = ₹29.9 lakh MRR target at Month 12. CAC < ₹150 via referral loop + shareable monthly report cards. Gross margin ~80%.
+- **Moat:** India-specific merchant intelligence (UPI handles, BNPL codes, Hinglish merchant names, credit card fee detection) + behavioral data flywheel + the Day 7 Mirror Report as a viral sharing trigger.
+- **Growth engine:** Month 1 shareable report card ("I reduced food delivery by 42% using MoneyMirror") creates organic k-factor > 1.2. The Mirror Report moment — seeing perceived vs actual spend side by side — is the primary sharing trigger.
+- **Strategic:** Natural alignment with @warikoo's stated gap across 100+ videos. Co-creation or affiliate partnership is a realistic distribution channel.
+
+---
+
+## Initial Hypothesis
+
+If we build a mobile-first web app (PWA) that parses Indian bank and credit card statements, identifies 15 specific problematic financial patterns, delivers consequence-first advisory messages in-app and via email (Phase 1) / WhatsApp (Phase 2), and enforces Warikoo's financial priority ladder in the goals system — for Gen Z Indians earning ₹20K–₹80K/month — it will:
+
+1. Reduce avoidable discretionary spend by ≥30% within 60 days of active use
+2. Drive first SIP initiation for ≥20% of users who had no active investment before onboarding
+3. Generate a Money Health Score improvement of 8–12 points in the first 30 days
+4. 
Achieve second-month statement upload rate of ≥60% (the primary retention signal) + +The Day 30 definition of success: user has a higher savings rate, lower food delivery spend, at least one cancelled unused subscription, and if previously un-invested — one SIP started. + +--- + +## Full Product Vision + +### Platform + +Mobile-first PWA. No app store required. Phone number + OTP login — no email required at signup. + +--- + +### Onboarding (5 Questions) + +1. Monthly take-home salary +2. Rent or home loan EMI amount +3. Number of financial dependents +4. Whether they currently invest anything +5. Single biggest financial worry + +Immediately generates a Money Health Score (0–100) with a breakdown explaining every component. Score of 38/100 with a clear "Here is why" creates the hook. User is now motivated to upload a statement. + +--- + +### Statement Parsing — Bank + +Upload a PDF bank statement. AI extracts every transaction, categorizes using the Needs / Wants / Investments / Debt framework, and presents a complete picture of the last 1–3 months. + +**Supported banks (Phase 1):** HDFC, SBI, ICICI, Axis, Kotak +**PDF is deleted immediately after parse.** Only structured transaction data is retained. + +--- + +### Statement Parsing — Credit Card (Core Feature) + +Credit card statement parsing is not an afterthought. When a credit card statement is uploaded, the AI extracts: + +- Outstanding balance, minimum payment due, due date, credit limit +- All transactions categorized +- Easy-to-miss charges: forex markup fees, late payment fees, finance charges, annual fee warnings +- Reward points earned this cycle + +**Credit card intelligence layer:** + +- **Minimum payment trap:** "Your outstanding is ₹18,400. Paying the minimum ₹920/month means you pay it off in 38 months and pay ₹14,600 in interest. Paying ₹3,500/month clears it in 6 months and costs ₹1,800 in interest. The difference is ₹12,800." 
+- **Reward points reality check:** Are you spending more to earn points that are worth less than you think? +- **Utilization ratio:** Outstanding ÷ credit limit. Flagged when above 30% (CIBIL score risk). +- **Auto-pay flag:** If no auto-pay detected, alert fires 3 days before due date. + +**Supported cards (Phase 1):** HDFC, ICICI, SBI, Axis + +--- + +### Dashboard + +**Weekly view (resets every Monday):** + +- Spend breakdown across 4 categories with budget vs actual +- Top 3 leaks this week +- Single AI-generated sentence summarizing the week +- Savings rate as a hero metric + +**Monthly view:** + +- 3–6 month trends per category +- Subscription audit panel: every recurring charge listed, unused ones flagged +- Savings rate prominently displayed +- EMI load % (what share of income is committed to debt repayment) +- Money Health Score with component breakdown + +**The Day 7 Mirror Report:** +Side-by-side: what the user estimated their monthly spend to be at onboarding vs what it actually is from their statements. Most users see a gap of 50–80%. This is the moment of genuine surprise that makes people share the product. + +--- + +### AI Advisory Engine — 15 Specific Triggers + +The advisory engine is not a chatbot. It is a proactive system that monitors transactions and fires specific messages when patterns are detected. Tone: direct, consequence-first, Warikoo-style. Maximum 1 notification per day per user. + +Every message follows this internal structure: Fact → Exact number → Long-term cost if unchanged → One specific alternative → Reminder of stated goal. 
+ +**The 15 triggers:** + +| # | Trigger | What fires | +| --- | ------------------------------------------------------ | ------------------------------------------------------------------------------------- | +| 1 | Food delivery > ₹3,000 in a week | Total weekly cost → annual projection → home-cook alternative → investment equivalent | +| 2 | Subscription pile-up above threshold | Full list of recurring charges → which have zero usage → total monthly cost | +| 3 | New EMI detected | Full true cost over loan term vs lump sum → what this commits per month | +| 4 | 3+ purchases after 10PM in one week | Names the late-night pattern → questions whether these would be bought at 10AM | +| 5 | No investment transaction for 30 days | Exact SIP opportunity cost → what ₹2,000/month from age 24 becomes | +| 6 | Month-over-month improvement in any category | Specific acknowledgment → what the freed money could do | +| 7 | Wants spend up >10% MoM | Shows the trend → projects where it goes in 6 months | +| 8 | Any BNPL transaction (Simpl, LazyPay, Slice, Uni) | Cumulative BNPL balance across all services → real cost | +| 9 | Credit card due date in 3 days, no full payment set up | Exact minimum payment vs full payment interest cost | +| 10 | Category budget hit 80% | Projects month-end overspend → one specific cut available | +| 11 | Savings below 1 month of expenses | Explains emergency fund risk with a real scenario | +| 12 | Income received | Immediate allocation prompt before it disappears | +| 13 | Goal milestone hit (25%, 50%, 75%, 100%) | Specific celebration + next step | +| 14 | November — 80C deduction gap | Exact remaining 80C headroom + ELSS recommendation with tax saving amount | +| 15 | 3+ same-merchant orders in 24 hours | Frequency spike + real daily/monthly cost | + +**Advisory modes:** + +- Hardcore (default): Direct, consequence-first, no softening +- Coaching: Gentler tone, more questions, less commands + +--- + +### Goals System — Warikoo Priority Ladder 
(Enforced) + +Users cannot create an investment goal before completing prior steps. This is enforced in the product, not just suggested. + +``` +Step 1 ☐ Term insurance + health insurance ← LOCKED until confirmed +Step 2 ☐ Build 3–6 month emergency fund ← Unlocks after Step 1 +Step 3 ☐ Pay off high-interest debt (>15%) ← Unlocks after Step 2 +Step 4 ☐ Start investing (Nifty 50 index SIP) ← Unlocks after Step 3 +Step 5 ☐ Optimize and grow ← Unlocks after Step 4 +``` + +Each goal has: exact monthly action required, sacrifice trade-off, progress bar, and milestone check-ins. + +--- + +### Notification Architecture + +**Phase 1 (MVP — email + in-app):** + +- In-app advisory feed: every triggered message with read/unread state, severity indicator, and feedback button (helpful / too harsh) +- Email: advisory message on trigger day + weekly recap every Monday + full monthly report on the 1st + +**Phase 2 (after email retention is validated):** + +- WhatsApp via WATI (Indian startup, live in 24 hours, ~₹2,500/month — faster than applying directly to Meta) +- Same content as email but with 95%+ open rates +- Quick Log: user texts "spent 450 on lunch" → logged automatically without opening the app + +--- + +### Gamification + +**Streaks:** Budget streak (consecutive days under daily budget), Investment streak (consecutive months SIP invested), No-Craving streak (days without late-night or impulse purchase category) + +**8 Achievements:** + +- First Line of Defense — confirmed term + health insurance +- Emergency Ready — 3-month emergency fund complete +- Debt Slayer — paid off a debt ahead of schedule +- Investor Initiated — first SIP set up +- Consistency King — 6-month unbroken SIP streak +- Habit Hacker — broke a craving pattern for 30 days +- Score 75 — Money Health Score crossed 75/100 +- Mirror Shared — shared a MoneyMirror report card publicly + +**Monthly shareable report card:** Instagram/LinkedIn-ready image card. 
Shows score change, biggest win, and one key metric (e.g. "reduced food delivery 42%"). This is the primary organic growth mechanism. + +--- + +### Tech Stack (Revised) + +| Layer | Technology | Reason | +| -------------- | ------------------------------------------------------------- | -------------------------------------------------------------------- | +| Frontend | Next.js 14 (PWA) | Mobile-first, no app store, SSR | +| Styling | Tailwind CSS + Radix UI | Fast, accessible | +| Charts | Recharts | React-native, smooth | +| Backend | Next.js API routes (Phase 1) / FastAPI (Phase 2 if ML needed) | Single codebase for MVP speed | +| Database | PostgreSQL (Neon or Supabase) | Relational, serverless-friendly, RLS built-in | +| Auth | Supabase Auth | Phone OTP + social login, India-first | +| AI Advisory | GPT-4o / Claude 3.5 Sonnet | Structured behavioral coaching prompts | +| PDF Parsing | PyMuPDF + pdfplumber | Digital bank statement extraction | +| Email | Resend + React Email | Transactional, beautiful templates, free tier sufficient for Phase 1 | +| WhatsApp (Ph2) | WATI | Indian startup, live in 24h, Meta-compliant | +| Payments | Razorpay | Indian gateway — UPI + cards + wallets | +| Monitoring | Sentry | Error tracking | + +**Build estimate (solo or small team):** + +- Phase 1 (PDF parse + dashboard + AI advisory + email): 7–9 weeks +- Phase 2 (WhatsApp + credit card intelligence + gamification): +4–5 weeks +- Full MVP ready for beta: ~12 weeks + +--- + +## Non-Negotiables + +These are engineering constraints that cannot be compromised at any pipeline stage: + +1. **Paisa integers only.** All monetary amounts stored as integers in paisa. ₹450 = stored as `45000`. No floating point anywhere near money. Absolute rule. + +2. **PDF deleted after parse.** Statement PDFs are processed server-side, transactions extracted to structured JSON, then the original file is deleted immediately. Raw files are never persisted. + +3. 
**Card masking.** Only the last 4 digits of any bank account or card number are ever stored. Full numbers are never written to any database table. + +4. **SEBI disclaimer on every investment-adjacent screen or message.** Exact copy: _"This is not financial advice. MoneyMirror is not SEBI-registered."_ + +5. **Maximum 1 notification per day per user** across all channels combined. + +6. **Row Level Security on all financial data tables.** Users can only ever access their own records. This is set at the database level, not just the application layer. + +7. **Privacy Policy live before first user signs up.** Not a placeholder. An actual policy. + +8. **Warikoo Priority Ladder enforced in product.** Investment goals are blocked until Step 1 (insurance) and Step 2 (emergency fund) are confirmed. This is a hard UI gate, not an advisory message. + +--- + +## Source Material + +- 13 @warikoo YouTube video transcripts, 238,000+ characters analyzed +- Primary signals: "Salary Aati Toh Hai Jaati Kahan Hai", 100+ Money Matters episodes on subscription traps, BNPL debt, EMI lies, financial nihilism +- Gap confirmed: Warikoo recommends Zerodha, Coin, Ditto, INDMoney — zero recommendation for budgeting or behavioral coaching exists +- Competitive scan: Walnut (passive, abandoned), ET Money (investment-first), CRED (rewards bad behavior), Jupiter/Fi (bank-first not coach-first), Excel (no accountability), ChatGPT (stateless, reactive not proactive) + +--- + +## One Sentence + +MoneyMirror is the product Ankur Warikoo has been describing the need for across a hundred videos — the one that comes to you, tells you the truth about your money, and does not let you pretend you did not hear it. + +--- + +## Next Step + +Send to Research Agent for validation via `/explore`. 
diff --git a/experiments/linear-sync/issue-009.json b/experiments/linear-sync/issue-009.json new file mode 100644 index 0000000..f180815 --- /dev/null +++ b/experiments/linear-sync/issue-009.json @@ -0,0 +1,37 @@ +{ + "issue_number": "009", + "issue_title": "MoneyMirror — AI-Powered Personal Finance Coach for Gen Z India", + "team_id": "70aea0d1-a706-481f-a0b7-3e636709ba77", + "team_name": "Vijaypmworkspace", + "project_id": "c0052da3-a2c3-4c24-aba0-bf833e122c2d", + "project_name": "issue-009 — MoneyMirror — AI-Powered Personal Finance Coach for Gen Z India", + "project_url": "https://linear.app/vijaypmworkspace/project/issue-009-moneymirror-ai-powered-personal-finance-coach-for-gen-z-8464834e8c78", + "root_issue_id": "VIJ-11", + "root_issue_identifier": "VIJ-11", + "root_issue_url": "https://linear.app/vijaypmworkspace/issue/VIJ-11/issue-009-moneymirror-ai-powered-personal-finance-coach-for-gen-z", + "labels": { + "discovery": "b05592c1-47a6-4fdd-9a08-7a6287d4f1f8", + "planning": "c115a797-9a9a-4474-aec9-977473efa69a", + "execution": "7c533cde-cb79-4377-a158-9807bc567ebc", + "review": "2a0f0e88-6e46-4b24-8456-6020a76150a1", + "blocked": "51308c88-4551-4fad-a286-c5dbdc735abc", + "release_ready": "3f059d92-75e0-4412-90f6-0fe1bcfc2a1e", + "completed": "3d396021-0cab-46d0-8320-c10ddd72af6d", + "feature": "70095fc1-871c-427e-bc1f-2c29b8d87aa8" + }, + "documents": {}, + "tasks": {}, + "last_sync_mode": "drift-correction", + "last_sync_timestamp": "2026-04-03T10:10:24Z", + "pipeline_status": "learning", + "linear_status": "Done", + "closeout_document_id": "97bb3d9b-6f13-49c7-9f06-827d15ad6cd6", + "closeout_document_url": "https://linear.app/vijaypmworkspace/document/issue-009-closeout-snapshot-aee923543aa2", + "pr_link": "https://github.com/shadowdevcode/ai-product-os/pull/14", + "comments": { + "execute_plan_status": "d765a19d-0486-419e-ae0e-8a2dab21afad", + "qa_test_status": "6c7a878e-4dc3-45f5-a909-04dcc245cccf", + "deploy_check_release": 
"5353d196-fa72-4977-a4cb-7c0104a77e32", + "drift_correction": "2c932588-28d3-4000-af4d-06eb80f35aa5" + } +} diff --git a/experiments/plans/manifest-009.json b/experiments/plans/manifest-009.json new file mode 100644 index 0000000..cbe295e --- /dev/null +++ b/experiments/plans/manifest-009.json @@ -0,0 +1,104 @@ +{ + "issue": "issue-009", + "project": "MoneyMirror", + "phases": [ + { + "id": "phase-1", + "name": "Foundation", + "parallel": false, + "depends_on": [], + "tasks": [ + { + "id": "T1", + "name": "Initialize Next.js 14 project", + "agent": "frontend-engineer", + "files_to_create": ["package.json", "tailwind.config.js"], + "files_to_modify": [], + "verification": "npm run build", + "test_file": "__tests__/init.test.ts" + }, + { + "id": "T2", + "name": "Setup Supabase Auth & Neon Schema", + "agent": "backend-architect", + "files_to_create": ["schema.sql"], + "files_to_modify": [], + "verification": "psql -c '\\dt'", + "test_file": "__tests__/schema.test.ts" + }, + { + "id": "T3", + "name": "Implement Onboarding & Scoring", + "agent": "product-agent", + "files_to_create": ["src/app/onboarding/page.tsx", "src/lib/scoring.ts"], + "files_to_modify": [], + "verification": "npm run test src/lib/scoring.test.ts", + "test_file": "src/lib/scoring.test.ts" + } + ] + }, + { + "id": "phase-2", + "name": "Parsing Engine", + "parallel": false, + "depends_on": ["phase-1"], + "tasks": [ + { + "id": "T4", + "name": "PDF Text Extraction Service", + "agent": "backend-engineer", + "files_to_create": ["src/lib/pdf-parser.ts"], + "files_to_modify": [], + "verification": "npm run test src/lib/pdf-parser.test.ts", + "test_file": "src/lib/pdf-parser.test.ts" + }, + { + "id": "T5", + "name": "Gemini HDFC Parser", + "agent": "backend-engineer", + "files_to_create": ["src/app/api/statement/parse/route.ts"], + "files_to_modify": [], + "verification": "curl -X POST http://localhost:3000/api/statement/parse", + "test_file": "__tests__/api/parse.test.ts" + } + ] + }, + { + "id": "phase-3", 
+ "name": "The Mirror Dashboard", + "parallel": true, + "depends_on": ["phase-2"], + "tasks": [ + { + "id": "T8", + "name": "Mirror Card & Dashboard UI", + "agent": "frontend-engineer", + "files_to_create": ["src/app/dashboard/page.tsx", "src/components/MirrorCard.tsx"], + "files_to_modify": [], + "verification": "npm run build", + "test_file": "__tests__/ui/dashboard.test.tsx" + }, + { + "id": "T9", + "name": "Advisory Engine Triggers", + "agent": "backend-engineer", + "files_to_create": ["src/lib/advisory-engine.ts"], + "files_to_modify": [], + "verification": "npm run test src/lib/advisory.test.ts", + "test_file": "src/lib/advisory.test.ts" + } + ] + } + ], + "env_vars": [ + "NEXT_PUBLIC_SUPABASE_URL", + "NEXT_PUBLIC_SUPABASE_ANON_KEY", + "SUPABASE_SERVICE_ROLE_KEY", + "GEMINI_API_KEY", + "RESEND_API_KEY", + "POSTHOG_KEY", + "CRON_SECRET" + ], + "schema_tables": ["profiles", "statements", "transactions", "advisory_feed"], + "posthog_events": ["onboarding_completed", "statement_uploaded", "mirror_report_shared"] +} diff --git a/experiments/plans/plan-009.md b/experiments/plans/plan-009.md new file mode 100644 index 0000000..3929c6b --- /dev/null +++ b/experiments/plans/plan-009.md @@ -0,0 +1,192 @@ +# Plan 009 — MoneyMirror: AI-Powered Personal Finance Coach + +**Issue:** 009 +**Project:** MoneyMirror +**Stage:** plan +**Status:** active +**Date:** 2026-04-02 + +--- + +## 1. Plan Summary + +MoneyMirror is a mobile-first PWA built for Gen Z Indians to solve the "perception gap" in personal finance. The MVP validates the core value proposition: **Can seeing the truth about your money (perceived vs. actual spend) drive a statement upload and behavioral change?** + +We will build a high-trust, low-friction parsing engine for HDFC bank statements, a "Mirror Report" sharing hook, and a proactive AI advisory system delivering no-sugarcoating financial coaching. + +--- + +## 2. 
Product Specification (Product Agent) + +### Product Goal + +Reveal the 60–75% "perception gap" in spending and drive a second-month statement upload rate of ≥60%. + +### Target User + +- **Primary:** Gen Z Indians (22–30), earning ₹20K–₹80K/month. +- **Behaviors:** UPI-native, Zomato/Swiggy frequenters, finance-aware but action-light. + +### User Journey + +1. **Onboarding:** Answer 5 questions -> Get instant "Money Health Score." +2. **Mirror Trigger:** See "Perceived Spend" baseline. High-friction barrier (PDF upload) is motivated by the score. +3. **The Mirror Moment:** Upload HDFC PDF -> AI generates actual spend list -> Side-by-side comparison. +4. **Coaching:** Receive 2–3 "no sugarcoating" advisory triggers (subscription leaks, convenience tax). +5. **Retention:** Weekly Monday morning email recap summarizing leaks. + +### MVP Scope + +- **Include:** Phone/OTP login, HDFC PDF parsing (last 3 months), Perceived vs. Actual dashboard, Top 5 advisory triggers, Resend email integration. +- **Exclude:** WhatsApp integration (Phase 2), Credit Card parsing (Phase 2), Gamification, Net Worth tracking. + +### Success Metrics + +- **North Star:** 2nd-month Statement Upload Rate (≥60%). +- **Activation:** Onboarding Score -> PDF Upload Conversion (≥40%). +- **Viral:** "Mirror Report" card shares (≥10% of active users). + +--- + +## 3. UX Design (Design Agent) + +### User Flow + +- **Landing:** Minimalist, high-urgency headline -> "Find my score." +- **Progressive Onboarding:** One question per screen. Animated score reveal. +- **Upload Center:** Instructions for HDFC NetBanking PDF -> Drag & Drop -> "Cleaning the mirror..." (Processing). +- **Dashboard:** "The Truth" (side-by-side card), Scrollable Advisory Feed, Category "Leaks" list. + +### UI Components (shadcn/ui + Tailwind 4) + +- `MirrorCard`: 50/50 split showing "Perceived: ₹30k" vs "Actual: ₹48k." 
+- `LeakBadge`: Highlighted transaction with consequence text (e.g., "₹450 on Zomato convenience fees = 2 days of rent"). +- `ScoreDial`: High-fidelity SVG dial (Red -> Green). + +--- + +## 4. System Architecture (Backend Architect Agent) + +### System Overview + +- **Frontend/Backend:** Next.js 14 Monolith on Vercel. +- **AI Engine:** Gemini 1.5 Flash (Processing) + Gemini 1.5 Pro (Refining Advisory). +- **Notifications:** Resend (Transactional Email). +- **Telemetry:** PostHog (Event tracking). + +### API Endpoints + +- `POST /api/auth/otp`: Supabase Auth trigger. +- `POST /api/statement/parse`: Receives PDF -> AI Processing -> Returns structured JSON. +- `GET /api/dashboard`: Fetches mirror stats + advisory feed. +- `POST /api/cron/weekly-recap`: Triggers Resend email fan-out. + +### Data Flow + +1. User uploads PDF to `api/statement/parse`. +2. Serverless function extracts text (PDF.js) -> Sends to Gemini Flash with structured schema. +3. Raw PDF is **deleted immediately**. +4. Categorized transactions saved to Neon DB as **paisa integers (BigInt)**. +5. Advisory engine runs (Gemini Pro) to generate 3 highlights. + +### Security Pre-Approval Gate + +1. **RLS:** Enabled on `profiles`, `statements`, `transactions`, `advisory_feed`. +2. **Worker Auth:** `POST /api/cron/*` requires `CRON_SECRET` header. +3. **Rate Limiting:** `api/statement/parse` limited to 3 uploads/day per user. +4. **Env Vars:** `SUPABASE_URL`, `SUPABASE_SERVICE_ROLE_KEY`, `GEMINI_API_KEY`, `RESEND_API_KEY`, `POSTHOG_KEY`, `CRON_SECRET`. + +### Mandatory Pre-Approval (Serverless + AI) + +- **AI Timeout:** `Promise.race` at **9s** for Gemini calls. +- **Paisa Storage:** Store ₹450.50 as `45050`. +- **Privacy:** `statements` table stores metadata, NOT the original file. + +--- + +## 5. 
Database Schema (Database Architect Agent) + +### Database: Neon (PostgreSQL) + +```sql +-- Profiles: User financial context +CREATE TABLE profiles ( + id UUID REFERENCES auth.users(id) PRIMARY KEY, + perceived_spend BIGINT NOT NULL, -- in paisa + target_savings_rate INT DEFAULT 20, + money_health_score INT, + onboarded_at TIMESTAMP WITH TIME ZONE DEFAULT now() +); + +-- Statements: Metadata for uploads +CREATE TABLE statements ( + id UUID DEFAULT gen_random_uuid() PRIMARY KEY, + user_id UUID REFERENCES profiles(id), + bank_name TEXT NOT NULL, -- 'HDFC' + period_start DATE, + period_end DATE, + created_at TIMESTAMP WITH TIME ZONE DEFAULT now() +); + +-- Transactions: The structured data +CREATE TABLE transactions ( + id UUID DEFAULT gen_random_uuid() PRIMARY KEY, + statement_id UUID REFERENCES statements(id), + user_id UUID REFERENCES profiles(id), + amount BIGINT NOT NULL, -- in paisa + merchant_name TEXT, + category TEXT, -- 'Needs', 'Wants', 'Investment', 'Debt' + transaction_date DATE, + is_leak BOOLEAN DEFAULT false +); + +-- Advisory Feed: Coaching messages +CREATE TABLE advisory_feed ( + id UUID DEFAULT gen_random_uuid() PRIMARY KEY, + user_id UUID REFERENCES profiles(id), + title TEXT, + message TEXT, + severity TEXT, -- 'info', 'warning', 'critical' + is_read BOOLEAN DEFAULT false, + created_at TIMESTAMP WITH TIME ZONE DEFAULT now() +); +``` + +--- + +## 6. Implementation Tasks + +### Phase 1: Foundation (3 days) + +- [ ] T1: Initialize Next.js 14 project with Tailwind 4 + shadcn/ui. +- [ ] T2: Setup Supabase Auth (Phone/OTP) and Neon DB schema. +- [ ] T3: Implement 5-question onboarding flow + scoring algorithm. + +### Phase 2: The Parsing Engine (4 days) + +- [ ] T4: Implement PDF.js text extraction service. +- [ ] T5: Build Gemini 1.5 Flash parser with structured HDFC schema. +- [ ] T6: Implement transaction categorization logic (Needs/Wants/Leaks). +- [ ] T7: Add PDF secure deletion post-parse. 
+ +### Phase 3: The Mirror (3 days) + +- [ ] T8: Build Dashboard UI with "Side-by-Side" Mirror Card. +- [ ] T9: Implement Advisory Engine (Top 5 triggers: Swiggy/Zomato, Subscriptions, BNPL). +- [ ] T10: Setup Shareable Mirror Report card generator (HTML-to-Canvas). + +### Phase 4: Polish & Retention (2 days) + +- [ ] T11: Integrate Resend for Weekly Email Recap. +- [ ] T12: Setup PostHog telemetry for Mirror Moment conversion. + +--- + +## 7. Risks & Mitigation + +| Risk | Severity | Mitigation | +| ---------------- | --------- | ---------------------------------------------------------------------------------- | +| PDF Format Drift | 🔴 High | Start with HDFC exclusively. Use LLM for structural flexibility rather than regex. | +| User Data Trust | 🔴 High | Explicit "Zero Retention" policy for raw PDFs displayed during upload. | +| AI Latency | 🟡 Medium | 9s timeout fallback. Use Optimistic UI for categorization. | +| CAC/Distribution | 🟡 Medium | Focus on shareable "Mirror Report" viral loop; align with Warikoo persona. | diff --git a/experiments/results/metric-plan-009.md b/experiments/results/metric-plan-009.md new file mode 100644 index 0000000..96bf765 --- /dev/null +++ b/experiments/results/metric-plan-009.md @@ -0,0 +1,106 @@ +# Metric Plan — MoneyMirror: AI-Powered Personal Finance Coach + +**Issue:** 009 +**Project:** MoneyMirror +**Date:** 2026-04-02 +**Agent:** Analytics Agent (Metric Plan) +**Status:** COMPLETE — Instrumentation verified by QA + +--- + +## North Star Metric + +**Metric:** **Second-month Statement Upload Rate** +**Definition:** % of users who upload at least one bank statement in their second 30-day period after onboarding. +**Hypothesis Connection:** This is the primary signal of habit formation and the product's durable utility. If a user returns to "clean the mirror" again, the accountability mechanism is working. 
+ +--- + +## Supporting Metrics + +| Metric | Definition | Target | Proxy For | +| ----------------------- | ----------------------------------------------------- | ------------ | ----------------------------------------- | +| **Activation Rate** | Onboarding Score Revealed → First PDF Upload Success | ≥40% | Motivation of "Money Health Score" hook | +| **Mirror Moment Delta** | Average (Actual Spend - Perceived Spend) % | ≥50% | Magnitude of the "Perception Gap" | +| **Recap Engagement** | Weekly Monday Recap Email Open Rate | ≥30% | Proactive coaching effectiveness | +| **Advisory Utility** | Ratio of "Helpful" vs "Too Harsh" button clicks | ≥8:1 | Coaching tone/accuracy fit | +| **Score Velocity** | Change in Money Health Score between 1st & 2nd Upload | +5 to 10 pts | Behavioral change (savings/category cuts) | + +--- + +## Event Tracking Plan + +PostHog is the primary telemetry platform. All events are configured with a single-emission source rule. + +### 1. Conversion & Activation + +- **`onboarding_completed`** + - **Trigger:** Final onboarding question answered. + - **Properties:** `perceived_spend_paisa`, `salary_paisa`, `primary_worry`, `current_investor`. +- **`statement_parse_started`** + - **Trigger:** PDF upload initiated. +- **`statement_parse_success`** (Critical) + - **Trigger:** Gemini parse complete + DB transaction success. + - **Properties:** `total_debits_paisa`, `transaction_count`, `period_days`, `parse_duration_ms`. +- **`mirror_viewed`** + - **Trigger:** Side-by-side card loaded in dashboard with state. + +### 2. Engagement & Coaching + +- **`advisory_feedback_clicked`** + - **Trigger:** User clicks "Helpful" or "Too Harsh" on an Advisory. + - **Properties:** `type` (Subscription, Leak, BNPL), `sentiment` (helpful/harsh). +- **`weekly_recap_email_sent`** + - **Trigger:** Cron worker executes Resend request. +- **`share_intent_sent`** + - **Trigger:** System share sheet triggered via "Share My Mirror". + +### 3. 
Failure Monitoring
+
+- **`statement_parse_timeout`**
+  - **Trigger:** 9s Gemini abort controller fires.
+- **`statement_parse_failed`**
+  - **Trigger:** PDF.js extraction or LLM JSON parse error.
+
+---
+
+## Funnel Definition
+
+The path from awareness to behavioral change:
+
+1. **Awareness:** Home page visit (`landing_page_viewed`)
+2. **Setup:** Onboarding complete (`onboarding_completed`)
+3. **Hook:** Money Health Score revealed (In-app state)
+4. **Action:** PDF Upload Success (`statement_parse_success`)
+5. **Aha! Moment:** Perceived vs Actual comparison viewed (`mirror_viewed`)
+6. **Retention:** Weekly digest opened (Email event)
+7. **Habit:** Month 2 Statement Upload (North Star)
+
+---
+
+## Success Thresholds
+
+| Metric                 | Threshold | Alert Trigger                           |
+| ---------------------- | --------- | --------------------------------------- |
+| **North Star**         | ≥60%      | <40% (Re-examine coaching value)        |
+| **Parse Success Rate** | ≥95%      | <85% (HDFC format drift / PDF.js issue) |
+| **Onboarding → PDF %** | ≥40%      | <20% (Trust barrier or friction)        |
+| **Email Open Rate**    | ≥30%      | <15% (Spam filter or bad timing)        |
+
+---
+
+## Implementation Notes
+
+- **Primary Tool:** PostHog (Integrated via `posthog-js` on frontend and `posthog-node` on backend).
+- **Database Logic:** Ground-truth calculation for 60-day reduction (Hypothesis #1) to be performed via Neon SQL queries comparing `category` sums across monthly statement batches.
+- **Instrumentation Audit:** All 10 core events verified as functional by QA (Ref: `qa-test-009.md`).
+- **Dashboarding:** Funnel and North Star cohorts to be built in PostHog. Revenue queries to be calculated in Neon against `profiles` table.
+
+---
+
+## Rules Check
+
+- [x] Connects to hypothesis? Yes (Perception Gap, 2nd-month retention).
+- [x] Simple & measurable? Yes (Upload rate).
+- [x] No vanity metrics? Yes (Focus on retention and behavioral delta).
+- [x] Verified wired? Yes (Confirmed in `qa-test-009.md`). 
diff --git a/experiments/results/peer-review-009-r2.md b/experiments/results/peer-review-009-r2.md new file mode 100644 index 0000000..0bfe392 --- /dev/null +++ b/experiments/results/peer-review-009-r2.md @@ -0,0 +1,132 @@ +# Peer Review Round 2: MoneyMirror (issue-009) + +**Date:** 2026-04-02 +**Agent:** Peer Review Agent (Round 2 — re-review after fixes) +**Status:** APPROVED — all prior blockers fixed +**Input:** Updated implementation (`apps/money-mirror/`), `peer-review-009.md`, `plan-009.md` + +--- + +## Prior Findings — Verification + +### ✅ A1 — Dashboard rehydration path added + +`apps/money-mirror/src/app/api/dashboard/route.ts` now provides the planned authenticated dashboard read path, and `apps/money-mirror/src/app/dashboard/page.tsx` hydrates from persisted DB state on first load. Refreshes and weekly recap deep links now reconstruct the latest processed mirror instead of dropping users back into a blank upload state. + +### ✅ R1 — Parse persistence is now fail-closed + +`apps/money-mirror/src/app/api/statement/parse/route.ts` now: + +- reads onboarding spend from `profiles.id` instead of a non-existent `user_id` column +- inserts `statements` as `processing` +- aborts with 500 if `transactions` insert fails +- deletes the parent row on failure +- marks the statement `processed` only after child rows persist +- emits success telemetry only after the full write succeeds + +This removes the partial-write corruption path. + +### ✅ R2 — Weekly recap failure accounting is now truthful + +`apps/money-mirror/src/app/api/cron/weekly-recap/worker/route.ts` returns HTTP 502 on email-send failure, and `apps/money-mirror/src/app/api/cron/weekly-recap/route.ts` rejects unsuccessful worker responses before counting success. `weekly_recap_completed` now reflects real delivery outcomes. + +### ✅ P1 — Coaching feed now renders in the core flow + +The dashboard no longer calls the authenticated advisories route without auth. 
The post-upload path reuses the authenticated `/api/dashboard` response, so advisories arrive with the rest of the mirror data and the "Truth Bombs" section can render correctly. + +### ✅ A2 — Weekly recap coverage no longer stops at the first 1000 rows + +`apps/money-mirror/src/app/api/cron/weekly-recap/route.ts` now paginates through processed statements in 1000-row batches and deduplicates user IDs across the full eligible set before fan-out. + +--- + +## Challenge Mode — Assumption Audit (Round 2) + +### Assumption 1: "Lazy clients and build-safe route setup could mask runtime config problems" + +**Why it might be risky:** Moving Supabase and Resend initialization out of module scope fixes build stability, but a bad deployment could now fail only at request time. + +**Counterargument strength:** Strong. Runtime failure is the correct failure mode for missing secrets in a dynamic route. The build now validates the app shape without requiring production credentials, while request handlers still throw if configuration is absent. + +### Assumption 2: "Dashboard hydration by latest statement is sufficient for the MVP" + +**Why it might be risky:** A user with multiple uploaded statements may expect explicit history selection rather than implicit "latest statement" behavior. + +**Counterargument strength:** Strong. The current plan only requires a persisted dashboard, not a statement history browser. The new route also accepts `statement_id`, which gives a clean extension path without blocking the MVP. + +### Assumption 3: "Deleting the parent statement on transaction failure is acceptable" + +**Why it might be risky:** A delete-on-failure strategy can hide evidence of failed parse attempts if auditability is needed later. + +**Counterargument strength:** Strong. For this product phase, correctness of saved user-facing financial data matters more than retaining broken partial rows. The route still emits failure telemetry, which is the right audit trail for MVP. 
+ +--- + +## Multi-Perspective Challenge (Round 2) + +### 1. Reliability Engineer — "What breaks at 3am?" + +No blocking reliability issue remains in the peer-reviewed paths. The parse flow is now fail-closed, recap metrics match actual worker outcomes, and build-safe client initialization removes the prior deploy-time crash path. + +### 2. Adversarial User / Confused User — "How does a real person break this?" + +No blocking issue remains in the reviewed flows. A user can now refresh `/dashboard` or arrive from the recap email and still see persisted mirror data instead of being forced into duplicate uploads. + +### 3. Future Maintainer — "What will confuse the next engineer?" + +No blocking issue remains. The implementation now aligns with the planned route surface: `/api/dashboard` exists, dashboard hydration is explicit, and the worker/master contract for recap failures is coherent. + +--- + +## Lens 1: Architecture & Scalability + +No blocking issues. + +The architecture now matches the product plan materially better: + +- persisted authenticated dashboard read path exists +- dashboard rehydrates from DB, not from ephemeral client memory +- recap fan-out paginates instead of silently truncating at 1000 rows + +--- + +## Lens 2: Edge Cases, Security & Reliability + +No blocking issues. + +Validated improvements: + +- no false parse success on incomplete writes +- no stale `processed` parent rows without transactions +- no false-positive recap success counts on failed emails +- build-safe route initialization for missing env vars at compile time + +--- + +## Lens 3: Product Coherence & PM Alignment + +No blocking issues. 
+ +The core product loop now behaves as promised: + +- upload → persisted mirror +- refresh/deep-link → same persisted mirror +- coaching feed shows in the primary dashboard experience +- recap email CTA lands on a meaningful dashboard state + +--- + +## Prompt Autopsy Check + +No new prompt gaps beyond the ones already captured in `peer-review-009.md`. The fixes validated those proposed rule additions rather than surfacing a new class of failure. + +--- + +## Verdict + +**APPROVED.** + +Validation: + +- `npm test` — PASS +- `npm run build` — PASS diff --git a/experiments/results/peer-review-009.md b/experiments/results/peer-review-009.md new file mode 100644 index 0000000..2f4d77e --- /dev/null +++ b/experiments/results/peer-review-009.md @@ -0,0 +1,160 @@ +# Peer Review: MoneyMirror (issue-009) + +**Date:** 2026-04-02 +**Agent:** Peer Review Agent +**Status:** BLOCKED — 4 MUST-FIX items, 1 MEDIUM item +**Input:** Code review results, full implementation (`apps/money-mirror/`), `schema.sql`, `plan-009.md` + +--- + +## Challenge Mode — Assumption Audit + +### Assumption 1: "The dashboard can live entirely in client memory after upload" + +**Why it's risky:** The product plan defines a persisted dashboard flow (`GET /api/dashboard`) and the weekly recap email deep-links users back to `/dashboard`. The implementation never hydrates saved statement data on page load. `dashboard/page.tsx` always starts in `"upload"` state and only shows results after a successful parse response in the same tab session. + +**Failure mode:** A user uploads a statement, sees the mirror once, then refreshes, comes back later, or clicks the weekly recap email CTA. `/dashboard` shows the upload prompt instead of their saved mirror. The retention loop promised in the plan does not exist in production behavior. + +**Counterargument strength:** Weak. This is not an MVP simplification. It breaks a core promised journey and invalidates the email CTA. 
+ +### Assumption 2: "Saving the statement row without its transactions is an acceptable non-fatal path" + +**Why it's risky:** `statement/parse` writes the parent `statements` row first, treats transaction insertion failure as non-fatal, still emits `statement_parse_success`, and returns a success payload. + +**Failure mode:** The user sees a successful parse, but the persisted system state is corrupted: dashboard advisories, future reloads, and weekly recap all depend on `transactions`. They will now read an incomplete statement that was marked `processed` even though the core child rows never landed. + +**Counterargument strength:** Weak. This is a classic partial-write integrity bug on the main value path. + +### Assumption 3: "Cron success can be inferred from HTTP 2xx alone" + +**Why it's risky:** The master cron counts failures by rejected fetches, but the worker returns HTTP 200 even when Resend fails. The master therefore reports success for failed emails. + +**Failure mode:** PM sees `weekly_recap_completed` with inflated `succeeded` counts while users received nothing. The product loop appears healthy in telemetry while the actual retention channel is broken. + +**Counterargument strength:** Weak. The current implementation still lies about delivery outcomes after the prior review fix. + +--- + +## Multi-Perspective Challenge + +### 1. Reliability Engineer — "What breaks at 3am?" + +**Finding:** `apps/money-mirror/src/app/api/statement/parse/route.ts:260-323` marks a statement as `processed`, logs success telemetry, and returns 200 even if `transactions` insert fails at lines 296-303. This leaves an internally inconsistent state that every downstream feature reads as valid. + +### 2. Adversarial User / Confused User — "How does a real person break this?" + +**Finding:** `apps/money-mirror/src/app/dashboard/page.tsx:45-107` never reloads saved mirror data. 
A user who refreshes, opens `/dashboard` from the weekly email, or returns the next day is pushed back into the upload flow and is likely to re-upload the same statement, burn through the 3/day limit, and create duplicate rows instead of seeing their existing report. + +### 3. Future Maintainer — "What will confuse the next engineer?" + +**Finding:** The plan explicitly calls for `GET /api/dashboard`, but the codebase only has `GET /api/dashboard/advisories` and a transient client state flow. The route surface and the product narrative say "dashboard is persisted," while the implementation behaves like a one-time post-upload view. That mismatch will cause maintainers to assume a rehydration path exists when it does not. + +--- + +## Lens 1: Architecture & Scalability + +### A1 (MUST FIX) — Dashboard architecture does not match the plan and does not support persisted rehydration + +**Location:** `apps/money-mirror/src/app/dashboard/page.tsx:45-107`, `apps/money-mirror/src/app/api/dashboard/advisories/route.ts:1-121`, `experiments/plans/plan-009.md:69-80` + +The architecture in `plan-009.md` specifies `GET /api/dashboard` to fetch mirror stats plus advisory feed. That route does not exist. The dashboard is purely transient client state derived from the immediate parse response. + +**Why this is blocking:** The product's retention flow depends on a durable dashboard. The weekly recap email links to `/dashboard`, but that page cannot reconstruct the user's saved mirror state from the database. + +**Required fix:** Implement a persisted dashboard read path (`GET /api/dashboard` or equivalent) that loads the latest processed statement and advisories for the authenticated user on first load. `/dashboard` must render from persisted data after refresh or deep link, not only after a fresh upload mutation. 
+ +### A2 (MEDIUM) — Weekly recap fan-out silently caps coverage at the first 1000 statement rows + +**Location:** `apps/money-mirror/src/app/api/cron/weekly-recap/route.ts:38-43` + +The master cron fetches `statements` with `.limit(1000)` and then deduplicates `user_id`s in memory. This caps the worker fan-out by statement rows, not by eligible users, and excludes later users once the dataset grows. + +**Why this matters:** The architecture is acceptable for the current MVP, but it has a clear ceiling and will degrade silently as usage grows. + +**Required fix:** Query distinct eligible users or paginate through statement rows until coverage is complete. + +--- + +## Lens 2: Edge Cases, Security & Reliability + +### R1 (MUST FIX) — Parse success is reported even when the core data write is incomplete + +**Location:** `apps/money-mirror/src/app/api/statement/parse/route.ts:260-323` + +If the `transactions` insert fails at lines 296-303, the route only logs to `console.error`, keeps the statement in `processed` state, emits `statement_parse_success`, and returns a success payload. + +**Impact:** Corrupt persisted state, false-positive telemetry, broken advisories, broken weekly recap content, and an impossible-to-debug mismatch between what the user saw and what the DB contains. + +**Required fix:** Make the statement + transaction persistence atomic. Use a transaction or keep the parent row in a non-success state unless child rows are saved. Do not emit `statement_parse_success` until the full write succeeds. + +### R2 (MUST FIX) — Weekly recap metrics still undercount failures + +**Location:** `apps/money-mirror/src/app/api/cron/weekly-recap/worker/route.ts:128-136`, `apps/money-mirror/src/app/api/cron/weekly-recap/route.ts:60-84` + +The worker returns HTTP 200 with `{ ok: false }` when Resend fails. The master only treats non-2xx as failed, so failed emails are counted as successes in `weekly_recap_completed`. 
+ +**Impact:** Retention telemetry is materially wrong. A failed email run looks healthy in both the cron response and PostHog event. + +**Required fix:** Return a non-2xx status on actual email-send failure, or make the master inspect the JSON body and reject `{ ok: false }`. + +--- + +## Lens 3: Product Coherence & PM Alignment + +### P1 (MUST FIX) — Advisory feed is never shown after upload because the authenticated route is called without auth + +**Location:** `apps/money-mirror/src/app/dashboard/page.tsx:93-101`, `apps/money-mirror/src/app/api/dashboard/advisories/route.ts:26-45` + +The advisories endpoint correctly requires a bearer token, but the dashboard fetch at lines 95-97 sends no `Authorization` header. The call returns 401, `advisories` stays empty, and the "Truth Bombs" section never renders. + +**Impact:** One of the core MVP outputs, proactive coaching, is absent from the primary user flow even when parsing succeeds. + +**Required fix:** Send the Supabase access token on the advisories request or move advisories into the persisted dashboard endpoint. 
+ +--- + +## Recommendations + +### MUST FIX + +| ID | Finding | Location | Fix | +| ------ | ---------------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------------------------ | +| **A1** | Dashboard is transient client state, not a persisted read flow | `dashboard/page.tsx`, missing `GET /api/dashboard` | Add authenticated dashboard rehydration from DB on first load | +| **R1** | Partial write still returns parse success | `statement/parse/route.ts` | Make parent + child writes atomic; only mark success after both persist | +| **R2** | Weekly recap success counts still lie on email failure | `weekly-recap/worker/route.ts`, `weekly-recap/route.ts` | Propagate worker failure as non-2xx or reject `{ ok: false }` in master | +| **P1** | Advisory feed fetch omits auth header, so coaching never appears | `dashboard/page.tsx`, `dashboard/advisories/route.ts` | Pass bearer token or consolidate into authenticated dashboard read route | + +### MEDIUM + +| ID | Finding | Location | Fix | +| ------ | ------------------------------------------------ | ----------------------- | --------------------------------------------------------------- | +| **A2** | Weekly recap fan-out caps at 1000 statement rows | `weekly-recap/route.ts` | Page through eligible users or query distinct user IDs directly | + +--- + +## Prompt Autopsy Check + +File: `agents/backend-architect-agent.md` +Section: `Mandatory Pre-Approval Checklist` +Add: "For every dashboard, report, or results page linked from navigation or email, specify the exact persisted read path that rehydrates the page on first load. Client-memory-only result screens are not acceptable." + +File: `agents/backend-architect-agent.md` +Section: `Mandatory Pre-Approval Checklist` +Add: "Any workflow that writes a parent record plus child records in the same user action must define an atomicity strategy. 
If the child write fails, the parent must not remain in a success state." + +File: `agents/peer-review-agent.md` +Section: `5 Product Alignment` +Add: "For any dashboard or report promised in the plan or linked from email, verify that a fresh page load reconstructs the exact persisted state from the database. If the experience only works immediately after a prior mutation in the same tab, block approval." + +--- + +## Verdict + +**BLOCKED.** + +Blocking issues: + +- **Lens 1 / HIGH** — Missing persisted dashboard read path; `/dashboard` cannot rehydrate saved user data after refresh or email deep link. +- **Lens 2 / HIGH** — `statement/parse` reports success even when `transactions` persistence fails, leaving corrupt saved state. +- **Lens 2 / HIGH** — Weekly recap master still overstates success because worker email failures return HTTP 200. +- **Lens 3 / HIGH** — Dashboard calls the authenticated advisories route without auth, so the coaching feed never appears in the core flow. diff --git a/experiments/results/postmortem-009.md b/experiments/results/postmortem-009.md new file mode 100644 index 0000000..e4ec88b --- /dev/null +++ b/experiments/results/postmortem-009.md @@ -0,0 +1,222 @@ +# Postmortem — MoneyMirror (issue-009) + +**Date:** 2026-04-02 +**Agent:** Learning Agent +**Pipeline cycle:** issue-009 — full 12 stages complete +**Status:** APPROVED at deploy-check — PR #14 open + +--- + +## Full Pipeline Issues Identified + +### Issue PM-1 — Dashboard transient state: persisted rehydration path absent from execute-plan output + +**Issue Observed:** +`/dashboard` was implemented as pure client memory — results were only visible immediately after a successful parse in the same browser tab. A page refresh, a direct URL load, or a click-through from the weekly recap email all returned users to a blank upload screen. + +**Root Cause:** +The plan explicitly specified `GET /api/dashboard` as a rehydration endpoint. 
The execute-plan output did not implement this route. The architecture specification described the dashboard only in terms of what the parse response must return — it did not specify the separate read path required on first load for non-upload entry points (refresh, deep link, email CTA). The backend engineer defaulted to wiring the parse response directly to the component state, which satisfied the spec's "what does the dashboard show" requirement without satisfying the "how does it load" requirement. + +**Preventative Rule:** +Every page that is linked from an email, push notification, or external URL must have its full load path specified in the architecture: which API route is called, what query it runs, and what state it returns. Implementing only the post-mutation result path is never sufficient. + +**System Improvements:** + +- `backend-architect-agent.md` Mandatory Pre-Approval Checklist: Add item requiring that every results/dashboard/report page specify the exact authenticated read path for first-load rehydration. Client-memory-only post-mutation flows must be explicitly blocked. +- `commands/execute-plan.md`: Add a final verification checkpoint — for every page in the plan, confirm both the write path (mutation → result) and the read path (direct load → same result) are implemented. + +**Knowledge Updates:** engineering-lessons.md, prompt-library.md + +--- + +### Issue PM-2 — Partial write accepted as success: transaction insert failure did not block `processed` state + +**Issue Observed:** +`statement/parse/route.ts` wrote the parent `statements` row, then inserted `transactions` in a separate operation. If transactions failed, the route logged a console error, kept the statement as `processed`, emitted `statement_parse_success`, and returned a 200 with the parsed data. Downstream reads (dashboard rehydration, weekly recap, advisories) all operated on a corrupted statement. 
+ +**Root Cause:** +The architecture spec defined the parse flow as a sequence of DB writes, but did not specify an atomicity strategy for the parent/child pair. The backend engineer treated transaction insert failure as a non-critical path (logged, continued) because there was no explicit instruction that the child write must succeed before the parent can enter a success state. This is the second consecutive cycle (after issue-006 `reorder_events` partial write) where a parent/child write sequence lacked an explicit atomicity requirement in the spec. + +**Preventative Rule:** +Any architecture spec that includes a parent record + child records written in the same user action must explicitly declare atomicity: if the child write fails, the parent must be rolled back or marked failed. Partial success is never acceptable as a terminal state for a user-facing financial data pipeline. + +**System Improvements:** + +- `backend-architect-agent.md` Mandatory Pre-Approval Checklist: Add item requiring an explicit atomicity strategy for every workflow that writes parent + child rows in a single user action. Must specify: child failure → parent rollback or failure state + error telemetry. +- `agents/code-review-agent.md`: Add check for parent/child write sequences — if parent status is set to `processed`/`success` before child writes complete, flag CRITICAL. + +**Knowledge Updates:** engineering-lessons.md + +--- + +### Issue PM-3 — Worker email failure counted as success in master cron telemetry + +**Issue Observed:** +The weekly-recap worker returned HTTP 200 with `{ ok: false }` when Resend email sending failed. The master cron counted any 2xx response as success. `weekly_recap_completed` reported inflated `succeeded` counts with `failed: 0` while users received no email. + +**Root Cause:** +The fan-out worker HTTP contract was defined at the invocation level (master calls worker) but the success/failure propagation contract was not specified. 
The backend engineer returned 200 with a JSON error payload — a common REST convention — without verifying that the master's counting logic would interpret that correctly. The code-review agent caught this (MEDIUM), but the architecture spec should have prevented it. + +**Preventative Rule:** +Fan-out worker HTTP contracts must be explicitly specified in the architecture: the worker must return a non-2xx status on any failure that should be counted as failed by the master. JSON error bodies alone are insufficient — the master must not need to inspect payloads to distinguish success from failure. + +**System Improvements:** + +- `backend-architect-agent.md`: Fan-out architecture section must state: "Worker returns non-2xx on failure. Master uses HTTP status only for success/failure accounting. Never rely on JSON body inspection for fan-out counting." + +**Knowledge Updates:** engineering-lessons.md + +--- + +### Issue PM-4 — Advisory feed fetch missing auth header: coaching never rendered in core flow + +**Issue Observed:** +`dashboard/page.tsx` called `GET /api/dashboard/advisories` without an `Authorization` header. The route correctly required a JWT, returned 401, and the advisory feed silently showed empty. The "Truth Bombs" coaching section — a core MVP output — never appeared for any user. + +**Root Cause:** +The advisory route was given proper auth during a code-review fix cycle. The dashboard component was written before that fix, using a bare `fetch()`. The two halves were never cross-verified. A route auth fix without updating all callers is an incomplete fix by definition. + +**Preventative Rule:** +After adding or enforcing auth on any API route, all client-side callers of that route must be updated in the same change. A route auth fix without updating all callers is an incomplete fix. 
+ +**System Improvements:** + +- `agents/code-review-agent.md`: Add check — for every authenticated route, verify that all `fetch()` calls to that route in client components include the required auth header. A mismatch is CRITICAL. +- `commands/execute-plan.md`: Add verification step — after wiring any authenticated route, check that all client-side callers send auth headers. + +**Knowledge Updates:** engineering-lessons.md, prompt-library.md + +--- + +### Issue PM-5 — PostHog env var mismatch: server-side telemetry dead in production + +**Issue Observed:** +`.env.local.example` declared `NEXT_PUBLIC_POSTHOG_KEY` and `NEXT_PUBLIC_POSTHOG_HOST`, but `posthog.ts` read `process.env.POSTHOG_KEY` and `process.env.POSTHOG_HOST`. Any developer following the template would configure the wrong vars. Server-side telemetry would be silently dead in any production deployment, and the `NEXT_PUBLIC_` prefix would also have leaked the PostHog key to the browser bundle. + +**Root Cause:** +The `.env.local.example` file was written from memory during execute-plan and never mechanically verified against actual `process.env.*` calls in the code. The var names diverged silently. The QA agent caught this, but a grep-based check during execute-plan could have prevented it. + +**Preventative Rule:** +`.env.local.example` must be generated from the actual `process.env.*` calls in the code — not from memory. Every key must exactly match the string used in the source. A mismatch between the example file and the actual code reference is a deploy blocker. + +**System Improvements:** + +- `commands/execute-plan.md`: Add a mandatory final step: grep all `process.env.*` references in `src/`, extract variable names, and verify every name appears in `.env.local.example`. Any discrepancy is a blocking gap before execute-plan can be marked done. +- `agents/qa-agent.md`: Promote env var key name cross-check to a standalone QA dimension with explicit grep-based verification. 
+ +**Knowledge Updates:** engineering-lessons.md + +--- + +### Issue PM-6 — File size violations at deploy-check: 300-line limit not enforced during generation + +**Issue Observed:** +`parse/route.ts` (345 lines) and `dashboard/page.tsx` (562 lines) exceeded the 300-line limit enforced by the pre-commit hook. Both required extraction work at deploy-check — three full stages after implementation. Extraction added `persist-statement.ts`, `UploadPanel.tsx`, `ParsingPanel.tsx`, and `ResultsPanel.tsx` at the last stage. + +**Root Cause:** +The 300-line file limit is enforced mechanically at commit time but is not an active constraint during code generation. Large files are written without budgeting for size. Extraction is deferred until a hook rejects the commit. + +**Preventative Rule:** +The 300-line file limit must be applied during code generation, not at commit time. Any route or page expected to contain multi-phase logic must be designed with extraction points upfront. Files projected to exceed 250 lines must be split before writing. + +**System Improvements:** + +- `commands/execute-plan.md`: Add to implementation checklist: for any API route handling more than 2 logical phases, the route handler must delegate to helpers at generation time. Target: route files under 200 lines, page files under 250 lines. +- `agents/backend-engineer-agent.md` + `agents/frontend-engineer-agent.md`: Add hard constraint: if a file is projected to exceed 250 lines during generation, extract into a helper or sub-component before writing past that limit. + +**Knowledge Updates:** engineering-lessons.md + +--- + +### Issue PM-7 — `pdf-parse` wrong result property accessed: `result.pages?.length` instead of `result.total` + +**Issue Observed:** +`pdf-parser.ts` called `result.pages?.length` to derive `pageCount`. The `pdf-parse` v2 library exposes `result.total`, not `result.pages.length`. 
`pageCount` resolved to `1` for all documents — silent incorrect behavior caught at code-review (CRITICAL). + +**Root Cause:** +The execute-plan agent generated code against its training knowledge of the `pdf-parse` API without verifying the actual installed package version's exported interface. The library API changed between versions. No verification step required checking the installed package's exports against the generated call pattern. + +**Preventative Rule:** +When generating code against a third-party package whose API has changed between major versions, verify the installed version's exported types or index against the generated call pattern. Training knowledge of library APIs is not sufficient for version-sensitive properties. + +**System Improvements:** + +- `commands/execute-plan.md`: Add step — after wiring any third-party library for the first time, check the installed version in `package.json` and verify the exported API matches the generated usage pattern. + +**Knowledge Updates:** engineering-lessons.md + +--- + +## Prompt Autopsy + +### Agent: `backend-architect-agent` + +**Missed 1:** Did not specify the persisted first-load read path for `/dashboard`. +**Root cause in prompt:** Mandatory Pre-Approval Checklist has no item requiring that every result/dashboard page specify the separate read path for refresh and deep link scenarios. +**Proposed fix:** Add to Mandatory Pre-Approval Checklist: "For every dashboard, report, or results page linked from navigation, email, or external URL: specify the exact authenticated read path for first-load rehydration. The mutation response path (result available immediately after POST) is not sufficient — the page must hydrate from the DB on any entry point. Client-memory-only result flows are blocked." + +**Missed 2:** Did not specify atomicity for parent (statements) + child (transactions) write sequence. +**Root cause in prompt:** No checklist item for multi-table write atomicity. 
+**Proposed fix:** Add to Mandatory Pre-Approval Checklist: "For every user action that writes a parent record + one or more child records: specify the atomicity strategy. If child write fails, define whether parent is rolled back or transitioned to a failed state. Partial success (parent = processed, children = missing) is never an acceptable terminal state." + +**Missed 3:** Did not specify worker HTTP contract for fan-out failure propagation. +**Root cause in prompt:** Fan-out pattern describes master → worker invocation but not the required HTTP status contract on failure. +**Proposed fix:** Add to fan-out architecture template: "Worker must return HTTP non-2xx (e.g., 502) on any failure that the master should count as failed. Master uses HTTP status only — never inspects JSON body — for success/failure accounting." + +--- + +### Agent: `backend-engineer-agent` + +**Missed 1:** Generated `dashboard/page.tsx` calling an authenticated route without an auth header. +**Root cause in prompt:** No instruction to cross-verify all fetch call sites when auth is added to a route. +**Proposed fix:** Add: "After adding authentication to any API route, search all client-side callers of that route path and verify each sends the required auth header. A fetch to an authenticated route without an Authorization header is a CRITICAL bug." + +**Missed 2:** Generated files exceeding the 300-line limit. +**Root cause in prompt:** File size limit stated as standard but not enforced during generation. +**Proposed fix:** Add: "Before writing any API route or page component expected to contain multi-phase logic, identify extraction points upfront. Route handlers must stay under 200 lines; page components must stay under 250 lines. If a file would exceed these limits, extract helpers or sub-components before writing past the limit — never write a large file and refactor later." 
+ +--- + +### Agent: `code-review-agent` + +**Missed:** Confirmed auth on advisory route but did not verify all client-side callers updated to send auth headers. +**Root cause in prompt:** Review scope is per-file; no cross-file caller verification for route auth requirements. +**Proposed fix:** Add to review checklist: "For every API route confirmed to require auth: search all `fetch()`, `axios`, and `useSWR` calls in client components targeting that route path. If any caller omits the Authorization header, flag as CRITICAL." + +--- + +### Agent: execute-plan (command) + +**Missed 1:** `.env.local.example` written from memory, not verified against source code. +**Root cause in prompt:** Final checklist requires listing all env vars but not mechanical verification against grep output. +**Proposed fix:** Add to final execute-plan checklist: "Run: grep -r 'process\\.env\\.' src/ | grep -oP 'process\\.env\\.\\K[A-Z_]+' | sort -u. Compare output against every key in .env.local.example. Any key in the grep output absent from .env.local.example is a blocking gap." + +**Missed 2:** No instruction to verify third-party package API surface against generated call patterns. +**Root cause in prompt:** No version-aware verification step for library usage. +**Proposed fix:** Add: "For any npm package being integrated for the first time, check the installed version in package.json and verify the generated call pattern against the package's TypeScript types or index exports before marking the integration complete." 
+ +--- + +## Summary Table + +| # | Issue | Stage First Visible | Stage Caught | Severity | +| ---- | ----------------------------------------- | ------------------- | ------------------------------------- | -------- | +| PM-1 | Dashboard transient — no rehydration path | execute-plan | peer-review R1 (A1) | CRITICAL | +| PM-2 | Partial write accepted as success | execute-plan | peer-review R1 (R1) | CRITICAL | +| PM-3 | Worker failure counted as success | execute-plan | review (MEDIUM) + peer-review R1 (R2) | HIGH | +| PM-4 | Advisory fetch missing auth header | post-review fix | peer-review R1 (P1) | HIGH | +| PM-5 | PostHog env var name mismatch | execute-plan | qa-test (QA1 BLOCKING) | HIGH | +| PM-6 | File size violations at deploy-check | execute-plan | deploy-check (pre-commit hook) | MEDIUM | +| PM-7 | pdf-parse wrong result property | execute-plan | code-review (CRITICAL) | HIGH | + +**Root cause pattern:** 6 of 7 issues trace to execute-plan output gaps. Under-specified architecture (PM-1, PM-2, PM-3), incomplete cross-verification at implementation time (PM-4, PM-5), no file-size budget during generation (PM-6), and training-knowledge-only library usage (PM-7). The review layer caught all issues but they should have been prevented upstream. + +**Recurring failure:** Parent/child write atomicity (PM-2) is the second consecutive cycle with this gap (issue-006 had a similar partial-write). Systemic — requires a hard checklist item in backend-architect-agent, not a one-time fix. + +**Agents requiring prompt updates:** + +1. `backend-architect-agent.md` — 3 Mandatory Pre-Approval Checklist additions +2. `backend-engineer-agent.md` — 2 hard implementation rules +3. `code-review-agent.md` — 1 caller-verification check +4. `commands/execute-plan.md` — 3 additions (env var grep, library verification, read path checkpoint) +5. 
`agents/qa-agent.md` — 1 env var key-name dimension addition diff --git a/experiments/results/qa-test-009.md b/experiments/results/qa-test-009.md new file mode 100644 index 0000000..e144c9f --- /dev/null +++ b/experiments/results/qa-test-009.md @@ -0,0 +1,168 @@ +# QA Test Report: MoneyMirror (issue-009) + +**Date:** 2026-04-02 +**Agent:** QA Testing Agent +**Status:** PASS — all blocking findings fixed +**Input:** `apps/money-mirror/`, `plan-009.md`, `peer-review-009-r2.md` + +--- + +## Automated Test Suite + +``` +npm test — 34 tests, 4 suites — ALL PASS + + categorizer.test.ts 15 tests — PASS + scoring.test.ts 5 tests — PASS + parse.test.ts 5 tests — PASS + pdf-parser.test.ts 8 tests — PASS + (additional) 1 test — PASS +``` + +Build clean: `npm run build` — PASS. + +--- + +## QA Dimension 1: Functional Testing + +| Flow | Result | +| --------------------------------------------------- | ------ | +| PDF upload → Gemini parse → categorize → DB persist | PASS | +| Dashboard hydration from DB on page refresh | PASS | +| Onboarding 5-question flow → score calculation | PASS | +| Score page reads from sessionStorage | PASS | +| Weekly recap fan-out (master → N workers) | PASS | +| Advisory engine triggers (5 advisory types) | PASS | +| Rate limit: 3 uploads/day per user enforced | PASS | +| Auth: all API routes require valid JWT | PASS | + +--- + +## QA Dimension 2: Edge Case Testing + +| Case | Result | +| ---------------------------------------------- | ------------------------------------------------- | +| Scanned/image-only PDF (empty text extraction) | PASS — 400 returned | +| File >10 MB rejected client-side | PASS | +| Non-PDF MIME type rejected | PASS | +| Rate limit reached: 4th upload blocked | PASS | +| sessionStorage unavailable (private browsing) | PASS — try/catch guards all reads/writes | +| Zero income (total_credits_paisa = 0) | PASS — division guards present in advisory engine | +| Zero debits (total_debits_paisa = 0) | PASS — MirrorCard renders ₹0 
correctly | +| Statement with 0 transactions | PASS — empty arrays handled | + +--- + +## QA Dimension 3: Failure Scenario Testing + +| Scenario | Result | +| ------------------------------------ | -------------------------------------------------------------------------------------------------- | +| Gemini API timeout (>9s) | PASS — JSON 504 via Promise.race + AbortController | +| DB transaction insert failure | PASS — fail-closed: parent row deleted, 500 returned, no false `processed` status | +| Resend email failure in worker | PASS — worker returns 502; master counts as failed; `weekly_recap_completed` reflects real outcome | +| PostHog unavailable in parse route | PASS — fire-and-forget `.catch()` prevents 500 | +| PostHog unavailable in worker routes | PASS — calls individually wrapped in `.catch()` | +| Missing `GEMINI_API_KEY` at runtime | PASS — `readRequiredEnv` throws at request time | +| Missing `SUPABASE_SERVICE_ROLE_KEY` | PASS — lazy init throws at request time, not at build | + +--- + +## QA Dimension 4: Performance Testing + +| Check | Result | +| ----------------------------------------------------- | --------------------------------------------------------- | +| Transaction query hard-capped at 1000 rows | PASS — `fetchList` enforces limit ≤1000 | +| Weekly recap paginates beyond 1000 statements | PASS — while-loop with 1000-row batches | +| PDF text truncated at 30,000 chars before Gemini call | PASS | +| Fan-out cron: workers called via `Promise.allSettled` | PASS — no sequential user processing | +| No unbounded `.select()` without `.limit()` | PASS — all queries use `fetchList` or explicit `.limit()` | + +--- + +## QA Dimension 5: UX Reliability + +| Check | Result | +| ----------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| Loading skeleton shown while dashboard hydrates | PASS | +| Inline error display (not 
alerts) | PASS — all errors rendered in DOM | +| Parsing spinner shown during upload | PASS | +| sessionStorage parse failure (malformed JSON) | PASS — try/catch in score page | +| setTimeout cleanup in score page useEffect | PASS — cleanup function returned | +| Share button on desktop browsers | **FIXED** — button now conditionally rendered only when `navigator.share` is available; hidden on desktop browsers without Web Share API | + +--- + +## Findings + +### QA1 — BLOCKING (FIXED) — PostHog env var mismatch + +**File:** `apps/money-mirror/.env.local.example` + +**Problem:** `.env.local.example` declared `NEXT_PUBLIC_POSTHOG_KEY` and `NEXT_PUBLIC_POSTHOG_HOST`, but `src/lib/posthog.ts` reads `process.env.POSTHOG_KEY` and `process.env.POSTHOG_HOST`. Any developer following the example template would set the `NEXT_PUBLIC_` prefixed vars, which the server-side PostHog client would never read — all telemetry silently dead in production. Additionally, using `NEXT_PUBLIC_` would have exposed the PostHog key to the browser bundle. + +**Fix:** Updated `.env.local.example` to use `POSTHOG_KEY` and `POSTHOG_HOST` (server-only, no `NEXT_PUBLIC_` prefix — correct for security and function). + +**Status:** FIXED + +--- + +### QA2 — MEDIUM (FIXED) — "Share My Mirror" button silently does nothing on desktop + +**File:** `apps/money-mirror/src/app/dashboard/page.tsx` + +**Problem:** The "Share My Mirror" button rendered on all browsers. On desktop browsers that don't support the Web Share API (`navigator.share` undefined), clicking the button silently did nothing — no feedback to user. + +**Fix:** Wrapped button render in `{typeof navigator !== "undefined" && navigator.share && (...)}` so the button only appears on devices/browsers that support Web Share API (primarily mobile). Desktop users don't see a broken button. 
+ +**Status:** FIXED + +--- + +## Telemetry Verification + +| Event | Emission Point | Fire-and-Forget | Result | +| ------------------------------ | -------------------------- | --------------- | ------ | +| `statement_parse_started` | parse route | `.catch()` | PASS | +| `statement_parse_rate_limited` | parse route | `.catch()` | PASS | +| `statement_parse_success` | parse route | `.catch()` | PASS | +| `statement_parse_timeout` | parse route | `.catch()` | PASS | +| `statement_parse_failed` | parse route | `.catch()` | PASS | +| `onboarding_completed` | `/api/onboarding/complete` | `.catch()` | PASS | +| `weekly_recap_triggered` | cron master | `.catch()` | PASS | +| `weekly_recap_completed` | cron master | `.catch()` | PASS | +| `weekly_recap_email_sent` | cron worker | `.catch()` | PASS | +| `weekly_recap_email_failed` | cron worker | `.catch()` | PASS | + +**PostHog unavailability test:** All server-side PostHog calls use `.catch(() => {})` or are individually try/caught before `Promise.allSettled`. A PostHog outage cannot cascade into HTTP 500s. + +**Single-emission audit:** No duplicate event emission found. Server-side events (`statement_parse_*`, `weekly_recap_*`, `onboarding_completed`) have no client-side counterparts. 
+ +--- + +## Security Spot Check + +| Check | Result | +| ------------------------------------------------------------ | ------------------------------- | +| RLS enabled on all 4 user-scoped tables | PASS — confirmed in schema.sql | +| `supabaseAdmin` used only in API routes / cron workers | PASS | +| `SUPABASE_SERVICE_ROLE_KEY` never in `NEXT_PUBLIC_` vars | PASS | +| `POSTHOG_KEY` server-only (no `NEXT_PUBLIC_` prefix) | PASS — fixed by QA1 | +| PDF buffer nulled after text extraction | PASS — zero-retention confirmed | +| Auth JWT validated in all protected routes | PASS | +| Cron routes guarded by `x-cron-secret` header check | PASS | +| No SQL injection vectors (parameterized via Supabase client) | PASS | + +--- + +## Verdict + +**PASS.** + +- 34 automated tests passing. +- 1 blocking finding (QA1) fixed: `.env.local.example` PostHog env var names corrected. +- 1 medium finding (QA2) fixed: Share button hidden on non-supporting browsers. +- No unresolved blockers. +- Telemetry resilience verified. +- Build clean. + +Pipeline may proceed to `/metric-plan`. 
diff --git a/experiments/results/review-009.md b/experiments/results/review-009.md new file mode 100644 index 0000000..263db85 --- /dev/null +++ b/experiments/results/review-009.md @@ -0,0 +1,76 @@ +# Code Review — issue-009 MoneyMirror + +**Date:** 2026-04-02 +**Reviewer:** Code Review Agent (Senior Staff Engineer) +**Plan reference:** experiments/plans/plan-009.md + +--- + +## Looks Clean + +- Auth flow in `/api/statement/parse` — correct JWT verification via `supabase.auth.getUser(token)` before any business logic +- Rate limiting — 3 uploads/day enforced via DB count query before processing +- Gemini timeout — `Promise.race` with 9s AbortController-style timeout correctly implemented +- Zero-retention PDF — buffer explicitly nulled after text extraction (`fileBuffer = null`) +- RLS policies — confirmed on all 4 tables: `profiles`, `statements`, `transactions`, `advisory_feed` +- Fan-out cron architecture — master route fans out to `/api/cron/weekly-recap/worker` via `Promise.allSettled` +- Worker auth — `x-cron-secret` header validation on both master and worker routes +- PostHog single emission source — all events fire server-side only; no client-side re-fires found +- PostHog error resilience — all non-critical telemetry calls wrapped with `.catch()` +- Paisa storage — `Math.round(tx.amount * 100)` correctly converts rupees to paisa BIGINT +- `captureServerEvent` fire-and-forget pattern on non-critical paths (rate limit, worker telemetry) +- Gemini JSON sanitization — markdown code fences stripped via regex before `JSON.parse` +- `sessionStorage` reads wrapped in `try/catch` in onboarding and score pages +- `categorizer.ts` — keyword-matching with priority order is clean and deterministic +- `advisory-engine.ts` — advisory logic correctly guarded with `perceived_spend_paisa > 0` threshold +- `error-handler.ts` — clean Sentry integration, `withTimeout`, `validateWorkerAuth` helpers +- `db.ts` — `fetchList` has 1000-row hard cap; batch `fetchByIds` pattern 
avoids N+1
+
+---
+
+## Issues Found (pre-fix state — all resolved in this cycle)
+
+**[CRITICAL]** `src/lib/pdf-parser.ts:47` — Wrong `pdf-parse` API usage
+`pages?.length` accessed on `TextResult` which exposes `.total` not `.pages.length`. Result: `pageCount` always resolved to `1` (harmless but incorrect). More importantly, the original code was never verified against the actual package (this version exports `{ PDFParse }` as a class, not a default function).
+Fix applied: `result.pages?.length ?? 1` → `result.total ?? 1`
+
+**[CRITICAL]** `src/app/api/dashboard/advisories/route.ts:16` — Missing authentication
+Route used `SUPABASE_SERVICE_ROLE_KEY` with no JWT verification. Any caller with a valid UUID could fetch another user's advisory data and transactions.
+Fix applied: Added `supabase.auth.getUser(token)` check + `.eq("user_id", user.id)` ownership filter on the statements query.
+
+**[HIGH]** `src/app/api/statement/parse/route.ts:253` — `perceived_spend_paisa` not written to statements INSERT
+The `statements` table has a `perceived_spend_paisa BIGINT NOT NULL DEFAULT 0` column required by the PERCEPTION_GAP advisory. The INSERT omitted this column, so it always defaulted to 0 and the advisory never fired.
+Fix applied: Fetch `profile.perceived_spend_paisa` from the `profiles` table before INSERT; include it in the statements row.
+
+**[HIGH]** `src/lib/posthog.ts:38` — PostHog singleton dead after first event
+`captureServerEvent` called `client.shutdown()` but never reset `_posthogServer = null`. Subsequent calls in the same Lambda invocation reused the shut-down client, silently dropping events.
+Fix applied: `_posthogServer = null` after `client.shutdown()`.
+
+**[MEDIUM]** `src/app/api/cron/weekly-recap/route.ts:60` — Inaccurate succeeded/failed counts
+Fetch errors inside the `map` were caught (`.catch()`) and returned `undefined`, making them appear as `fulfilled` to `Promise.allSettled`. `failed` was always 0. 
+Fix applied: Replaced `.catch()` swallow with `.then(res => { if (!res.ok) throw new Error(...) })` so non-2xx worker responses correctly land in `rejected`.
+
+**[MEDIUM]** `src/app/score/page.tsx:64` — `setTimeout` in `useEffect` missing cleanup
+`setTimeout(() => setRevealed(true), 600)` with no cleanup. On fast navigation away, `setState` called on unmounted component.
+Fix applied: `const timer = setTimeout(...); return () => clearTimeout(timer);`
+
+---
+
+## PostHog Dual-Emission Check
+
+Searched all `posthog.capture` / `captureServerEvent` calls across `apps/money-mirror/src`:
+
+- All 10 events fire exclusively from server-side routes
+- No client-side PostHog calls found in any `"use client"` component
+- Result: **PASS** — no dual-emission violations
+
+---
+
+## Summary
+
+- Files reviewed: 14
+- CRITICAL issues: 2 (both fixed)
+- HIGH issues: 2 (both fixed)
+- MEDIUM issues: 2 (both fixed)
+- LOW issues: 0
+- Recommendation: **Approve** — all issues resolved, build should be clean
diff --git a/knowledge/engineering-lessons.md b/knowledge/engineering-lessons.md
index 7d1cf78..d5f8e16 100644
--- a/knowledge/engineering-lessons.md
+++ b/knowledge/engineering-lessons.md
@@ -155,7 +155,7 @@ improvement: Peer Review and QA Agents must explicitly verify that any state cha
 date: 2026-03-11
 project: Project Clarity (issue-004)
 issue: Telemetry events defined in the Metric Plan were absent in the codebase during Deploy Check.
-root_cause: The pipeline executed `/metric-plan` _after_ all implementation and QA stages, disconnecting analytics definition from the build cycle.
+root_cause: The pipeline executed `/metric-plan` _after_ all implementation and QA stages, disconnecting analytics definition from the build cycle.
 rule: Telemetry instrumentation (e.g. PostHog client) must be bundled into the feature implementation phase rather than treated as a post-QA checklist item. 
improvement: Execute Plan agent must mandate integration of telemetry trackers during the build. Metric Plan should ideally shift left conceptually. @@ -307,3 +307,80 @@ rule: No product or architecture plan can be approved unless every single succes improvement: backend-architect-agent now requires explicitly verifying metric verifiability. --- + +--- + +date: 2026-04-03 +project: MoneyMirror (issue-009) +issue: Dashboard transient state — GET /api/dashboard rehydration path absent from execute-plan output; refresh and email deep links dropped users to blank upload screen +root_cause: Architecture spec described the dashboard only in terms of post-mutation result. The separate first-load read path (for refresh, direct URL, email CTA) was not specified. Backend engineer satisfied "what does the dashboard show" without satisfying "how does it load on any entry point." +rule: Every page that is linked from an email, push notification, or external URL must have its full load path specified in the architecture: which API route is called, what query it runs, and what state it returns. Implementing only the post-mutation result path is never sufficient. +improvement: backend-architect-agent Mandatory Pre-Approval Checklist item 10: every results/dashboard/report page linked from navigation, email, or external URL must specify the authenticated read path for first-load rehydration. Client-memory-only post-mutation flows are blocked. commands/execute-plan.md: add final verification — for every page in the plan, confirm both the write path and the read path are implemented. + +--- + +--- + +date: 2026-04-03 +project: MoneyMirror (issue-009) +issue: Partial write accepted as success — transaction insert failure did not block `processed` state; downstream reads operated on a corrupted statement +root_cause: Architecture spec defined the parse flow as a sequence of DB writes but did not specify an atomicity strategy for the parent/child pair. 
Backend engineer treated transaction insert failure as non-critical because there was no explicit instruction that the child write must succeed before the parent enters a success state. Second consecutive cycle with this failure (issue-006 had similar partial-write gap). +rule: Any architecture spec that includes a parent record + child records written in the same user action must explicitly declare atomicity: if the child write fails, the parent must be rolled back or marked failed. Partial success is never acceptable as a terminal state for a user-facing data pipeline. +improvement: backend-architect-agent Mandatory Pre-Approval Checklist item 11: for every user action that writes a parent record + one or more child records, specify the atomicity strategy. Child failure → parent rollback or failed state + error telemetry. "Partial success" terminal states are blocked. code-review-agent: flag CRITICAL if parent status is set to processed/success before child writes complete. + +--- + +--- + +date: 2026-04-03 +project: MoneyMirror (issue-009) +issue: Fan-out worker returned HTTP 200 with { ok: false } on email failure — master cron counted it as success; weekly_recap_completed reported inflated succeeded counts with failed: 0 +root_cause: Fan-out worker HTTP contract was defined at invocation level but success/failure propagation contract was not specified. Backend engineer returned 200 with a JSON error body — a common REST convention — without verifying that the master's counting logic would interpret it correctly. +rule: Fan-out worker HTTP contracts must be explicitly specified in the architecture: the worker must return a non-2xx status on any failure that should be counted as failed by the master. JSON error bodies alone are insufficient — the master must not need to inspect payloads to distinguish success from failure. 
+improvement: backend-architect-agent fan-out architecture section must state: "Worker returns non-2xx (e.g., 502) on any failure the master should count as failed. Master uses HTTP status only — never inspects JSON body — for success/failure accounting."
+
+---
+
+---
+
+date: 2026-04-03
+project: MoneyMirror (issue-009)
+issue: Advisory feed fetch missing auth header — coaching never rendered in core flow; dashboard called GET /api/dashboard/advisories without Authorization header returning 401 silently
+root_cause: Auth was added to the advisory route during a code-review fix cycle. The dashboard component was written before that fix using a bare fetch(). The two halves were never cross-verified. A route auth fix without updating all callers is an incomplete fix.
+rule: After adding or enforcing auth on any API route, all client-side callers of that route must be updated in the same change. A route auth fix without updating all callers is an incomplete fix.
+improvement: code-review-agent: for every API route confirmed to require auth, search all fetch(), axios, and useSWR calls in client components targeting that route path. If any caller omits the Authorization header, flag as CRITICAL. commands/execute-plan.md: after wiring any authenticated route, verify all client-side callers send auth headers.
+
+---
+
+---
+
+date: 2026-04-03
+project: MoneyMirror (issue-009)
+issue: PostHog env var name mismatch — .env.local.example declared NEXT_PUBLIC_POSTHOG_KEY but posthog.ts read POSTHOG_KEY; server-side telemetry would be silently dead in any production deployment
+root_cause: .env.local.example was written from memory during execute-plan and never mechanically verified against actual process.env.\* calls in the code. Var names diverged silently.
+rule: .env.local.example must be generated from the actual process.env.\* calls in the code — not from memory. Every key must exactly match the string used in source. 
A mismatch between the example file and the actual code reference is a deploy blocker. +improvement: commands/execute-plan.md: add mandatory final step — grep all process.env.\* references in src/, extract variable names, and verify every name appears in .env.local.example. Any discrepancy is a blocking gap before execute-plan can be marked done. qa-agent: promote env var key name cross-check to a standalone QA dimension with explicit grep-based verification. + +--- + +--- + +date: 2026-04-03 +project: MoneyMirror (issue-009) +issue: File size violations at deploy-check — parse/route.ts (345 lines) and dashboard/page.tsx (562 lines) exceeded 300-line limit; extraction required 3 stages after implementation +root_cause: 300-line file limit is enforced mechanically at commit time but is not an active constraint during code generation. Large files are written without budgeting for size. +rule: The 300-line file limit must be applied during code generation, not at commit time. Any route or page expected to contain multi-phase logic must be designed with extraction points upfront. Files projected to exceed 250 lines must be split before writing. +improvement: commands/execute-plan.md: for any API route handling more than 2 logical phases, the route handler must delegate to helpers at generation time. Target: route files under 200 lines, page files under 250 lines. backend-engineer-agent + frontend-engineer-agent: if a file is projected to exceed 250 lines during generation, extract into a helper or sub-component before writing past that limit. + +--- + +--- + +date: 2026-04-03 +project: MoneyMirror (issue-009) +issue: pdf-parse wrong result property — pdf-parser.ts called result.pages?.length; library exposes result.total, not result.pages.length; pageCount resolved to 1 for all documents +root_cause: execute-plan agent generated code against training knowledge of the pdf-parse API without verifying the installed package version's exported interface. 
The library API changed between versions. +rule: When generating code against a third-party package whose API has changed between major versions, verify the installed version's exported types or index against the generated call pattern. Training knowledge of library APIs is not sufficient for version-sensitive properties. +improvement: commands/execute-plan.md: after wiring any third-party library for the first time, check the installed version in package.json and verify the exported API matches the generated usage pattern. + +--- diff --git a/knowledge/prompt-library.md b/knowledge/prompt-library.md index 9863628..9b9e1e3 100644 --- a/knowledge/prompt-library.md +++ b/knowledge/prompt-library.md @@ -158,3 +158,19 @@ system improvement: - A/B experiment salts must be server-only env vars (never NEXT*PUBLIC*). Control group API responses must return a neutral label ("default"), never the real cohort string. The true cohort is captured server-side in PostHog only. - All JSON.parse calls on localStorage/sessionStorage must be wrapped in try/catch. All fetch calls triggered by user input (search, filter) must use AbortController to prevent race conditions. - Every success metric in a product spec must have a "Metric → Flow Mapping" table confirming the required user action, UI component, and API endpoint exist within the committed MVP scope. Unmeasurable metrics must be descoped or the MVP expanded before /create-plan exits. + +--- + +## 2026-04-03 — issue-009: MoneyMirror (AI-Powered Personal Finance Coach) + +issue: 7 systemic issues — dashboard rehydration path absent, partial write as success, fan-out worker HTTP contract undefined, advisory fetch missing auth header, PostHog env var mismatch, file size violations at deploy-check, pdf-parse wrong API property. +root cause: 6/7 issues trace to execute-plan output gaps. 
Architecture under-specification (rehydration path, write atomicity, fan-out HTTP contract), incomplete cross-verification at implementation time (auth header caller mismatch, env var names), no file-size budget during generation, and training-knowledge-only library usage. +system improvement: + +- Every dashboard/results/report page linked from email or external URL must specify the authenticated read path for first-load rehydration — the post-mutation result path is insufficient. +- Every parent + child write sequence must declare an explicit atomicity strategy. Partial success (parent processed, children missing) is never a terminal state. +- Fan-out worker HTTP contracts must specify: worker returns non-2xx on failure; master uses HTTP status only — never JSON body inspection — for success/failure accounting. +- After adding auth to any route, all callers must be updated in the same change. A route auth fix without caller update is an incomplete fix. Code review must cross-verify all fetch() calls to authenticated routes for missing Authorization headers. +- .env.local.example must be generated by grepping actual process.env.\* calls in src/ — never from memory. Key name mismatches between example file and source code are deploy blockers. +- File size limits (route < 200 lines, page < 250 lines) must be applied during code generation. Files projected to exceed 250 lines must be split at extraction points before writing. +- After integrating any third-party npm package for the first time, verify the installed version's exported API against the generated call pattern before marking the integration complete. 
diff --git a/project-state.md b/project-state.md index c7a9cbe..13b3259 100644 --- a/project-state.md +++ b/project-state.md @@ -2,31 +2,34 @@ ## Active Project -- name: none -- repo_path: -- owner: -- started_on: -- goal (1 sentence): +- name: MoneyMirror — AI-Powered Personal Finance Coach +- repo_path: apps/money-mirror +- owner: Vijay Sehgal +- started_on: 2026-04-01 +- goal (1 sentence): Build a mobile-first PWA AI financial coach that reads Indian bank statements, reveals the "perception gap" (perceived vs actual spend) via a Mirror moment, and delivers consequence-first nudges to help Gen Z Indians (₹20K–₹80K/month) reduce wasteful spend by ≥30% and initiate their first SIP within 60 days. ## Current Stage -- stage: idle -- last_command_run: /finish-off -- status: done -- active_issue: none +- stage: execute_plan +- last_command_run: manual implementation — Phase 1 rollout validation + live smoke complete +- status: in-progress +- active_issue: issue-009 / VIJ-13 ## Active Work -- active_branch: main -- last_commit: -- open_pr_link: +- active_branch: feat/linear-workflow-sync +- last_commit: 71d3e66 +- open_pr_link: https://github.com/shadowdevcode/ai-product-os/pull/14 - environments: local +- implementation_focus: Phase 1 rollout validation — live schema sync + OTP/upload/cron smoke ## Quality Gates -- create_issue: done — issue-008 created. Nykaa Fashion Hyper-Personalized Discovery Feed. First-hand nykaa.com audit (12 surfaces) as primary signal. Zero personalization gap confirmed. Hypothesis: affinity-weighted 40/30/20/10 ranking engine lifts homepage-to-PDP CVR by 15–25% for logged-in cohort. Saved to experiments/ideas/issue-008.md. -- explore: done — Recommendation: Build. Problem is critical, gap is unserved in Nykaa's context. MVP proposed: Rule-based "For You" shelf (historical affinity + real-time intent), excluding complex ML models and cold-start UX for V1. Saved to experiments/exploration/exploration-008.md. 
-- create_plan: done — plan-008.md + manifest-008.json created. Architecture: Next.js 16, Neon DB (3 tables), rule-based scoring (affinity 0.6 + intent 0.4), PostHog for A/B and telemetry. 12 implementation tasks. +- create_issue: done — issue-009 created. MoneyMirror — AI-Powered Personal Finance Coach for Gen Z India. Source: 13 @warikoo YouTube transcripts (238,000+ chars). Gap confirmed: zero budgeting/behavioral coaching tool recommended across 100+ Money Matters episodes. Hypothesis: PWA-first AI coach that parses Indian bank statements + delivers consequence-first nudges reduces avoidable spend ≥30% and drives first SIP initiation for ≥20% of users within 60 days. Money Health Score (0–100) is the North Star proxy metric. Saved to experiments/ideas/issue-009.md. +- explore: done — Recommendation: Build. Problem is critical (Hair on fire for 22–30 segment), gap is confirmed from 13 Warikoo transcripts (238K chars, zero coaching tool recommended across 100+ Money Matters episodes). Competitive scan: no Indian product at this positioning (Walnut abandoned, ET Money investment-first, CRED rewards bad behavior, Jupiter/Fi bank-first). MVP: HDFC bank statement parse + onboarding Money Health Score + Day 7 Mirror Report + 5 advisory triggers + weekly email. WhatsApp, credit card parsing, gamification, paywall all excluded from Phase 1. North Star: second-month statement upload rate (≥60%). Primary risk: PDF parsing reliability. Saved to experiments/exploration/exploration-009.md. +- create_plan: done — plan-009.md + manifest-009.json created. Historical issue-009 plan snapshot specified a Supabase-auth/RLS-oriented shape. Current MoneyMirror implementation has since drifted and is now Neon Auth + Neon Postgres with server-enforced ownership. The repo codebase is the source of truth for local testing. +- execute_plan: done — Full apps/money-mirror implementation. 
Current canonical stack: Next.js 16, Neon Auth email OTP, Neon Postgres, Gemini 2.5 Flash, Resend, PostHog, Sentry. Built statement parse, dashboard rehydration, onboarding completion, weekly recap fan-out, and HDFC-only advisory flow. Historical issue-009 notes that mention Supabase JWT wiring or RLS should be read as cycle-era findings, not the current app architecture. Current automated validation target after repair: 39 tests. +- execute_plan (issue-009 phase-1 expansion): blocked — Implemented Phase 1 scope expansion directly in `apps/money-mirror/` for the next local validation pass. Added explicit `statement_type` support (`bank_account | credit_card`), parser prompt/validation helpers in `src/lib/statements.ts`, institution metadata + optional card due fields on statement parse responses and dashboard reads, onboarding persistence of `monthly_income_paisa`, upload-mode selector UI, and credit-card-safe advisory math so card payments/refunds are not treated as income. Updated app docs/context to reflect bank-account + credit-card PDF support. Automated validation now: `npm test` PASS (45 tests), `npx tsc --noEmit` PASS, `npx next build --webpack` PASS. Live rollout validation findings: local `npm run dev` boots, unauthenticated cron returns `401`, authenticated cron with `x-cron-secret` returns `200 {"ok":true,"total":0,"succeeded":0,"failed":0}`, but the target Neon database is still on the old schema. Verified drift: `profiles` is missing `monthly_income_paisa`; `statements` still uses `bank_name` and is missing `institution_name`, `statement_type`, `due_date`, `payment_due_paisa`, `minimum_due_paisa`, and `credit_limit_paisa`. Remaining work before deploy: apply updated `apps/money-mirror/schema.sql` to Neon, re-check schema, then resume the real OTP/Gemini/Resend smoke flow on follow-up issue `VIJ-13`. - execute_plan: done — Phase 1 (Core Engine): apps/nykaa-personalisation built, Neon DB ready, 5 API routes, affinity scoring live. 
Phase 2 (P2P & Conversions): PDP UI implemented with dynamic routing ([id]), ingest-event API enhanced for `add_to_cart` tracking. **Update**: Fixed missing backend agent logic by adding `GET /api/catalog/product/[id]` route and refactoring PDP to use server-side fetch for foolproof integration. - deslop (issue-008): done — extracted duplicated scoreProduct into shared score-product.ts module. - review (issue-008): done — Fixed missing SHELF_CLICK tracking and reduced latency in rerank route. @@ -36,7 +39,13 @@ - postmortem (issue-008): done — 5 systemic issues identified. Root cause: architecture under-specification. Result saved to experiments/results/postmortem-008.md. - learning (issue-008): done — 4 engineering rules extracted. knowledge/engineering-lessons.md updated. CODEBASE-CONTEXT.md written. Full pipeline cycle for issue-008 complete. - deslop: done — 9 restatement comments removed, 1 dead prop removed, PostHog events parallelised -- review: done — all items fixed, build passes +- review (issue-009): done — Historical cycle review caught and fixed parser API drift, auth/ownership gaps, missing perceived spend persistence, dead PostHog singleton reuse, cron failure accounting, and score page cleanup issues. These findings remain historically valid, but current auth/storage terminology in the repo should be read through the current Neon-based implementation. +- peer-review (issue-009): done — Historical cycle peer review verified dashboard rehydration, fail-closed statement persistence, worker failure propagation, and recap pagination. Current app still preserves those repaired behaviors, now on the Neon stack. +- qa_test (issue-009): done — PASS. Historical cycle QA fixed env var naming and Web Share rendering issues. Current MoneyMirror repair pass extends automated validation to 39 tests and keeps `.env.local.example` aligned with actual `process.env.*` usage. +- deploy_check (issue-009): done — APPROVED. 
Historical deploy-check notes that mention Supabase SQL Editor are outdated relative to the current Neon Postgres app. Current local setup should apply [`schema.sql`](apps/money-mirror/schema.sql) to Neon and use app-local [`vercel.json`](apps/money-mirror/vercel.json) for recap scheduling. +- postmortem (issue-009): done — 7 systemic issues identified across 5 agents. Root cause pattern: 6/7 issues trace to execute-plan output gaps (under-specified architecture, incomplete cross-verification, no file-size budget during generation). Recurring failure: parent/child write atomicity (2nd consecutive cycle). Agents requiring prompt updates: backend-architect-agent (3 checklist items), backend-engineer-agent (2 hard rules), code-review-agent (1 caller-verification check), execute-plan command (3 additions), qa-agent (1 env var dimension). Result saved to experiments/results/postmortem-009.md. +- learning (issue-009): done — 7 engineering rules extracted and written to knowledge/engineering-lessons.md. Prompt library updated (knowledge/prompt-library.md). Agent files updated: backend-architect-agent.md (items 10–12: rehydration path, write atomicity, fan-out HTTP contract), backend-engineer-agent.md (auth caller verification + file size budget), code-review-agent.md (authenticated route caller check + parent/child write sequence check), qa-agent.md (env var key name cross-check dimension), commands/execute-plan.md (env var grep step, read/write path checkpoint, third-party library API verification, file size budget section). CODEBASE-CONTEXT.md written to apps/money-mirror/. Full pipeline cycle for issue-009 complete. +- linear_close (issue-009): done — Linear project set to Completed. VIJ-11 set to Done. Closeout snapshot document created (https://linear.app/vijaypmworkspace/document/issue-009-closeout-snapshot-aee923543aa2). 
Final closeout comment posted on VIJ-11. linear_last_sync: 2026-04-03T08:29:07Z. linear_sync_status: success. - peer_review: done — all items fixed; EC1 localStorage guard on ControlGroupSimulator, PA1 split test/control orders in North Star section, RR1 DEMO_SECRET header on reorder-events, AC1 reminder_sent=false filter on dashboard query, AC2 DO NOTHING write-once cohort. Build clean. - qa_test: done — PASS. 2 medium findings (QA1: reminders/opened unguarded DB call, QA2: PostHog failure causes worker 500 + undercount). No high-risk blockers. Fix QA1+QA2 before demo run. Results saved to experiments/results/qa-test-006.md. - metric_plan: done — metric-plan-006.md created. North Star: 21-day repeat purchase rate lift (test vs. control, +10pp target). 7 events verified wired. 3 missing error-path events flagged for production. Ground-truth queries defined against reorder_events + experiment_cohorts tables. @@ -46,11 +55,13 @@ ## Pending Queue -- none +- Vercel deploy + post-deploy verify (VIJ-20) — build succeeds, but production deployment is still blocked by Vercel protection/routing misconfiguration +- Follow-up: fix MoneyMirror Vercel public routing and deployment protection so production serves real app routes +- Credit card PDF smoke follow-up: categorisation landed 95% "Other" for bank_account — may need categoriser tuning in a future issue ## Blockers -- none +- Vercel deploy (VIJ-20) blocked — Vercel project `money-mirror` now exists and production builds complete, but public aliases are protected by Vercel Authentication and authenticated `vercel curl` still returns `NOT_FOUND` for `/`, `/login`, `/dashboard`, and `/api/cron/weekly-recap` ## Decisions Log (append-only) @@ -124,6 +135,17 @@ - 2026-03-28: Executed /metric-plan for issue-008. North Star: Add-to-Cart Rate Lift defined. - 2026-03-28: Executed /deploy-check for issue-008. Resolved README missing details and configured Sentry for error tracking. Automated PR #7 created successfully. 
- 2026-03-28: [ARCHIVED] Nykaa Hyper-Personalized Style Concierge (issue-008) — Full pipeline cycle complete. Final fix: added backend PDP API route and server-side fetch logic. All quality gates passed. Pipeline reset to idle. +- 2026-04-02: Executed /deslop for issue-009 (MoneyMirror). Removed template copy headers from posthog.ts, db.ts, error-handler.ts (dead boilerplate from libs/shared). Removed {/_ Bar _/} restatement comment from MirrorCard.tsx. Fixed critical hallucination in pdf-parser.ts — replaced non-existent default export with correct named { PDFParse } class from pdf-parse v2 (constructor takes LoadParameters, getText() returns TextResult with .text/.pages). Moved inline foodRegex/subRegex to module-level FOOD_REGEX/SUBSCRIPTION_REGEX in advisories/route.ts. Fixed unsafe CategorySummary cast with exclusion guards. Added .limit(1000) to transactions query. Ready for /review. +- 2026-04-02: Executed /review for issue-009 (MoneyMirror). 2 CRITICAL + 2 HIGH + 2 MEDIUM issues found and fixed. CRITICAL-1: pdf-parser.ts used result.pages?.length instead of result.total on TextResult from pdf-parse v2 PDFParse class. CRITICAL-2: /api/dashboard/advisories missing Supabase JWT auth + ownership filter — any UUID could access another user's financial data. HIGH-1: perceived_spend_paisa never written to statements INSERT — PERCEPTION_GAP advisory was permanently broken (always 0). Fixed by fetching from profiles table before INSERT. HIGH-2: PostHog singleton \_posthogServer not reset to null after shutdown() — dead client reused on subsequent calls, silently dropping events. MEDIUM-1: cron master succeeded/failed counts wrong — .catch() swallowed errors making all results appear fulfilled. Fixed by throwing on non-2xx responses. MEDIUM-2: score/page.tsx setTimeout in useEffect missing cleanup — setState on unmounted component risk. PostHog dual-emission check: PASS. Review result saved to experiments/results/review-009.md. 
+- 2026-04-02: Executed /peer-review for issue-009 (MoneyMirror). BLOCKED. 4 MUST-FIX items: A1 `/dashboard` has no persisted rehydration path and does not implement the planned `GET /api/dashboard`, so refreshes and weekly email deep links lose the mirror state; R1 `/api/statement/parse` returns success and emits `statement_parse_success` even when `transactions` insert fails, creating partial-write corruption; R2 `/api/cron/weekly-recap/worker` returns HTTP 200 on Resend failure, so master success counts and `weekly_recap_completed` telemetry remain wrong; P1 `dashboard/page.tsx` calls `/api/dashboard/advisories` without Authorization header, so the core coaching feed never renders. MEDIUM: A2 weekly recap fan-out is capped by `.limit(1000)` on statement rows. Result saved to experiments/results/peer-review-009.md. +- 2026-04-02: Executed /peer-review round 2 for issue-009 (MoneyMirror). APPROVED. Verified all prior findings fixed: A1 authenticated persisted dashboard rehydration path added via `/api/dashboard` and dashboard page now hydrates on first load; R1 statement parse persistence is now fail-closed with `processing` → `processed` transition only after transactions save; R2 weekly recap worker now returns 502 on email failure and master rejects unsuccessful worker responses; P1 dashboard no longer loses advisories due to missing auth because post-upload flow hydrates from authenticated dashboard data; A2 recap fan-out paginates through processed statements in 1000-row batches. Additional build-safety fixes: lazy Supabase client init in `src/lib/db.ts`, Resend client moved inside request handler. Validation: `npm test` PASS, `npm run build` PASS. Result saved to experiments/results/peer-review-009-r2.md. +- 2026-04-02: Executed /qa-test for issue-009 (MoneyMirror). PASS. 34 automated tests passing. 
QA1 (BLOCKING) fixed: `.env.local.example` declared `NEXT_PUBLIC_POSTHOG_KEY`/`NEXT_PUBLIC_POSTHOG_HOST` but `src/lib/posthog.ts` reads `POSTHOG_KEY`/`POSTHOG_HOST` — would have caused all server-side telemetry to be silently dead in production; corrected to server-only names (no `NEXT_PUBLIC_` prefix). QA2 (MEDIUM) fixed: "Share My Mirror" button conditionally rendered only when `navigator.share` is available — no longer a silent no-op on desktop. Telemetry resilience verified: all PostHog calls fire-and-forget. No duplicate event emissions. Result saved to experiments/results/qa-test-009.md. +- 2026-04-03: Executed /learning for issue-009 (MoneyMirror). 7 engineering lessons written to knowledge/engineering-lessons.md (dashboard rehydration path, parent/child write atomicity, fan-out HTTP contract, auth caller cross-verification, env var grep verification, file size budget at generation time, third-party library API verification). Prompt library updated (knowledge/prompt-library.md). Agent files updated per Prompt Autopsy: backend-architect-agent.md (items 10–12 in Mandatory Pre-Approval Checklist: rehydration path, write atomicity, fan-out HTTP contract), backend-engineer-agent.md (2 hard rules: auth caller verification + file size budget), code-review-agent.md (2 new checks: authenticated route caller cross-verification + parent/child write sequence), qa-agent.md (env var key name cross-check as standalone QA dimension), commands/execute-plan.md (env var grep step in completion checklist, read/write path checkpoint + third-party library API verification in Step 5, file size budget as Step 5b). CODEBASE-CONTEXT.md written to apps/money-mirror/. Full pipeline cycle for issue-009 complete. +- 2026-04-03: Drift correction for issue-009 (MoneyMirror). Canonical app state rebaselined to the current Neon Auth + Neon Postgres + email OTP implementation in `apps/money-mirror/`. 
Fixed onboarding completion to fail closed on persistence errors, added app-local Vercel cron scheduling via `apps/money-mirror/vercel.json`, aligned recap auth to Vercel Bearer cron + local `x-cron-secret`, normalized Monday recap cadence, refreshed app docs, and prepared Linear resync so PM state matches the codebase that is locally testable. +- 2026-04-03: MoneyMirror Phase 1 scope expanded in-code beyond the earlier HDFC-only MVP contract. `apps/money-mirror/` now supports explicit upload modes for `bank_account` and `credit_card`, persists `monthly_income_paisa` from onboarding, stores `institution_name` + `statement_type` + optional card-due metadata on statements, and uses credit-card-safe summary/advisory logic so card payments and refunds do not inflate income. Validation in workspace: `npm test` PASS (45 tests), `npx tsc --noEmit` PASS, `npx next build --webpack` PASS. Remaining pre-deploy work: apply schema changes to Neon and complete live local smoke against OTP/Gemini/Resend. +- 2026-04-03: MoneyMirror Phase 1 rollout validation started against live local/runtime infrastructure. Verified `npm run dev` boots outside the sandbox, verified cron auth contract locally (`401` without auth, `200 {"ok":true,"total":0,"succeeded":0,"failed":0}` with `x-cron-secret`), created dedicated Linear follow-up issue `VIJ-13`, and confirmed the target Neon DB is still on the old schema. Blocking drift: `profiles` lacks `monthly_income_paisa`; `statements` still uses `bank_name` and lacks `institution_name`, `statement_type`, and credit-card due metadata fields. Next required action: apply `apps/money-mirror/schema.sql` to Neon, then resume OTP/onboarding/upload/dashboard smoke on `VIJ-13`. +- 2026-04-04: MoneyMirror Phase 1 live smoke complete. Applied 7 ALTER TABLE migrations to Neon DB `steep-meadow-97750093` (profiles + statements schema drift resolved). 
Linear cleanup: cancelled VIJ-12/VIJ-14/VIJ-15 (duplicates), restructured VIJ-13 as canonical Phase 1 Rollout Validation issue (In Progress), created 6 child sub-issues (VIJ-16 schema Done, VIJ-17 OTP Done, VIJ-18 bank upload Done, VIJ-19 CC upload Done, VIJ-20 Vercel deploy pending, VIJ-21 cron gate Done). Smoke results: OTP login ✅, bank account PDF parse (Kotak, 24 txns, ₹31,926) ✅ DB confirmed, credit card PDF parse (HDFC, 18 txns, ₹16,245) ✅ DB confirmed. Fixed gemini-2.5-flash timeout by disabling thinking (thinkingBudget: 0) in parse route — reduces response time from >25s to ~8s. Only remaining gate: Vercel deploy (VIJ-20). +- 2026-04-04: MoneyMirror production deploy attempt executed for VIJ-20. Created and linked Vercel project `money-mirror` in scope `vijay-sehgals-projects`, synced production env vars from app-local `.env.local` except blank Sentry values (`NEXT_PUBLIC_SENTRY_DSN`, `SENTRY_ORG`, `SENTRY_PROJECT`), and corrected `NEXT_PUBLIC_APP_URL` to `https://money-mirror-rho.vercel.app`. First deploy failed because `middleware.ts` imported `@neondatabase/auth/next/server`, which Vercel rejected in the Edge runtime. Fixed by replacing [`middleware.ts`](apps/money-mirror/middleware.ts) with [`proxy.ts`](apps/money-mirror/proxy.ts) so auth gating runs in Next 16's Node proxy runtime. Local validation after the fix: `npm test` PASS (45 tests), `npx next build --webpack` PASS, `npx tsc --noEmit` PASS after regenerating `.next/types`. Subsequent production builds succeeded and Vercel aliased the app to `https://money-mirror-rho.vercel.app`, but the release is still blocked: unauthenticated requests return Vercel Authentication `401`, and authenticated `vercel curl` requests still return `NOT_FOUND` for `/`, `/login`, `/dashboard`, and `/api/cron/weekly-recap`. 
Next required action: fix Vercel project/public routing configuration before VIJ-20 can be closed. ## Links @@ -132,15 +154,17 @@ - linear_enabled: true - linear_team_id: 70aea0d1-a706-481f-a0b7-3e636709ba77 - linear_team: Vijaypmworkspace -- linear_project_id: -- linear_project: -- linear_project_url: -- linear_root_issue_id: -- linear_root_issue_identifier: +- linear_project_id: c0052da3-a2c3-4c24-aba0-bf833e122c2d +- linear_project: issue-009 — MoneyMirror — AI-Powered Personal Finance Coach for Gen Z India +- linear_project_url: https://linear.app/vijaypmworkspace/project/issue-009-moneymirror-ai-powered-personal-finance-coach-for-gen-z-8464834e8c78 +- linear_root_issue_id: VIJ-11 +- linear_root_issue_identifier: VIJ-11 - linear_cycle: -- linear_sync_map_path: -- linear_last_sync: 2026-04-01T07:41:00.000Z -- linear_sync_status: retroactive-sync-complete — issues 002–006 + 008 synced to Linear (Done). Sync maps written to experiments/linear-sync/. Issue 007 skipped (exploration-only, no sprint). +- linear_sync_map_path: experiments/linear-sync/issue-009.json +- linear_last_sync: 2026-04-04T02:55:23Z +- linear_sync_status: deploy-attempt-synced — VIJ-20 moved to In Progress, deploy evidence comment posted, and blocker issue VIJ-22 created for Vercel routing/protection failure. +- linear_follow_up_issue_identifier: VIJ-13 +- linear_follow_up_issue_url: https://linear.app/vijaypmworkspace/issue/VIJ-13/moneymirror-phase-1-live-smoke-and-rollout-validation - docs_home: experiments/ideas/issue-007.md - demo: - analytics_dashboard: