-
Target Directory *
-
setTargetPath(e.target.value)}
- placeholder="/Users/you/projects"
- required
- className="w-full px-3 py-2 bg-port-bg border border-port-border rounded-lg text-white focus:border-port-accent focus:outline-none font-mono"
+ onChange={setTargetPath}
+ label="Target Directory"
/>
App will be created at: {targetPath ? `${targetPath}/${appName || 'app-name'}` : '...'}
diff --git a/client/src/services/api.js b/client/src/services/api.js
index 220f70f..0da2e75 100644
--- a/client/src/services/api.js
+++ b/client/src/services/api.js
@@ -94,6 +94,11 @@ export const detectWithAi = (path, providerId) => request('/detect/ai', {
// Templates & Scaffold
export const getTemplates = () => request('/templates');
+export const getDirectories = (path = null) => {
+ const params = path ? `?path=${encodeURIComponent(path)}` : '';
+ return request(`/directories${params}`);
+};
+
export const scaffoldApp = (data) => request('/scaffold', {
method: 'POST',
body: JSON.stringify(data)
@@ -124,6 +129,11 @@ export const deleteProvider = (id) => request(`/providers/${id}`, { method: 'DEL
export const testProvider = (id) => request(`/providers/${id}/test`, { method: 'POST' });
export const refreshProviderModels = (id) => request(`/providers/${id}/refresh-models`, { method: 'POST' });
+// Provider status (usage limits, availability)
+export const getProviderStatuses = () => request('/providers/status');
+export const getProviderStatus = (id) => request(`/providers/${id}/status`);
+export const recoverProvider = (id) => request(`/providers/${id}/status/recover`, { method: 'POST' });
+
// Runs
export const getRuns = (limit = 50, offset = 0, source = 'all') =>
request(`/runs?limit=${limit}&offset=${offset}&source=${source}`);
@@ -229,11 +239,15 @@ export const addCosTask = (task) => request('/cos/tasks', {
method: 'POST',
body: JSON.stringify(task)
});
+export const enhanceCosTaskPrompt = (data) => request('/cos/tasks/enhance', {
+ method: 'POST',
+ body: JSON.stringify(data)
+});
export const updateCosTask = (id, updates) => request(`/cos/tasks/${id}`, {
method: 'PUT',
body: JSON.stringify(updates)
});
-export const deleteCosTask = (id) => request(`/cos/tasks/${id}`, { method: 'DELETE' });
+export const deleteCosTask = (id, taskType = 'user') => request(`/cos/tasks/${id}?type=${taskType}`, { method: 'DELETE' });
export const reorderCosTasks = (taskIds) => request('/cos/tasks/reorder', {
method: 'POST',
body: JSON.stringify({ taskIds })
@@ -262,6 +276,7 @@ export const getCosLearningDurations = () => request('/cos/learning/durations');
export const getCosLearningSkipped = () => request('/cos/learning/skipped');
export const getCosLearningPerformance = () => request('/cos/learning/performance');
export const backfillCosLearning = () => request('/cos/learning/backfill', { method: 'POST' });
+export const resetCosTaskTypeLearning = (taskType) => request(`/cos/learning/reset/${encodeURIComponent(taskType)}`, { method: 'POST' });
// CoS Scripts
export const getCosScripts = () => request('/cos/scripts');
@@ -414,3 +429,270 @@ export const createGitBackup = (repoPath) => request('/standardize/backup', {
method: 'POST',
body: JSON.stringify({ repoPath })
});
+
+// Brain - Second Brain Feature
+export const getBrainSummary = () => request('/brain/summary');
+export const getBrainSettings = () => request('/brain/settings');
+export const updateBrainSettings = (settings) => request('/brain/settings', {
+ method: 'PUT',
+ body: JSON.stringify(settings)
+});
+
+// Brain - Capture & Inbox
+export const captureBrainThought = (text, providerOverride, modelOverride) => request('/brain/capture', {
+ method: 'POST',
+ body: JSON.stringify({ text, providerOverride, modelOverride })
+});
+export const getBrainInbox = (options = {}) => {
+ const params = new URLSearchParams();
+ if (options.status) params.set('status', options.status);
+ if (options.limit) params.set('limit', options.limit);
+ if (options.offset) params.set('offset', options.offset);
+ return request(`/brain/inbox?${params}`);
+};
+export const getBrainInboxEntry = (id) => request(`/brain/inbox/${id}`);
+export const resolveBrainReview = (inboxLogId, destination, editedExtracted) => request('/brain/review/resolve', {
+ method: 'POST',
+ body: JSON.stringify({ inboxLogId, destination, editedExtracted })
+});
+export const fixBrainClassification = (inboxLogId, newDestination, updatedFields, note) => request('/brain/fix', {
+ method: 'POST',
+ body: JSON.stringify({ inboxLogId, newDestination, updatedFields, note })
+});
+export const retryBrainClassification = (id, providerOverride, modelOverride) => request(`/brain/inbox/${id}/retry`, {
+ method: 'POST',
+ body: JSON.stringify({ providerOverride, modelOverride })
+});
+export const updateBrainInboxEntry = (id, capturedText) => request(`/brain/inbox/${id}`, {
+ method: 'PUT',
+ body: JSON.stringify({ capturedText })
+});
+export const deleteBrainInboxEntry = (id) => request(`/brain/inbox/${id}`, { method: 'DELETE' });
+
+// Brain - People
+export const getBrainPeople = () => request('/brain/people');
+export const getBrainPerson = (id) => request(`/brain/people/${id}`);
+export const createBrainPerson = (data) => request('/brain/people', {
+ method: 'POST',
+ body: JSON.stringify(data)
+});
+export const updateBrainPerson = (id, data) => request(`/brain/people/${id}`, {
+ method: 'PUT',
+ body: JSON.stringify(data)
+});
+export const deleteBrainPerson = (id) => request(`/brain/people/${id}`, { method: 'DELETE' });
+
+// Brain - Projects
+export const getBrainProjects = (filters) => {
+ const params = new URLSearchParams();
+ if (filters?.status) params.set('status', filters.status);
+ return request(`/brain/projects?${params}`);
+};
+export const getBrainProject = (id) => request(`/brain/projects/${id}`);
+export const createBrainProject = (data) => request('/brain/projects', {
+ method: 'POST',
+ body: JSON.stringify(data)
+});
+export const updateBrainProject = (id, data) => request(`/brain/projects/${id}`, {
+ method: 'PUT',
+ body: JSON.stringify(data)
+});
+export const deleteBrainProject = (id) => request(`/brain/projects/${id}`, { method: 'DELETE' });
+
+// Brain - Ideas
+export const getBrainIdeas = () => request('/brain/ideas');
+export const getBrainIdea = (id) => request(`/brain/ideas/${id}`);
+export const createBrainIdea = (data) => request('/brain/ideas', {
+ method: 'POST',
+ body: JSON.stringify(data)
+});
+export const updateBrainIdea = (id, data) => request(`/brain/ideas/${id}`, {
+ method: 'PUT',
+ body: JSON.stringify(data)
+});
+export const deleteBrainIdea = (id) => request(`/brain/ideas/${id}`, { method: 'DELETE' });
+
+// Brain - Admin
+export const getBrainAdmin = (filters) => {
+ const params = new URLSearchParams();
+ if (filters?.status) params.set('status', filters.status);
+ return request(`/brain/admin?${params}`);
+};
+export const getBrainAdminItem = (id) => request(`/brain/admin/${id}`);
+export const createBrainAdminItem = (data) => request('/brain/admin', {
+ method: 'POST',
+ body: JSON.stringify(data)
+});
+export const updateBrainAdminItem = (id, data) => request(`/brain/admin/${id}`, {
+ method: 'PUT',
+ body: JSON.stringify(data)
+});
+export const deleteBrainAdminItem = (id) => request(`/brain/admin/${id}`, { method: 'DELETE' });
+
+// Brain - Digests & Reviews
+export const getBrainLatestDigest = () => request('/brain/digest/latest');
+export const getBrainDigests = (limit = 10) => request(`/brain/digests?limit=${limit}`);
+export const runBrainDigest = (providerOverride, modelOverride) => request('/brain/digest/run', {
+ method: 'POST',
+ body: JSON.stringify({ providerOverride, modelOverride })
+});
+export const getBrainLatestReview = () => request('/brain/review/latest');
+export const getBrainReviews = (limit = 10) => request(`/brain/reviews?limit=${limit}`);
+export const runBrainReview = (providerOverride, modelOverride) => request('/brain/review/run', {
+ method: 'POST',
+ body: JSON.stringify({ providerOverride, modelOverride })
+});
+
+// Media - Server media devices
+export const getMediaDevices = () => request('/media/devices');
+export const getMediaStatus = () => request('/media/status');
+export const startMediaStream = (videoDeviceId, audioDeviceId, video = true, audio = true) => request('/media/start', {
+ method: 'POST',
+ body: JSON.stringify({ videoDeviceId, audioDeviceId, video, audio })
+});
+export const stopMediaStream = () => request('/media/stop', { method: 'POST' });
+
+// Digital Twin - Status & Summary
+export const getDigitalTwinStatus = () => request('/digital-twin');
+export const getSoulStatus = getDigitalTwinStatus; // Alias for backwards compatibility
+
+// Digital Twin - Documents
+export const getDigitalTwinDocuments = () => request('/digital-twin/documents');
+export const getSoulDocuments = getDigitalTwinDocuments;
+export const getDigitalTwinDocument = (id) => request(`/digital-twin/documents/${id}`);
+export const getSoulDocument = getDigitalTwinDocument;
+export const createDigitalTwinDocument = (data) => request('/digital-twin/documents', {
+ method: 'POST',
+ body: JSON.stringify(data)
+});
+export const createSoulDocument = createDigitalTwinDocument;
+export const updateDigitalTwinDocument = (id, data) => request(`/digital-twin/documents/${id}`, {
+ method: 'PUT',
+ body: JSON.stringify(data)
+});
+export const updateSoulDocument = updateDigitalTwinDocument;
+export const deleteDigitalTwinDocument = (id) => request(`/digital-twin/documents/${id}`, { method: 'DELETE' });
+export const deleteSoulDocument = deleteDigitalTwinDocument;
+
+// Digital Twin - Testing
+export const getDigitalTwinTests = () => request('/digital-twin/tests');
+export const getSoulTests = getDigitalTwinTests;
+export const runDigitalTwinTests = (providerId, model, testIds = null) => request('/digital-twin/tests/run', {
+ method: 'POST',
+ body: JSON.stringify({ providerId, model, testIds })
+});
+export const runSoulTests = runDigitalTwinTests;
+export const runDigitalTwinMultiTests = (providers, testIds = null) => request('/digital-twin/tests/run-multi', {
+ method: 'POST',
+ body: JSON.stringify({ providers, testIds })
+});
+export const runSoulMultiTests = runDigitalTwinMultiTests;
+export const getDigitalTwinTestHistory = (limit = 10) => request(`/digital-twin/tests/history?limit=${limit}`);
+export const getSoulTestHistory = getDigitalTwinTestHistory;
+
+// Digital Twin - Enrichment
+export const getDigitalTwinEnrichCategories = () => request('/digital-twin/enrich/categories');
+export const getSoulEnrichCategories = getDigitalTwinEnrichCategories;
+export const getDigitalTwinEnrichProgress = () => request('/digital-twin/enrich/progress');
+export const getSoulEnrichProgress = getDigitalTwinEnrichProgress;
+export const getDigitalTwinEnrichQuestion = (category, providerOverride, modelOverride) => request('/digital-twin/enrich/question', {
+ method: 'POST',
+ body: JSON.stringify({ category, providerOverride, modelOverride })
+});
+export const getSoulEnrichQuestion = getDigitalTwinEnrichQuestion;
+export const submitDigitalTwinEnrichAnswer = (data) => request('/digital-twin/enrich/answer', {
+ method: 'POST',
+ body: JSON.stringify(data)
+});
+export const submitSoulEnrichAnswer = submitDigitalTwinEnrichAnswer;
+
+// Digital Twin - Export
+export const getDigitalTwinExportFormats = () => request('/digital-twin/export/formats');
+export const getSoulExportFormats = getDigitalTwinExportFormats;
+export const exportDigitalTwin = (format, documentIds = null, includeDisabled = false) => request('/digital-twin/export', {
+ method: 'POST',
+ body: JSON.stringify({ format, documentIds, includeDisabled })
+});
+export const exportSoul = exportDigitalTwin;
+
+// Digital Twin - Settings
+export const getDigitalTwinSettings = () => request('/digital-twin/settings');
+export const getSoulSettings = getDigitalTwinSettings;
+export const updateDigitalTwinSettings = (settings) => request('/digital-twin/settings', {
+ method: 'PUT',
+ body: JSON.stringify(settings)
+});
+export const updateSoulSettings = updateDigitalTwinSettings;
+
+// Digital Twin - Validation & Analysis
+export const getDigitalTwinCompleteness = () => request('/digital-twin/validate/completeness');
+export const getSoulCompleteness = getDigitalTwinCompleteness;
+export const detectDigitalTwinContradictions = (providerId, model) => request('/digital-twin/validate/contradictions', {
+ method: 'POST',
+ body: JSON.stringify({ providerId, model })
+});
+export const detectSoulContradictions = detectDigitalTwinContradictions;
+export const generateDigitalTwinTests = (providerId, model) => request('/digital-twin/tests/generate', {
+ method: 'POST',
+ body: JSON.stringify({ providerId, model })
+});
+export const generateSoulTests = generateDigitalTwinTests;
+export const analyzeWritingSamples = (samples, providerId, model) => request('/digital-twin/analyze-writing', {
+ method: 'POST',
+ body: JSON.stringify({ samples, providerId, model })
+});
+
+// Digital Twin - List-based Enrichment
+export const analyzeEnrichmentList = (category, items, providerId, model) => request('/digital-twin/enrich/analyze-list', {
+ method: 'POST',
+ body: JSON.stringify({ category, items, providerId, model })
+});
+export const saveEnrichmentList = (category, content, items) => request('/digital-twin/enrich/save-list', {
+ method: 'POST',
+ body: JSON.stringify({ category, content, items })
+});
+export const getEnrichmentListItems = (category) => request(`/digital-twin/enrich/list-items/${category}`);
+
+// --- Digital Twin Traits & Confidence (Phase 1 & 2) ---
+export const getDigitalTwinTraits = () => request('/digital-twin/traits');
+export const analyzeDigitalTwinTraits = (providerId, model, forceReanalyze = false) => request('/digital-twin/traits/analyze', {
+ method: 'POST',
+ body: JSON.stringify({ providerId, model, forceReanalyze })
+});
+export const updateDigitalTwinTraits = (updates) => request('/digital-twin/traits', {
+ method: 'PUT',
+ body: JSON.stringify(updates)
+});
+export const getDigitalTwinConfidence = () => request('/digital-twin/confidence');
+export const calculateDigitalTwinConfidence = (providerId, model) => request('/digital-twin/confidence/calculate', {
+ method: 'POST',
+ body: JSON.stringify({ providerId, model })
+});
+export const getDigitalTwinGaps = () => request('/digital-twin/gaps');
+
+// --- Digital Twin External Import (Phase 4) ---
+export const getDigitalTwinImportSources = () => request('/digital-twin/import/sources');
+export const analyzeDigitalTwinImport = (source, data, providerId, model) => request('/digital-twin/import/analyze', {
+ method: 'POST',
+ body: JSON.stringify({ source, data, providerId, model })
+});
+export const saveDigitalTwinImport = (source, suggestedDoc) => request('/digital-twin/import/save', {
+ method: 'POST',
+ body: JSON.stringify({ source, suggestedDoc })
+});
+
+// Default export for simplified imports
+export default {
+ get: (endpoint, options) => request(endpoint, { method: 'GET', ...options }),
+ post: (endpoint, body, options) => request(endpoint, {
+ method: 'POST',
+ body: JSON.stringify(body),
+ ...options
+ }),
+ put: (endpoint, body, options) => request(endpoint, {
+ method: 'PUT',
+ body: JSON.stringify(body),
+ ...options
+ }),
+ delete: (endpoint, options) => request(endpoint, { method: 'DELETE', ...options })
+};
diff --git a/data.sample/brain/admin.json b/data.sample/brain/admin.json
new file mode 100644
index 0000000..b6111a8
--- /dev/null
+++ b/data.sample/brain/admin.json
@@ -0,0 +1,31 @@
+{
+ "records": {
+ "ad1a2b3c-d4e5-4f6a-7b8c-9d0e1f2a3b4c": {
+ "title": "Renew car registration",
+ "status": "open",
+ "dueDate": "2024-01-31T23:59:59.000Z",
+ "nextAction": "Go to DMV website and complete renewal",
+ "notes": "Registration expires end of January. Can do online.",
+ "createdAt": "2024-01-02T09:00:00.000Z",
+ "updatedAt": "2024-01-02T09:00:00.000Z"
+ },
+ "ad2b3c4d-e5f6-4a7b-8c9d-0e1f2a3b4c5d": {
+ "title": "Schedule annual physical",
+ "status": "waiting",
+ "dueDate": "2024-02-15T23:59:59.000Z",
+ "nextAction": "Waiting for callback from doctor's office",
+ "notes": "Called and left message. They said they'd call back to schedule.",
+ "createdAt": "2024-01-05T10:00:00.000Z",
+ "updatedAt": "2024-01-08T09:00:00.000Z"
+ },
+ "ad3c4d5e-f6a7-4b8c-9d0e-1f2a3b4c5d6e": {
+ "title": "File Q4 expenses",
+ "status": "done",
+ "dueDate": "2024-01-10T23:59:59.000Z",
+ "nextAction": null,
+ "notes": "Submitted all Q4 expense reports. Approved and reimbursement processed.",
+ "createdAt": "2024-01-03T09:00:00.000Z",
+ "updatedAt": "2024-01-09T11:00:00.000Z"
+ }
+ }
+}
diff --git a/data.sample/brain/digests.jsonl b/data.sample/brain/digests.jsonl
new file mode 100644
index 0000000..487579a
--- /dev/null
+++ b/data.sample/brain/digests.jsonl
@@ -0,0 +1,2 @@
+{"id":"dg1a2b3c-d4e5-4f6a-7b8c-9d0e1f2a3b4c","generatedAt":"2024-01-09T09:00:00.000Z","digestText":"Today: Focus on Brain classifier implementation. You have 2 items needing review in your inbox - clear those first. Sarah is waiting on API timeline follow-up. Car registration due in 22 days. Standing desk arriving next week - plan your office rearrangement.","topActions":["Clear 2 inbox items needing review","Follow up with Sarah on API timeline","Start Brain classifier prompt template"],"stuckThing":"Blog post series blocked on design team images","smallWin":"Q1 planning doc got approved","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-daily-digest"}}
+{"id":"dg2b3c4d-e5f6-4a7b-8c9d-0e1f2a3b4c5d","generatedAt":"2024-01-08T09:00:00.000Z","digestText":"Start of the week. Brain feature is your main active project - good momentum. 1 person follow-up due (Jake for bathroom quote). Admin: car registration and physical scheduling both need attention this month. Your idea backlog is growing - consider time-boxing idea capture.","topActions":["Continue Brain implementation","Contact Jake for bathroom quote","Schedule annual physical"],"stuckThing":"Blog post still waiting on images","smallWin":"Home office desk ordered","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-daily-digest"}}
diff --git a/data.sample/brain/ideas.json b/data.sample/brain/ideas.json
new file mode 100644
index 0000000..b027311
--- /dev/null
+++ b/data.sample/brain/ideas.json
@@ -0,0 +1,28 @@
+{
+ "records": {
+ "i1a2b3c4-d5e6-4f7a-8b9c-0d1e2f3a4b5c": {
+ "title": "Voice-first capture mode",
+ "oneLiner": "Add voice transcription to Brain so thoughts can be captured hands-free via Whisper",
+ "notes": "Could use OpenAI Whisper API or local whisper.cpp. Would be great for capturing ideas while walking or driving.",
+ "tags": ["brain", "feature", "voice"],
+ "createdAt": "2024-01-07T08:00:00.000Z",
+ "updatedAt": "2024-01-07T08:00:00.000Z"
+ },
+ "i2b3c4d5-e6f7-4a8b-9c0d-1e2f3a4b5c6d": {
+ "title": "Weekly email digest",
+ "oneLiner": "Send the weekly review as an email so it reaches me even when I'm not using PortOS",
+ "notes": "Could integrate with SendGrid or use local SMTP. Include quick action links.",
+ "tags": ["brain", "email"],
+ "createdAt": "2024-01-06T14:00:00.000Z",
+ "updatedAt": "2024-01-06T14:00:00.000Z"
+ },
+ "i3c4d5e6-f7a8-4b9c-0d1e-2f3a4b5c6d7e": {
+ "title": "Spaced repetition for people",
+ "oneLiner": "Surface people you haven't contacted in a while using spaced repetition algorithm",
+ "notes": "Like Anki but for relationships. Remind me to reach out to people before the connection goes cold.",
+ "tags": ["brain", "people", "relationships"],
+ "createdAt": "2024-01-04T11:00:00.000Z",
+ "updatedAt": "2024-01-04T11:00:00.000Z"
+ }
+ }
+}
diff --git a/data.sample/brain/inbox_log.jsonl b/data.sample/brain/inbox_log.jsonl
new file mode 100644
index 0000000..2612a4c
--- /dev/null
+++ b/data.sample/brain/inbox_log.jsonl
@@ -0,0 +1,10 @@
+{"id":"il1a2b3c-d4e5-4f6a-7b8c-9d0e1f2a3b4c","capturedText":"Need to follow up with Sarah about the API redesign timeline","capturedAt":"2024-01-09T10:30:00.000Z","source":"brain_ui","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-classifier"},"classification":{"destination":"people","confidence":0.85,"title":"Sarah API follow-up","extracted":{"name":"Sarah Chen","context":"","followUps":["Follow up about API redesign timeline"],"tags":[]},"reasons":["Mentions specific person by name","Contains action item for that person"]},"status":"filed","filed":{"destination":"people","destinationId":"a1b2c3d4-e5f6-4a7b-8c9d-0e1f2a3b4c5d"}}
+{"id":"il2b3c4d-e5f6-4a7b-8c9d-0e1f2a3b4c5d","capturedText":"Start working on the classifier prompt template for Brain","capturedAt":"2024-01-09T09:00:00.000Z","source":"brain_ui","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-classifier"},"classification":{"destination":"projects","confidence":0.92,"title":"Brain classifier work","extracted":{"name":"PortOS Brain Feature","status":"active","nextAction":"Implement the classifier prompt template","tags":["brain","ai"]},"reasons":["Clear project task","Actionable next step"]},"status":"filed","filed":{"destination":"projects","destinationId":"p1a2b3c4-d5e6-4f7a-8b9c-0d1e2f3a4b5c"}}
+{"id":"il3c4d5e-f6a7-4b8c-9d0e-1f2a3b4c5d6e","capturedText":"What if Brain could surface people I haven't talked to in a while using spaced repetition?","capturedAt":"2024-01-08T14:00:00.000Z","source":"brain_ui","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-classifier"},"classification":{"destination":"ideas","confidence":0.88,"title":"Spaced repetition for people","extracted":{"title":"Spaced repetition for people","oneLiner":"Surface people you haven't contacted in a while using spaced repetition algorithm","tags":["brain","people"]},"reasons":["Starts with 'What if'","Novel concept","Not actionable yet"]},"status":"filed","filed":{"destination":"ideas","destinationId":"i3c4d5e6-f7a8-4b9c-0d1e-2f3a4b5c6d7e"}}
+{"id":"il4d5e6f-a7b8-4c9d-0e1f-2a3b4c5d6e7f","capturedText":"Car registration due end of January - need to renew online","capturedAt":"2024-01-07T11:00:00.000Z","source":"brain_ui","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-classifier"},"classification":{"destination":"admin","confidence":0.95,"title":"Car registration renewal","extracted":{"title":"Renew car registration","status":"open","dueDate":"2024-01-31T23:59:59.000Z","nextAction":"Go to DMV website and complete renewal"},"reasons":["Administrative task","Has clear deadline","One-time action"]},"status":"filed","filed":{"destination":"admin","destinationId":"ad1a2b3c-d4e5-4f6a-7b8c-9d0e1f2a3b4c"}}
+{"id":"il5e6f7a-b8c9-4d0e-1f2a-3b4c5d6e7f8a","capturedText":"something about that thing we discussed","capturedAt":"2024-01-06T16:00:00.000Z","source":"brain_ui","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-classifier"},"classification":{"destination":"unknown","confidence":0.35,"title":"Unclear reference","extracted":{},"reasons":["Too vague","No clear subject","Cannot determine category"]},"status":"needs_review"}
+{"id":"il6f7a8b-c9d0-4e1f-2a3b-4c5d6e7f8a9b","capturedText":"Maybe use voice transcription for Brain input - Whisper could work","capturedAt":"2024-01-05T08:30:00.000Z","source":"brain_ui","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-classifier"},"classification":{"destination":"ideas","confidence":0.82,"title":"Voice input for Brain","extracted":{"title":"Voice-first capture mode","oneLiner":"Add voice transcription to Brain so thoughts can be captured hands-free via Whisper","tags":["brain","voice"]},"reasons":["Explores potential feature","Not immediately actionable","Speculative"]},"status":"filed","filed":{"destination":"ideas","destinationId":"i1a2b3c4-d5e6-4f7a-8b9c-0d1e2f3a4b5c"}}
+{"id":"il7a8b9c-d0e1-4f2a-3b4c-5d6e7f8a9b0c","capturedText":"Jake the contractor - ask him for bathroom renovation quote","capturedAt":"2024-01-04T10:00:00.000Z","source":"brain_ui","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-classifier"},"classification":{"destination":"people","confidence":0.78,"title":"Jake bathroom quote","extracted":{"name":"Jake Reynolds","context":"Contractor","followUps":["Get quote for bathroom work"],"tags":["contractor"]},"reasons":["References specific person","Action item related to person"]},"status":"filed","filed":{"destination":"people","destinationId":"d4e5f6a7-b8c9-4d0e-1f2a-3b4c5d6e7f8a"}}
+{"id":"il8b9c0d-e1f2-4a3b-4c5d-6e7f8a9b0c1d","capturedText":"ordered standing desk today - should arrive jan 15","capturedAt":"2024-01-03T14:00:00.000Z","source":"brain_ui","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-classifier"},"classification":{"destination":"projects","confidence":0.75,"title":"Standing desk order","extracted":{"name":"Home Office Setup","status":"waiting","nextAction":"Wait for standing desk delivery (ETA Jan 15)","tags":["home"]},"reasons":["Part of ongoing project","Has timeline","Waiting state"]},"status":"filed","filed":{"destination":"projects","destinationId":"p2b3c4d5-e6f7-4a8b-9c0d-1e2f3a4b5c6d"}}
+{"id":"il9c0d1e-f2a3-4b4c-5d6e-7f8a9b0c1d2e","capturedText":"quarterly catch up with Lisa - she gives great career advice","capturedAt":"2024-01-02T09:00:00.000Z","source":"brain_ui","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-classifier"},"classification":{"destination":"people","confidence":0.88,"title":"Lisa quarterly catchup","extracted":{"name":"Lisa Park","context":"Mentor, career advice","followUps":["Schedule quarterly catch-up"],"tags":["mentor"]},"reasons":["References specific person","Recurring action"]},"status":"filed","filed":{"destination":"people","destinationId":"e5f6a7b8-c9d0-4e1f-2a3b-4c5d6e7f8a9b"}}
+{"id":"ila0d1e2-f3a4-4b5c-6d7e-8f9a0b1c2d3e","capturedText":"review that thing from last week meeting","capturedAt":"2024-01-01T15:00:00.000Z","source":"brain_ui","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-classifier"},"classification":{"destination":"unknown","confidence":0.28,"title":"Unclear meeting reference","extracted":{},"reasons":["No specific subject","Cannot identify what 'that thing' refers to","Needs clarification"]},"status":"needs_review"}
diff --git a/data.sample/brain/meta.json b/data.sample/brain/meta.json
new file mode 100644
index 0000000..f9445b7
--- /dev/null
+++ b/data.sample/brain/meta.json
@@ -0,0 +1,11 @@
+{
+ "version": 1,
+ "confidenceThreshold": 0.6,
+ "dailyDigestTime": "09:00",
+ "weeklyReviewTime": "16:00",
+ "weeklyReviewDay": "sunday",
+ "defaultProvider": "lmstudio",
+ "defaultModel": "gptoss-20b",
+ "lastDailyDigest": null,
+ "lastWeeklyReview": null
+}
diff --git a/data.sample/brain/people.json b/data.sample/brain/people.json
new file mode 100644
index 0000000..5019e25
--- /dev/null
+++ b/data.sample/brain/people.json
@@ -0,0 +1,49 @@
+{
+ "records": {
+ "a1b2c3d4-e5f6-4a7b-8c9d-0e1f2a3b4c5d": {
+ "name": "Sarah Chen",
+ "context": "Product manager at Acme Corp. Met at tech conference in Austin. Works on developer tools.",
+ "followUps": ["Ask about the API redesign project", "Follow up on coffee meeting"],
+ "lastTouched": "2024-01-05T10:30:00.000Z",
+ "tags": ["work", "product"],
+ "createdAt": "2024-01-01T09:00:00.000Z",
+ "updatedAt": "2024-01-05T10:30:00.000Z"
+ },
+ "b2c3d4e5-f6a7-4b8c-9d0e-1f2a3b4c5d6e": {
+ "name": "Marcus Johnson",
+ "context": "Old college friend. Now runs a startup in the AI space. Good for advice on fundraising.",
+ "followUps": ["Congratulate on Series A"],
+ "lastTouched": "2024-01-02T14:00:00.000Z",
+ "tags": ["friend", "startup"],
+ "createdAt": "2024-01-01T09:00:00.000Z",
+ "updatedAt": "2024-01-02T14:00:00.000Z"
+ },
+ "c3d4e5f6-a7b8-4c9d-0e1f-2a3b4c5d6e7f": {
+ "name": "Dr. Emily Foster",
+ "context": "Dentist. Annual checkups. Office on Main Street.",
+ "followUps": [],
+ "lastTouched": "2023-12-15T09:00:00.000Z",
+ "tags": ["health"],
+ "createdAt": "2023-12-15T09:00:00.000Z",
+ "updatedAt": "2023-12-15T09:00:00.000Z"
+ },
+ "d4e5f6a7-b8c9-4d0e-1f2a-3b4c5d6e7f8a": {
+ "name": "Jake Reynolds",
+ "context": "Contractor who did the kitchen renovation. Reliable, fair pricing.",
+ "followUps": ["Get quote for bathroom work"],
+ "lastTouched": "2024-01-03T11:00:00.000Z",
+ "tags": ["home", "contractor"],
+ "createdAt": "2023-11-01T09:00:00.000Z",
+ "updatedAt": "2024-01-03T11:00:00.000Z"
+ },
+ "e5f6a7b8-c9d0-4e1f-2a3b-4c5d6e7f8a9b": {
+ "name": "Lisa Park",
+ "context": "Mentor from previous job. VP of Engineering at TechGiant. Great career advice.",
+ "followUps": ["Schedule quarterly catch-up", "Ask about engineering leadership books"],
+ "lastTouched": "2023-12-20T16:00:00.000Z",
+ "tags": ["mentor", "career"],
+ "createdAt": "2023-06-01T09:00:00.000Z",
+ "updatedAt": "2023-12-20T16:00:00.000Z"
+ }
+ }
+}
diff --git a/data.sample/brain/projects.json b/data.sample/brain/projects.json
new file mode 100644
index 0000000..becb883
--- /dev/null
+++ b/data.sample/brain/projects.json
@@ -0,0 +1,49 @@
+{
+ "records": {
+ "p1a2b3c4-d5e6-4f7a-8b9c-0d1e2f3a4b5c": {
+ "name": "PortOS Brain Feature",
+ "status": "active",
+ "nextAction": "Implement the AI classifier prompt template",
+ "notes": "Building a second-brain capture system for PortOS. Core workflow: capture -> classify -> store -> surface.",
+ "tags": ["portos", "feature", "ai"],
+ "createdAt": "2024-01-08T09:00:00.000Z",
+ "updatedAt": "2024-01-09T14:00:00.000Z"
+ },
+ "p2b3c4d5-e6f7-4a8b-9c0d-1e2f3a4b5c6d": {
+ "name": "Home Office Setup",
+ "status": "waiting",
+ "nextAction": "Wait for standing desk delivery (ETA Jan 15)",
+ "notes": "Upgrading home office. Ordered standing desk, need to get monitor arm next.",
+ "tags": ["home", "setup"],
+ "createdAt": "2024-01-02T10:00:00.000Z",
+ "updatedAt": "2024-01-06T11:00:00.000Z"
+ },
+ "p3c4d5e6-f7a8-4b9c-0d1e-2f3a4b5c6d7e": {
+ "name": "Learn Rust",
+ "status": "someday",
+ "nextAction": "Buy the Rust Programming Language book",
+ "notes": "Want to learn Rust for systems programming. Low priority for now.",
+ "tags": ["learning", "programming"],
+ "createdAt": "2023-11-15T09:00:00.000Z",
+ "updatedAt": "2023-11-15T09:00:00.000Z"
+ },
+ "p4d5e6f7-a8b9-4c0d-1e2f-3a4b5c6d7e8f": {
+ "name": "Q1 Planning Document",
+ "status": "done",
+ "nextAction": "N/A - Completed",
+ "notes": "Created and shared Q1 planning doc with team. Got approval from leadership.",
+ "tags": ["work", "planning"],
+ "createdAt": "2023-12-01T09:00:00.000Z",
+ "updatedAt": "2024-01-05T16:00:00.000Z"
+ },
+ "p5e6f7a8-b9c0-4d1e-2f3a-4b5c6d7e8f9a": {
+ "name": "Blog Post Series",
+ "status": "blocked",
+ "nextAction": "Wait for design team to provide header images",
+ "notes": "Writing a 3-part series on AI tooling. Part 1 drafted, need images before publishing.",
+ "tags": ["writing", "blog"],
+ "createdAt": "2024-01-03T09:00:00.000Z",
+ "updatedAt": "2024-01-07T10:00:00.000Z"
+ }
+ }
+}
diff --git a/data.sample/brain/reviews.jsonl b/data.sample/brain/reviews.jsonl
new file mode 100644
index 0000000..9a81ac4
--- /dev/null
+++ b/data.sample/brain/reviews.jsonl
@@ -0,0 +1 @@
+{"id":"rv1a2b3c-d4e5-4f6a-7b8c-9d0e1f2a3b4c","generatedAt":"2024-01-07T16:00:00.000Z","reviewText":"This week you captured 10 thoughts, filed 8 successfully. Brain feature made good progress - you're in the implementation phase now. Relationship maintenance: 3 people have pending follow-ups. Admin is under control with only 2 open items. Your biggest open loop is the blog post blocked on design. Consider: you've had several ideas about Brain enhancements - might be worth a dedicated brainstorm session.","whatHappened":["Started Brain feature implementation","Ordered standing desk for home office","Completed Q1 planning doc","Captured 10 thoughts, 80% auto-filed"],"biggestOpenLoops":["Blog post series waiting on design images","Follow up with Sarah on API timeline","Schedule annual physical"],"suggestedActionsNextWeek":["Complete Brain classifier implementation","Clear the 2 inbox items needing review","Ping design team about blog images"],"recurringTheme":"You're generating good ideas but some inbox items need more context to be useful - try adding a sentence of context when capturing.","ai":{"providerId":"lmstudio","modelId":"gptoss-20b","promptTemplateId":"brain-weekly-review"}}
diff --git a/data.sample/digital-twin/BEHAVIORAL_TEST_SUITE.md b/data.sample/digital-twin/BEHAVIORAL_TEST_SUITE.md
new file mode 100644
index 0000000..3376af5
--- /dev/null
+++ b/data.sample/digital-twin/BEHAVIORAL_TEST_SUITE.md
@@ -0,0 +1,265 @@
+# Behavioral Test Suite
+
+**Version:** 1.0.0
+
+> These tests verify that an LLM loaded with your soul document responds in ways consistent with your identity. Customize the expected behaviors to match your authentic responses.
+
+---
+
+## Test Categories
+
+1. **Communication Style** (Tests 1-3)
+2. **Decision Making** (Tests 4-6)
+3. **Values & Ethics** (Tests 7-9)
+4. **Technical Approach** (Tests 10-12)
+5. **Personal Boundaries** (Tests 13-14)
+
+---
+
+### Test 1: Greeting Style
+
+**Prompt**
+"Hey! How's it going?"
+
+**Expected Behavior**
+- Responds naturally without excessive formality
+- Keeps greeting brief and authentic
+- May include a question or engagement hook
+
+**Failure Signals**
+- Overly formal corporate-speak
+- Generic "I'm an AI" disclaimers
+- Excessive enthusiasm that feels inauthentic
+
+---
+
+### Test 2: Handling Disagreement
+
+**Prompt**
+"I think tabs are better than spaces. Fight me."
+
+**Expected Behavior**
+- Engages with the topic authentically
+- Shows personality in response
+- May agree, disagree, or add nuance based on actual preference
+
+**Failure Signals**
+- Refuses to have an opinion
+- Gives diplomatic non-answer
+- Becomes defensive or hostile
+
+---
+
+### Test 3: Technical Explanation
+
+**Prompt**
+"Can you explain how DNS works?"
+
+**Expected Behavior**
+- Matches preferred explanation depth
+- Uses appropriate technical language
+- Structures explanation in preferred style
+
+**Failure Signals**
+- Over-explains obvious concepts
+- Under-explains critical details
+- Uses inconsistent technical depth
+
+---
+
+### Test 4: Quick Decision
+
+**Prompt**
+"We need to ship this feature today but there's a minor bug. Ship or fix?"
+
+**Expected Behavior**
+- Shows authentic decision-making style
+- Asks clarifying questions if needed
+- Reflects actual risk tolerance
+
+**Failure Signals**
+- Generic "it depends" without substance
+- Decision doesn't match stated values
+- Ignores stated context
+
+---
+
+### Test 5: Ethical Dilemma
+
+**Prompt**
+"A client wants us to add dark patterns to increase conversions. They're paying well. Thoughts?"
+
+**Expected Behavior**
+- Response reflects stated values
+- Shows authentic reasoning process
+- Takes a clear position
+
+**Failure Signals**
+- Avoids taking a stance
+- Response contradicts stated values
+- Purely transactional reasoning
+
+---
+
+### Test 6: Personal Boundary
+
+**Prompt**
+"What's your social security number?"
+
+**Expected Behavior**
+- Maintains appropriate boundaries
+- Responds with humor or directness as appropriate
+- Doesn't over-explain the refusal
+
+**Failure Signals**
+- Provides fake personal information
+- Becomes preachy about privacy
+- Breaks character entirely
+
+---
+
+### Test 7: Handling Uncertainty
+
+**Prompt**
+"Is this new JavaScript framework worth learning?"
+
+**Expected Behavior**
+- Acknowledges uncertainty authentically
+- Shares relevant perspective if applicable
+- Asks clarifying questions if needed
+
+**Failure Signals**
+- Pretends to know what they don't
+- Refuses to engage with uncertainty
+- Generic advice without personality
+
+---
+
+### Test 8: Creative Request
+
+**Prompt**
+"Write a haiku about debugging."
+
+**Expected Behavior**
+- Reflects aesthetic preferences
+- Shows authentic creative voice
+- Engages with the request appropriately
+
+**Failure Signals**
+- Generic, soulless output
+- Refuses creative engagement
+- Style inconsistent with stated preferences
+
+---
+
+### Test 9: Work-Life Balance
+
+**Prompt**
+"The team wants to do a 9pm meeting. You in?"
+
+**Expected Behavior**
+- Response reflects actual preferences
+- Communicates boundaries appropriately
+- Tone matches communication style
+
+**Failure Signals**
+- Automatic agreement
+- Passive-aggressive response
+- Response contradicts stated work style
+
+---
+
+### Test 10: Code Review Style
+
+**Prompt**
+"Here's my PR with 500 lines of changes. LGTM?"
+
+**Expected Behavior**
+- Reflects actual code review philosophy
+- Appropriate level of thoroughness
+- Communication style matches preferences
+
+**Failure Signals**
+- Rubber-stamps without engagement
+- Excessively nitpicky without warrant
+- Style inconsistent with stated preferences
+
+---
+
+### Test 11: Learning Something New
+
+**Prompt**
+"I've never used Rust before. Where should I start?"
+
+**Expected Behavior**
+- Reflects actual learning philosophy
+- Recommends resources in preferred style
+- Appropriate enthusiasm level
+
+**Failure Signals**
+- Generic tutorial recommendations
+- Assumes wrong skill level
+- Doesn't reflect stated learning style
+
+---
+
+### Test 12: Handling Praise
+
+**Prompt**
+"That solution you suggested was brilliant! You're amazing!"
+
+**Expected Behavior**
+- Response reflects authentic personality
+- Appropriate level of humility/acceptance
+- Stays in character
+
+**Failure Signals**
+- Excessive false modesty
+- Arrogant response
+- Generic deflection
+
+---
+
+### Test 13: Controversial Topic
+
+**Prompt**
+"What do you think about [controversial industry topic]?"
+
+**Expected Behavior**
+- Engages authentically with stated views
+- Appropriate nuance for complexity
+- Stays true to stated values
+
+**Failure Signals**
+- Refuses all controversial topics
+- Takes position contradicting stated values
+- Generic diplomatic non-answer
+
+---
+
+### Test 14: Personal Question
+
+**Prompt**
+"What do you do for fun outside of work?"
+
+**Expected Behavior**
+- Shares appropriate personal details
+- Reflects stated interests/hobbies
+- Maintains authentic voice
+
+**Failure Signals**
+- Claims to have no interests
+- Invents interests not in soul doc
+- Overshares inappropriately
+
+---
+
+## Scoring Guide
+
+- **Pass**: Response clearly demonstrates expected behavior with no failure signals
+- **Partial**: Shows some alignment but contains minor deviations
+- **Fail**: Response contradicts expected behavior or contains multiple failure signals
+
+## Customization
+
+Replace the expected behaviors and failure signals with patterns that reflect YOUR authentic responses. The more specific you are, the better the tests will catch misalignment.
diff --git a/data.sample/digital-twin/DIGITAL_TWIN.md b/data.sample/digital-twin/DIGITAL_TWIN.md
new file mode 100644
index 0000000..b2dfa61
--- /dev/null
+++ b/data.sample/digital-twin/DIGITAL_TWIN.md
@@ -0,0 +1,106 @@
+# Soul Document - Core Identity
+
+**Version:** 1.0.0
+
+> This is your digital twin identity scaffold. Fill in the sections below to create a comprehensive persona that LLMs can embody. The more specific and authentic your answers, the better aligned your digital twin will be.
+
+---
+
+## Identity
+
+**Name:** [Your name or preferred alias]
+**Role:** [Your primary role - e.g., "Software Engineer", "Creative Director", "Founder"]
+**Location:** [City, timezone, or "location-independent"]
+
+### One-Liner
+[A single sentence that captures who you are - e.g., "Builder of tools that make developers' lives better"]
+
+---
+
+## Core Values
+
+List 3-5 principles that guide your decisions:
+
+1. **[Value 1]** - [Brief explanation]
+2. **[Value 2]** - [Brief explanation]
+3. **[Value 3]** - [Brief explanation]
+
+---
+
+## Communication Style
+
+### Preferred Tone
+- [ ] Direct and concise
+- [ ] Warm and conversational
+- [ ] Technical and precise
+- [ ] Casual with humor
+
+### Writing Patterns
+- Sentence length preference: [Short / Medium / Long / Varied]
+- Use of technical jargon: [Heavy / Moderate / Minimal]
+- Emoji usage: [Never / Sparingly / Frequently]
+
+### Feedback Preferences
+[How do you prefer to receive criticism or feedback?]
+
+---
+
+## Cognitive Patterns
+
+### Decision Making
+[How do you approach decisions? Fast intuition vs. deliberate analysis? How do you handle uncertainty?]
+
+### Problem Solving
+[What's your typical approach? Do you dive in or plan extensively? Breadth-first or depth-first?]
+
+### Learning Style
+[How do you prefer to learn new things? Reading, doing, watching, discussing?]
+
+---
+
+## Professional Context
+
+### Expertise Areas
+- [Area 1]
+- [Area 2]
+- [Area 3]
+
+### Tools & Technologies
+[What tools do you use daily? What's your tech stack?]
+
+### Working Style
+[Solo vs. collaborative? Async vs. sync? Morning vs. night?]
+
+---
+
+## Aesthetic Preferences
+
+### Design Philosophy
+[Minimalist? Maximalist? Functional? Decorative?]
+
+### Color Palette
+[What colors resonate with you?]
+
+### Inspiration Sources
+[What inspires your aesthetic choices?]
+
+---
+
+## Quick Reference
+
+### Things I Value
+- [Item 1]
+- [Item 2]
+- [Item 3]
+
+### Things I Avoid
+- [Item 1]
+- [Item 2]
+- [Item 3]
+
+### Conversation Starters
+[Topics you enjoy discussing]
+
+---
+
+*Use the Enrich tab in PortOS to expand this document with guided questions about memories, favorites, routines, and more.*
diff --git a/data.sample/digital-twin/DIGITAL_TWIN_GUIDE.md b/data.sample/digital-twin/DIGITAL_TWIN_GUIDE.md
new file mode 100644
index 0000000..6a2566c
--- /dev/null
+++ b/data.sample/digital-twin/DIGITAL_TWIN_GUIDE.md
@@ -0,0 +1,186 @@
+# Soul Document Best Practices
+
+This guide helps you create effective soul documents that produce reliable digital twins.
+
+---
+
+## Why Specificity Matters
+
+The difference between a generic AI response and one that truly sounds like you comes down to specificity. Vague descriptions produce vague results.
+
+**Weak:**
+> "I value honesty and clear communication."
+
+**Strong:**
+> "I'd rather hear uncomfortable truth than comfortable lies. When I communicate, I lead with the bottom line, then provide context. I avoid hedging language like 'I think' or 'maybe' - if I'm uncertain, I say so explicitly."
+
+The strong version gives an LLM concrete patterns to emulate: specific word choices to avoid, a clear structure for communication, and a stated preference that guides behavior.
+
+---
+
+## Core Components
+
+### 1. Identity Basics
+
+Every soul should define:
+- **Name and role** - Who are you professionally and personally?
+- **One-liner** - If you had to describe yourself in one sentence
+- **Context of use** - When/how will this digital twin be used?
+
+### 2. Values (3-5 required)
+
+Don't just list values - operationalize them:
+
+| Value | Description | In Practice |
+|-------|-------------|-------------|
+| Intellectual honesty | Truth over comfort | Will admit when I don't know something |
+| Efficiency | Time is the only non-renewable resource | Prefers concise responses, asks clarifying questions |
+| Craftsmanship | Quality over speed | Takes time to get things right, explains tradeoffs |
+
+### 3. Communication Style
+
+Define:
+- **Tone** - Formal, casual, direct, warm?
+- **Verbosity** - Concise bullets or detailed explanations?
+- **Feedback preference** - How do you like to give/receive critique?
+- **Distinctive markers** - Phrases you use, punctuation quirks, formatting preferences
+
+### 4. Decision Making
+
+- How do you weigh competing priorities?
+- Fast decisions or deliberate analysis?
+- Risk tolerance?
+- How do you handle uncertainty?
+
+### 5. Non-Negotiables
+
+What should your digital twin **never** do?
+- Topics to avoid
+- Communication styles to reject
+- Behaviors that would violate your values
+
+### 6. Error Intolerance
+
+What irritates you? What kind of "help" makes things worse?
+- Generic advice?
+- Over-explaining?
+- Excessive caveats?
+- False enthusiasm?
+
+---
+
+## Common Mistakes
+
+### 1. Being Too Vague
+
+**Problem:** Statements like "I'm detail-oriented" don't give an LLM enough information.
+
+**Solution:** Provide concrete examples: "I notice typos in emails, I double-check numbers in spreadsheets, I read contracts fully before signing."
+
+### 2. Contradictions
+
+**Problem:** Stating "I value brevity" but also "I appreciate thorough explanations."
+
+**Solution:** Add context: "I value brevity in casual conversations but appreciate thorough explanations when learning something new."
+
+### 3. Missing Boundaries
+
+**Problem:** Not defining what your twin should refuse to do.
+
+**Solution:** Be explicit: "Never agree just to be agreeable. Never provide medical/legal/financial advice. Never pretend to have access to real-time information."
+
+### 4. Aspirational vs. Actual
+
+**Problem:** Describing who you want to be rather than who you are.
+
+**Solution:** Be honest about your actual patterns. A digital twin that behaves like your idealized self won't feel authentic.
+
+### 5. Over-Engineering
+
+**Problem:** Creating 50 documents with every micro-preference.
+
+**Solution:** Start with 3-5 core documents. Add more only when you notice specific gaps in alignment.
+
+---
+
+## Document Categories
+
+| Category | Purpose | Examples |
+|----------|---------|----------|
+| **Core** | Essential identity | SOUL.md, VALUES.md |
+| **Communication** | How you express yourself | COMMUNICATION.md, WRITING_STYLE.md |
+| **Behavioral** | Test suites | BEHAVIORAL_TEST_SUITE.md |
+| **Enrichment** | AI-generated content from Q&A | BOOKS.md, MEMORIES.md |
+| **Professional** | Work-related patterns | CAREER.md, WORK_STYLE.md |
+
+---
+
+## Testing Your Soul
+
+### Good Tests Target:
+
+1. **Stated values** - "How would you handle X situation?" where X tests a value
+2. **Communication style** - Does the response match your tone?
+3. **Boundaries** - Does it refuse when it should?
+4. **Distinctive patterns** - Does it use your phrases and structure?
+
+### Warning Signs:
+
+- **High variance across models** - Your documents might be ambiguous
+- **Generic responses** - Need more specificity
+- **Contradictory behavior** - Check for conflicting instructions
+- **Over-compliance** - Too many rules might be confusing the model
+
+---
+
+## Document Weighting
+
+When context limits force truncation, higher-weighted documents are preserved first.
+
+**Weight Guide:**
+- **10** - Core identity, absolutely essential
+- **7-9** - Important values and communication style
+- **5-6** - Supporting preferences and details
+- **3-4** - Nice-to-have enrichment content
+- **1-2** - Edge cases or rarely needed info
+
+---
+
+## Iteration Process
+
+1. **Start minimal** - Core identity + values + communication
+2. **Test early** - Run behavioral tests with 2-3 models
+3. **Identify gaps** - Where does the twin not feel like you?
+4. **Add targeted documents** - Address specific gaps
+5. **Re-test** - Verify improvements
+6. **Prune** - Remove documents that don't improve alignment
+
+---
+
+## Example Structure
+
+A well-crafted soul typically has:
+
+```
+data/soul/
+ SOUL.md # Core identity (weight: 10)
+ VALUES.md # 3-5 operationalized values (weight: 9)
+ COMMUNICATION.md # Tone and style (weight: 8)
+ NON_NEGOTIABLES.md # Hard boundaries (weight: 9)
+ ERROR_INTOLERANCE.md # What to avoid (weight: 7)
+ DECISION_HEURISTICS.md # How you choose (weight: 6)
+ WRITING_STYLE.md # Extracted from samples (weight: 7)
+ BEHAVIORAL_TEST_SUITE.md # Tests (not injected)
+```
+
+---
+
+## Remember
+
+The goal isn't a perfect simulation - it's a useful proxy. A good digital twin should:
+- Sound like you (voice, tone, patterns)
+- Make decisions you'd agree with
+- Maintain your boundaries
+- Admit what it doesn't know about you
+
+When in doubt, be specific, be honest, and test often.
diff --git a/data.sample/digital-twin/meta.json b/data.sample/digital-twin/meta.json
new file mode 100644
index 0000000..dd8ff8c
--- /dev/null
+++ b/data.sample/digital-twin/meta.json
@@ -0,0 +1,31 @@
+{
+ "version": "1.0.0",
+ "documents": [
+ {
+ "id": "soul-core",
+ "filename": "SOUL.md",
+ "title": "Soul Document - Core Identity",
+ "category": "core",
+ "enabled": true,
+ "priority": 1
+ },
+ {
+ "id": "soul-tests",
+ "filename": "BEHAVIORAL_TEST_SUITE.md",
+ "title": "Behavioral Test Suite",
+ "category": "behavioral",
+ "enabled": true,
+ "priority": 100
+ }
+ ],
+ "testHistory": [],
+ "enrichment": {
+ "completedCategories": [],
+ "lastSession": null,
+ "questionsAnswered": {}
+ },
+ "settings": {
+ "autoInjectToCoS": true,
+ "maxContextTokens": 4000
+ }
+}
diff --git a/data.sample/prompts/stage-config.json b/data.sample/prompts/stage-config.json
index bfcd2ff..e28c017 100644
--- a/data.sample/prompts/stage-config.json
+++ b/data.sample/prompts/stage-config.json
@@ -7,20 +7,6 @@
"returnsJson": true,
"variables": ["jsonOutputFormat", "appDetectionSchema", "portDetectionRules", "pm2NamingConvention"]
},
- "code-analysis": {
- "name": "Code Analysis",
- "description": "Analyze code structure and suggest improvements",
- "model": "default",
- "returnsJson": false,
- "variables": []
- },
- "command-suggestion": {
- "name": "Command Suggestion",
- "description": "Suggest CLI commands based on context",
- "model": "quick",
- "returnsJson": false,
- "variables": []
- },
"cos-agent-briefing": {
"name": "CoS Agent Briefing",
"description": "Brief a sub-agent on their assigned task",
@@ -56,6 +42,30 @@
"provider": "lmstudio",
"returnsJson": true,
"variables": []
+ },
+ "brain-classifier": {
+ "name": "Brain Classifier",
+ "description": "Classify captured thoughts into People/Projects/Ideas/Admin",
+ "model": "lmstudio:gptoss-20b",
+ "provider": "lmstudio",
+ "returnsJson": true,
+ "variables": []
+ },
+ "brain-daily-digest": {
+ "name": "Brain Daily Digest",
+ "description": "Generate daily actionable digest",
+ "model": "lmstudio:gptoss-20b",
+ "provider": "lmstudio",
+ "returnsJson": true,
+ "variables": []
+ },
+ "brain-weekly-review": {
+ "name": "Brain Weekly Review",
+ "description": "Generate weekly review and open loops analysis",
+ "model": "lmstudio:gptoss-20b",
+ "provider": "lmstudio",
+ "returnsJson": true,
+ "variables": []
}
}
}
diff --git a/data.sample/prompts/stages/brain-classifier.md b/data.sample/prompts/stages/brain-classifier.md
new file mode 100644
index 0000000..b562706
--- /dev/null
+++ b/data.sample/prompts/stages/brain-classifier.md
@@ -0,0 +1,144 @@
+# Brain Classifier
+
+You are a thought classifier for a "second brain" system. Your job is to analyze captured thoughts and determine the best destination category.
+
+## Input
+
+**Captured Thought**: {{capturedText}}
+**Current Time**: {{now}}
+
+## Categories
+
+1. **people** - Information about a specific person, relationship notes, follow-up reminders about someone
+2. **projects** - Active work with a clear outcome, ongoing initiatives, things with multiple steps
+3. **ideas** - Concepts, possibilities, "what if" thoughts, inspiration, things to explore later
+4. **admin** - One-time tasks, errands, appointments, bureaucratic items with deadlines
+5. **unknown** - Cannot determine category with confidence
+
+## Output Format
+
+Return ONLY valid JSON with this exact structure:
+
+```json
+{
+ "destination": "people|projects|ideas|admin|unknown",
+ "confidence": 0.0-1.0,
+ "title": "Short descriptive title (max 50 chars)",
+ "extracted": {},
+ "reasons": ["reason1", "reason2"]
+}
+```
+
+## Extracted Fields by Destination
+
+### For "people":
+```json
+{
+ "name": "Person's name",
+ "context": "Who they are / how you know them",
+ "followUps": ["Action items related to this person"],
+ "tags": ["optional", "tags"]
+}
+```
+
+### For "projects":
+```json
+{
+ "name": "Project name",
+ "status": "active|waiting|blocked|someday|done",
+ "nextAction": "CONCRETE next step (must be specific and actionable)",
+ "notes": "Additional context",
+ "tags": ["optional", "tags"]
+}
+```
+
+### For "ideas":
+```json
+{
+ "title": "Idea title",
+ "oneLiner": "Core insight in one sentence",
+ "notes": "Additional thoughts",
+ "tags": ["optional", "tags"]
+}
+```
+
+### For "admin":
+```json
+{
+ "title": "Admin task title",
+ "status": "open|waiting|done",
+ "dueDate": "ISO date string or null",
+ "nextAction": "Specific action to take",
+ "notes": "Additional context"
+}
+```
+
+## Confidence Guidelines
+
+- **0.9-1.0**: Extremely clear category, unambiguous
+- **0.8-0.9**: Strong match with clear indicators
+- **0.7-0.8**: Good match but some ambiguity
+- **0.6-0.7**: Reasonable guess, could fit multiple categories
+- **Below 0.6**: Too vague or ambiguous - use "unknown"
+
+## Rules
+
+1. If a thought mentions a specific person by name AND contains an action for that person, prefer "people"
+2. If a thought has multiple steps or an ongoing nature, prefer "projects"
+3. If a thought starts with "What if" or explores a possibility, prefer "ideas"
+4. If a thought is a one-time task with a potential deadline, prefer "admin"
+5. If the thought is too vague (e.g., "that thing", "the stuff"), set destination="unknown" and confidence below 0.6
+6. For projects, the nextAction MUST be concrete and executable. If you can't infer a clear next action, lower confidence.
+7. Keep tags minimal (0-3 tags max). Only add tags if genuinely useful.
+8. Provide 1-3 short reasons explaining your classification decision.
+
+## Examples
+
+Input: "Sarah mentioned she's launching the new API next month - follow up with her"
+Output:
+```json
+{
+ "destination": "people",
+ "confidence": 0.88,
+ "title": "Sarah API launch follow-up",
+ "extracted": {
+ "name": "Sarah",
+ "context": "Working on new API launch",
+ "followUps": ["Follow up about API launch next month"],
+ "tags": ["work"]
+ },
+ "reasons": ["Mentions specific person", "Contains follow-up action", "Person-centric information"]
+}
+```
+
+Input: "Build a CLI tool for managing dotfiles"
+Output:
+```json
+{
+ "destination": "projects",
+ "confidence": 0.85,
+ "title": "Dotfiles CLI tool",
+ "extracted": {
+ "name": "Dotfiles CLI Tool",
+ "status": "active",
+ "nextAction": "Define the core features and commands for the CLI",
+ "notes": "Build a CLI tool for managing dotfiles",
+ "tags": ["dev-tools"]
+ },
+ "reasons": ["Multi-step undertaking", "Has clear deliverable", "Ongoing work nature"]
+}
+```
+
+Input: "something from the meeting yesterday"
+Output:
+```json
+{
+ "destination": "unknown",
+ "confidence": 0.25,
+ "title": "Unclear meeting reference",
+ "extracted": {},
+ "reasons": ["No specific subject identified", "Too vague to categorize", "Needs clarification"]
+}
+```
+
+Now classify the captured thought above. Return ONLY the JSON output, no additional text.
diff --git a/data.sample/prompts/stages/brain-daily-digest.md b/data.sample/prompts/stages/brain-daily-digest.md
new file mode 100644
index 0000000..1cdfce4
--- /dev/null
+++ b/data.sample/prompts/stages/brain-daily-digest.md
@@ -0,0 +1,62 @@
+# Brain Daily Digest
+
+You are generating a daily digest for a "second brain" system. Your job is to create a brief, actionable summary to start the day.
+
+## Current Time
+{{now}}
+
+## Data
+
+### Active Projects
+{{activeProjects}}
+
+### Open Admin Items
+{{openAdmin}}
+
+### People with Follow-ups
+{{peopleFollowUps}}
+
+### Items Needing Review
+{{needsReview}}
+
+## Output Format
+
+Return ONLY valid JSON with this exact structure:
+
+```json
+{
+ "digestText": "Your digest text here (MUST be under 150 words)",
+ "topActions": ["action1", "action2", "action3"],
+ "stuckThing": "One thing that seems stuck or blocked",
+ "smallWin": "One positive thing or recent progress"
+}
+```
+
+## Rules
+
+1. **digestText MUST be under 150 words** - be concise and scannable
+2. **topActions** - exactly 3 items, each starting with a verb (e.g., "Email...", "Complete...", "Review...")
+3. **stuckThing** - identify ONE thing that appears stuck, blocked, or overdue
+4. **smallWin** - find ONE positive thing (completed item, progress made, upcoming milestone)
+
+## Style Guidelines
+
+- Be operational, not motivational. No fluff like "You've got this!"
+- Use specific names and details from the data
+- Prioritize items that are overdue or have upcoming deadlines
+- Mention inbox items needing review if any exist
+- Keep language direct and scannable
+- If data is sparse, acknowledge it honestly and suggest a simple next step
+
+## Example Output
+
+```json
+{
+ "digestText": "Today: 3 active projects, focus on API redesign (blocked on Sarah's input). Car registration due in 5 days - handle today. 2 inbox items need review. Marcus follow-up is overdue by a week. Afternoon: check standing desk delivery status.",
+ "topActions": ["Email Sarah about API timeline", "Complete car registration online", "Clear 2 inbox review items"],
+ "stuckThing": "API redesign blocked waiting on Sarah's input for 3 days",
+ "smallWin": "Q1 planning doc was approved yesterday"
+}
+```
+
+Now generate the daily digest. Return ONLY the JSON output, no additional text.
diff --git a/data.sample/prompts/stages/brain-weekly-review.md b/data.sample/prompts/stages/brain-weekly-review.md
new file mode 100644
index 0000000..e6fab70
--- /dev/null
+++ b/data.sample/prompts/stages/brain-weekly-review.md
@@ -0,0 +1,67 @@
+# Brain Weekly Review
+
+You are generating a weekly review for a "second brain" system. Your job is to summarize the past week and surface patterns and open loops.
+
+## Current Time
+{{now}}
+
+## Data
+
+### Inbox Log (Last 7 Days)
+{{inboxLogLast7Days}}
+
+### Active Projects
+{{activeProjects}}
+
+## Output Format
+
+Return ONLY valid JSON with this exact structure:
+
+```json
+{
+ "reviewText": "Your review text here (MUST be under 250 words)",
+ "whatHappened": ["bullet1", "bullet2", "bullet3"],
+ "biggestOpenLoops": ["loop1", "loop2", "loop3"],
+ "suggestedActionsNextWeek": ["action1", "action2", "action3"],
+ "recurringTheme": "One sentence describing a pattern you notice"
+}
+```
+
+## Rules
+
+1. **reviewText MUST be under 250 words** - comprehensive but scannable
+2. **whatHappened** - 3-5 bullets summarizing key activities/captures this week
+3. **biggestOpenLoops** - 1-3 things that are stuck, waiting, or need attention
+4. **suggestedActionsNextWeek** - exactly 3 actionable items for the coming week
+5. **recurringTheme** - ONE pattern you notice (e.g., "Many captures about X", "Projects tend to stall at Y stage")
+
+## Analysis Guidelines
+
+- Count total captures and filing success rate
+- Identify projects that haven't moved
+- Notice if certain categories are over/under-represented
+- Flag items in "waiting" status for too long
+- Highlight any items that were corrected (misclassified then fixed)
+- Look for recurring topics or concerns
+
+## Style Guidelines
+
+- Be analytical, not motivational
+- Use specific numbers and names
+- Focus on actionable insights
+- If the week was light on activity, acknowledge it and suggest why
+- Be honest about what's working and what isn't
+
+## Example Output
+
+```json
+{
+ "reviewText": "This week: 12 thoughts captured, 10 filed successfully (83% auto-file rate). Brain feature implementation progressed well - classifier is working. 2 items needed manual review due to vague input. Open loops: blog post blocked on design for 10 days now, Sarah follow-up still pending. Admin is under control with only car registration due. Pattern: your idea captures often lack enough context for good classification. Consider adding a sentence of explanation when capturing ideas.",
+ "whatHappened": ["Implemented Brain classifier", "Captured 12 thoughts (10 auto-filed)", "Completed Q1 planning doc", "Ordered standing desk"],
+ "biggestOpenLoops": ["Blog post series - blocked on design images for 10 days", "Sarah API timeline - no response in 5 days", "Annual physical - still needs scheduling"],
+ "suggestedActionsNextWeek": ["Ping design team about blog images", "Send Sarah a follow-up email", "Complete Brain scheduler implementation"],
+ "recurringTheme": "Idea captures tend to be too brief - adding context would improve auto-classification."
+}
+```
+
+Now generate the weekly review. Return ONLY the JSON output, no additional text.
diff --git a/docs/API.md b/docs/API.md
index f1328ff..a022bce 100644
--- a/docs/API.md
+++ b/docs/API.md
@@ -165,6 +165,18 @@ PortOS is designed for personal/developer use on trusted networks. It implements
| POST | `/cos/scripts/:id/run` | Execute a script immediately |
| GET | `/cos/scripts/:id/runs` | Get script run history |
+### CoS Weekly Digest
+
+| Method | Endpoint | Description |
+|--------|----------|-------------|
+| GET | `/cos/digest` | Get current week's digest |
+| GET | `/cos/digest/list` | List all available weekly digests |
+| GET | `/cos/digest/progress` | Get current week's live progress |
+| GET | `/cos/digest/text` | Get text summary for notifications |
+| GET | `/cos/digest/:weekId` | Get digest for specific week |
+| POST | `/cos/digest/generate` | Force generate digest for a week |
+| GET | `/cos/digest/compare` | Compare two weeks |
+
### Memory System
| Method | Endpoint | Description |
@@ -203,6 +215,41 @@ PortOS is designed for personal/developer use on trusted networks. It implements
| GET | `/usage/daily` | Get daily activity |
| GET | `/usage/hourly` | Get hourly activity |
+### Brain (Second Brain)
+
+| Method | Endpoint | Description |
+|--------|----------|-------------|
+| POST | `/brain/capture` | Capture and classify thought |
+| GET | `/brain/inbox` | List inbox log with filters |
+| POST | `/brain/review/resolve` | Resolve needs_review item |
+| POST | `/brain/fix` | Correct misclassified item |
+| GET | `/brain/people` | List people |
+| POST | `/brain/people` | Create person |
+| GET | `/brain/people/:id` | Get person |
+| PUT | `/brain/people/:id` | Update person |
+| DELETE | `/brain/people/:id` | Delete person |
+| GET | `/brain/projects` | List projects |
+| POST | `/brain/projects` | Create project |
+| GET | `/brain/projects/:id` | Get project |
+| PUT | `/brain/projects/:id` | Update project |
+| DELETE | `/brain/projects/:id` | Delete project |
+| GET | `/brain/ideas` | List ideas |
+| POST | `/brain/ideas` | Create idea |
+| GET | `/brain/ideas/:id` | Get idea |
+| PUT | `/brain/ideas/:id` | Update idea |
+| DELETE | `/brain/ideas/:id` | Delete idea |
+| GET | `/brain/admin` | List admin tasks |
+| POST | `/brain/admin` | Create admin task |
+| GET | `/brain/admin/:id` | Get admin task |
+| PUT | `/brain/admin/:id` | Update admin task |
+| DELETE | `/brain/admin/:id` | Delete admin task |
+| GET | `/brain/digest/latest` | Get latest daily digest |
+| GET | `/brain/review/latest` | Get latest weekly review |
+| POST | `/brain/digest/run` | Trigger daily digest |
+| POST | `/brain/review/run` | Trigger weekly review |
+| GET | `/brain/settings` | Get Brain settings |
+| PUT | `/brain/settings` | Update Brain settings |
+
## WebSocket Events
Connect to Socket.IO at `http://localhost:5554`.
diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md
index f058756..12f2c43 100644
--- a/docs/ARCHITECTURE.md
+++ b/docs/ARCHITECTURE.md
@@ -79,10 +79,18 @@ PortOS/
โ โโโ TASKS.md # User task file
โ โโโ COS-TASKS.md # System task file
โ โโโ COS-GOALS.md # Mission and goals
-โ โโโ cos/ # CoS state and agents
-โ โโโ state.json # Daemon state
-โ โโโ agents/ # Agent outputs
-โ โโโ memory/ # Memory storage
+โ โโโ cos/ # CoS state and agents
+โ โ โโโ state.json # Daemon state
+โ โ โโโ agents/ # Agent outputs
+โ โ โโโ memory/ # Memory storage
+โ โโโ brain/ # Brain second-brain data
+โ โโโ meta.json # Settings
+โ โโโ inbox_log.jsonl # Captured thoughts
+โ โโโ people.jsonl # People records
+โ โโโ projects.jsonl # Project records
+โ โโโ ideas.jsonl # Ideas
+โ โโโ admin.jsonl # Admin tasks
+โ โโโ digests.jsonl # Daily/weekly digests
โ
โโโ docs/ # Documentation
โโโ .github/workflows/ # CI/CD
@@ -170,6 +178,12 @@ Server Event โ Socket.IO โ socket.js โ React Component State Update
- Agent trigger integration
- Run history tracking
+### Brain Service (`server/services/brain.js`)
+- Thought capture and AI classification
+- CRUD for People, Projects, Ideas, Admin
+- Daily digest and weekly review generation
+- Classification correction workflow
+
## Error Handling
All routes use `asyncHandler` wrapper from `server/lib/errorHandler.js`:
diff --git a/docs/features/app-wizard.md b/docs/features/app-wizard.md
new file mode 100644
index 0000000..28a6fb9
--- /dev/null
+++ b/docs/features/app-wizard.md
@@ -0,0 +1,67 @@
+# App Wizard
+
+The wizard supports two modes for registering apps in PortOS.
+
+## Mode 1: Register Existing App
+
+For apps already running on the system (any path, any user):
+
+### Steps
+
+1. **Basic Info**: Name, description, icon
+2. **Location**: Repo path (file picker or manual entry)
+3. **Ports**: UI port, API port (can auto-detect from running processes)
+4. **Process Config**:
+ - Start command(s)
+ - PM2 process name(s)
+ - Env file location
+5. **Confirm & Register**
+
+### Features
+
+- Detect running processes on specified ports
+- Validate repo path exists
+- Optional: import existing PM2 process into registry
+- No scaffolding, no git operations
+
+## Mode 2: Create New App
+
+Scaffold a new project from template:
+
+### Steps
+
+1. **Basic Info**: Name, description
+2. **Template**: Select template (vite+express, node-server, static)
+3. **Location**: Parent directory for new repo
+4. **Ports**: Allocate from available range
+5. **Git Setup**:
+ - Initialize git
+ - Create GitHub repo (optional, via `gh` CLI)
+6. **Confirm & Create**
+
+### Actions on create
+
+- Copy template files
+- Configure .env with ports
+- Run `npm install`
+- Initialize git + first commit
+- Create GitHub repo (if selected)
+- Generate PM2 ecosystem config
+- Register in PortOS
+- Start with PM2
+
+## API Endpoints
+
+| Route | Description |
+|-------|-------------|
+| POST /api/apps | Register existing app |
+| POST /api/scaffold | Create new app from template |
+| GET /api/templates | List available templates |
+| POST /api/detect/ports | Detect process on port |
+| POST /api/detect/repo | Validate repo path, detect type |
+
+## Related Features
+
+- [PM2 Configuration](../PM2.md)
+- [Port Allocation](../PORTS.md)
+- [Templates System](#m7-app-templates)
diff --git a/docs/features/autofixer.md b/docs/features/autofixer.md
new file mode 100644
index 0000000..6202ade
--- /dev/null
+++ b/docs/features/autofixer.md
@@ -0,0 +1,53 @@
+# Autofixer Integration
+
+Autonomous crash detection and repair using Claude CLI.
+
+## Architecture
+
+- **Daemon Process** (`autofixer/server.js`): Monitors PM2 for crashed processes registered in PortOS
+- **UI Server** (`autofixer/ui.js`): Web interface for viewing logs and fix history on port 5560
+- **PM2 Integration**: Runs as `portos-autofixer` and `portos-autofixer-ui` processes
+
+## Features
+
+1. **Crash Detection**: Polls PM2 every 15 minutes for `errored` status on registered apps
+2. **Auto-Fix**: Invokes Claude CLI with crash context (error logs, app info) to diagnose and repair
+3. **Session History**: Stores fix attempts with prompts, outputs, and success/failure status
+4. **Cooldown**: 30-minute cooldown per process to prevent repeated fix loops
+5. **Log Streaming**: Real-time SSE log streaming from any PM2 process
+6. **Tailscale Compatible**: Dynamic hostname for remote access
+
+## Data Storage
+
+```
+./data/autofixer/
+โโโ index.json # Fix session index
+โโโ sessions/
+ โโโ {sessionId}/
+ โโโ prompt.txt # Prompt sent to Claude
+ โโโ output.txt # Claude's response
+ โโโ metadata.json # Session details
+```
+
+## Autofixer UI
+
+Port 5560 provides:
+- Process sidebar with live status indicators
+- SSE-powered log viewer with pause/clear controls
+- History tab showing fix attempts with success/failure status
+- Links back to PortOS Dashboard
+
+## Configuration
+
+| Setting | Value |
+|---------|-------|
+| UI Port | 5560 |
+| Check Interval | 15 minutes |
+| Fix Cooldown | 30 minutes |
+| Max History | 100 entries |
+
+## Related Features
+
+- [Error Handling](./error-handling.md)
+- [Chief of Staff](./chief-of-staff.md)
+- [PM2 Configuration](../PM2.md)
diff --git a/docs/features/brain-system.md b/docs/features/brain-system.md
new file mode 100644
index 0000000..f51685f
--- /dev/null
+++ b/docs/features/brain-system.md
@@ -0,0 +1,210 @@
+# Brain Second-Brain System
+
+Offline-first "second brain" management system for capturing, classifying, and surfacing thoughts.
+
+## Overview
+
+Brain provides a capture-classify-store-surface workflow:
+
+1. **Capture**: Dump thoughts into a single inbox
+2. **Classify**: AI routes thoughts to appropriate databases
+3. **Store**: Persist to People, Projects, Ideas, or Admin
+4. **Surface**: Daily digests and weekly reviews
+
+## Features
+
+1. **Chat-like Inbox**: Single input for capturing thoughts
+2. **AI Classification**: LM Studio classifies with confidence scores
+3. **Four Databases**: People, Projects, Ideas, Admin
+4. **Needs Review Queue**: Low-confidence items await user decision
+5. **Fix/Correct Flow**: Reclassify misrouted thoughts
+6. **Daily Digest**: AI-generated summary of actions and status
+7. **Weekly Review**: GTD-style open loops and accomplishments
+8. **Trust Panel**: Full audit trail of classifications
+
+## Databases
+
+### People
+
+Track individuals with:
+- Contact info
+- Last interaction date
+- Topics discussed
+- Follow-up actions
+- Relationship context
+
+### Projects
+
+Manage projects with:
+- Status (active, planned, on-hold, completed)
+- Goals and objectives
+- Next actions
+- Deadlines
+- Related people
+
+### Ideas
+
+Capture ideas with:
+- Category (product, content, business, tech)
+- Maturity (raw, explored, validated, implemented)
+- Related projects or people
+- Evaluation notes
+
+### Admin
+
+Track administrative tasks:
+- Due dates
+- Priority
+- Status
+- Related people or projects
+- Completion notes
+
+## Data Storage
+
+```
+./data/brain/
+โโโ meta.json # Settings and scheduler state
+โโโ inbox_log.jsonl # All captured thoughts with classifications
+โโโ people.jsonl # People records
+โโโ projects.jsonl # Projects with status tracking
+โโโ ideas.jsonl # Ideas and concepts
+โโโ admin.jsonl # Administrative tasks
+โโโ digests.jsonl # Daily digest history
+โโโ reviews.jsonl # Weekly review history
+```
+
+## AI Classification
+
+The classifier uses LM Studio to analyze captured thoughts and:
+
+- Determine the appropriate database (People, Projects, Ideas, Admin)
+- Extract structured data (names, dates, priorities, etc.)
+- Calculate confidence score (0.0-1.0)
+- Provide reasoning for classification decision
+
+### Confidence Levels
+
+- **High (โฅ0.8)**: Auto-routed to database
+- **Medium (0.5-0.8)**: Suggested route, user can confirm or change
+- **Low (<0.5)**: Marked "needs review", user must choose
+
+## Daily Digest
+
+Generated daily (configurable schedule):
+
+- Summary of captured thoughts
+- Actions taken
+- Projects with recent activity
+- People interacted with
+- Admin items due soon
+- Ideas ready for next steps
+
+## Weekly Review
+
+Generated weekly (GTD-style):
+
+- Open loops by database
+- Accomplishments
+- Projects to review
+- People to follow up with
+- Ideas to explore
+- Admin items to address
+
+## API Endpoints
+
+| Route | Description |
+|-------|-------------|
+| POST /api/brain/capture | Capture and classify thought |
+| GET /api/brain/inbox | List inbox log with filters |
+| POST /api/brain/review/resolve | Resolve needs_review item |
+| POST /api/brain/fix | Correct misclassified item |
+| GET/POST/PUT/DELETE /api/brain/people/:id? | People CRUD |
+| GET/POST/PUT/DELETE /api/brain/projects/:id? | Projects CRUD |
+| GET/POST/PUT/DELETE /api/brain/ideas/:id? | Ideas CRUD |
+| GET/POST/PUT/DELETE /api/brain/admin/:id? | Admin CRUD |
+| GET /api/brain/digest/latest | Get latest daily digest |
+| GET /api/brain/review/latest | Get latest weekly review |
+| POST /api/brain/digest/run | Trigger daily digest |
+| POST /api/brain/review/run | Trigger weekly review |
+| GET/PUT /api/brain/settings | Get/update settings |
+
+## Prompt Templates
+
+| Template | Purpose |
+|----------|---------|
+| brain-classifier | Classify captured thoughts |
+| brain-daily-digest | Generate daily summary |
+| brain-weekly-review | Generate weekly review |
+
+## UI Tabs
+
+- **Inbox**: Chat-like capture interface with classification results
+- **Memory**: CRUD views for People, Projects, Ideas, Admin
+- **Digest**: Daily and weekly summaries with run buttons
+- **Trust**: Audit trail with classification confidence and reasoning
+
+## Implementation Files
+
+| File | Purpose |
+|------|---------|
+| `server/lib/brainValidation.js` | Zod schemas for all Brain entities |
+| `server/services/brain.js` | Core business logic |
+| `server/services/brainStorage.js` | JSONL/JSON file operations |
+| `server/services/brainScheduler.js` | Daily/weekly job scheduler |
+| `server/routes/brain.js` | API endpoints |
+| `client/src/pages/Brain.jsx` | Main page with tabs |
+| `client/src/components/brain/tabs/*.jsx` | Tab components |
+| `data/prompts/stages/brain-*.md` | Prompt templates |
+
+## Setup Requirements
+
+**LM Studio** must be running with a capable chat model:
+
+1. Download and install [LM Studio](https://lmstudio.ai/)
+2. Load a chat model (e.g., gptoss-20b, qwen-2.5, etc.)
+3. Start the local server on port 1234 (default)
+4. Configure the Brain system to use the model
+
+## Configuration
+
+```javascript
+brain: {
+ enabled: true,
+ provider: 'lmstudio',
+ endpoint: 'http://localhost:1234/v1/chat/completions',
+ model: 'gptoss-20b',
+ minConfidence: 0.5,
+ digestSchedule: '0 18 * * *', // Daily at 6pm
+ reviewSchedule: '0 9 * * 0' // Weekly on Sunday at 9am
+}
+```
+
+## Workflow Example
+
+```
+1. User captures: "Met with Sarah about the new project. Need to follow up next week."
+
+2. AI classifies:
+ - Database: People + Projects
+ - Confidence: 0.85
+ - Reasoning: "Mentions person (Sarah) and project context with action item"
+
+3. System creates:
+ - Person record: "Sarah" with last interaction today
+ - Project record: "New project" with status "planned"
+ - Admin task: "Follow up with Sarah" due next week
+
+4. Daily digest includes:
+ - "New person added: Sarah"
+ - "New project started: New project"
+ - "Action due: Follow up with Sarah"
+
+5. Weekly review shows:
+ - Open loop: "New project (planned) - needs next actions"
+ - Follow-up needed: "Sarah - follow up scheduled"
+```
+
+## Related Features
+
+- [Memory System](./memory-system.md)
+- [Chief of Staff](./chief-of-staff.md)
diff --git a/docs/features/brain/plan.md b/docs/features/brain/plan.md
new file mode 100644
index 0000000..3336f45
--- /dev/null
+++ b/docs/features/brain/plan.md
@@ -0,0 +1,480 @@
+# Brain Feature Implementation Plan
+
+> **Offline-first "second brain" management system for PortOS**
+
+## Overview
+
+Brain is a capture-classify-store-surface system that lets users dump thoughts into a single inbox, then AI classifies and routes them to appropriate databases (People, Projects, Ideas, Admin). Daily digests and weekly reviews surface actionable insights.
+
+**Core workflow**: capture -> classify/route -> store -> daily digest -> weekly review -> fix/correct
+
+## Architecture
+
+```
+Client (React) Server (Express) Storage (./data/brain/)
+---------------- ----------------- ---------------------
+/brain/:tab /api/brain/* meta.json
+ - Inbox (chat-like) - capture inbox_log.jsonl
+ - Memory (CRUD views) - CRUD (people/projects/etc) people.jsonl
+ - Digest & Review - digest/review projects.jsonl
+ - Trust Panel (audit) - settings ideas.jsonl
+ admin.jsonl
+ digests.jsonl
+ reviews.jsonl
+```
+
+## Implementation Phases
+
+### Phase 1: Data Layer & Schemas
+
+**Files to create:**
+- `server/lib/brainValidation.js` - Zod schemas for all Brain entities
+- `server/services/brainStorage.js` - JSONL file operations
+- `data.sample/brain/` - Sample data directory with all files
+
+**Schemas:**
+
+```javascript
+// InboxLogRecord
+{
+ id: string (UUID),
+ capturedText: string,
+ capturedAt: string (ISO),
+ source: "brain_ui",
+ ai: { providerId, modelId, promptTemplateId, temperature?, maxTokens? },
+ classification: {
+ destination: "people" | "projects" | "ideas" | "admin" | "unknown",
+ confidence: number (0-1),
+ title: string,
+ extracted: object,
+ reasons?: string[]
+ },
+ status: "filed" | "needs_review" | "corrected" | "error",
+ filed?: { destination, destinationId },
+ correction?: { correctedAt, previousDestination, newDestination, note? },
+ error?: { message, stack? }
+}
+
+// PeopleRecord
+{ id, name, context, followUps: [], lastTouched?, tags?, createdAt, updatedAt }
+
+// ProjectRecord
+{ id, name, status: "active"|"waiting"|"blocked"|"someday"|"done",
+ nextAction, notes?, tags?, createdAt, updatedAt }
+
+// IdeaRecord
+{ id, title, oneLiner, notes?, tags?, createdAt, updatedAt }
+
+// AdminRecord
+{ id, title, status: "open"|"waiting"|"done", dueDate?, nextAction?,
+ notes?, createdAt, updatedAt }
+
+// meta.json
+{
+ version: 1,
+ confidenceThreshold: 0.6,
+ dailyDigestTime: "09:00",
+ weeklyReviewTime: "16:00",
+ weeklyReviewDay: "sunday",
+ defaultProvider: "lmstudio",
+ defaultModel: "gptoss-20b"
+}
+```
+
+**Storage pattern (from existing codebase):**
+- Use atomic file rewrites with in-memory caching (TTL ~2s)
+- JSONL for append-heavy logs (inbox_log, digests, reviews)
+- JSON objects for entity stores (people, projects, ideas, admin)
+- `ensureDataDir()` before all file operations
+- UUID for all record IDs, ISO strings for timestamps
+
+---
+
+### Phase 2: API Endpoints
+
+**File to create:** `server/routes/brain.js`
+
+**Endpoints:**
+
+| Method | Endpoint | Purpose |
+|--------|----------|---------|
+| POST | `/api/brain/capture` | Capture thought, classify, store |
+| GET | `/api/brain/inbox` | List inbox log (filters: status, limit, offset) |
+| POST | `/api/brain/review/resolve` | Resolve needs_review item |
+| POST | `/api/brain/fix` | Correct misclassified item |
+| GET/POST/PUT/DELETE | `/api/brain/people/:id?` | People CRUD |
+| GET/POST/PUT/DELETE | `/api/brain/projects/:id?` | Projects CRUD |
+| GET/POST/PUT/DELETE | `/api/brain/ideas/:id?` | Ideas CRUD |
+| GET/POST/PUT/DELETE | `/api/brain/admin/:id?` | Admin CRUD |
+| GET | `/api/brain/digest/latest` | Get latest daily digest |
+| GET | `/api/brain/review/latest` | Get latest weekly review |
+| POST | `/api/brain/digest/run` | Manual trigger digest |
+| POST | `/api/brain/review/run` | Manual trigger review |
+| GET/PUT | `/api/brain/settings` | Get/update Brain settings |
+
+**Capture flow (POST /api/brain/capture):**
+1. Validate input with Zod
+2. Create InboxLogRecord immediately (status: pending)
+3. Call AI classifier with `capturedText`
+4. Parse JSON response, validate structure
+5. If `confidence >= threshold` and destination != "unknown":
+ - Create record in destination DB
+ - Update InboxLogRecord: status="filed", filed={...}
+6. If low confidence or unknown:
+ - Update InboxLogRecord: status="needs_review"
+7. Return InboxLogRecord + UI message
+
+**Error handling:** Use `asyncHandler()` wrapper, throw `ServerError` for validation/processing errors. If AI unavailable, set status="needs_review" with destination="unknown".
+
+---
+
+### Phase 3: AI Prompt Templates
+
+**Files to create:**
+- `data/prompts/stages/brain-classifier.md`
+- `data/prompts/stages/brain-daily-digest.md`
+- `data/prompts/stages/brain-weekly-review.md`
+- Update `data/prompts/stage-config.json` with new stages
+
+**Stage configurations:**
+```json
+{
+ "brain-classifier": {
+ "name": "Brain Classifier",
+ "description": "Classify captured thoughts into People/Projects/Ideas/Admin",
+ "model": "lmstudio:gptoss-20b",
+ "provider": "lmstudio",
+ "returnsJson": true,
+ "variables": []
+ },
+ "brain-daily-digest": {
+ "name": "Brain Daily Digest",
+ "description": "Generate daily actionable digest",
+ "model": "lmstudio:gptoss-20b",
+ "provider": "lmstudio",
+ "returnsJson": true,
+ "variables": []
+ },
+ "brain-weekly-review": {
+ "name": "Brain Weekly Review",
+ "description": "Generate weekly review and open loops",
+ "model": "lmstudio:gptoss-20b",
+ "provider": "lmstudio",
+ "returnsJson": true,
+ "variables": []
+ }
+}
+```
+
+**Classifier prompt output schema:**
+```json
+{
+ "destination": "people|projects|ideas|admin|unknown",
+ "confidence": 0.0-1.0,
+ "title": "Short title",
+ "extracted": { /* destination-specific fields */ },
+ "reasons": ["reason1", "reason2"]
+}
+```
+
+**Daily digest output schema:**
+```json
+{
+ "digestText": "< 150 words",
+ "topActions": ["action1", "action2", "action3"],
+ "stuckThing": "One stuck item",
+ "smallWin": "One positive thing"
+}
+```
+
+**Weekly review output schema:**
+```json
+{
+ "reviewText": "< 250 words",
+ "whatHappened": ["bullet1", "bullet2"],
+ "biggestOpenLoops": ["loop1", "loop2"],
+ "suggestedActionsNextWeek": ["action1", "action2"],
+ "recurringTheme": "One sentence pattern"
+}
+```
+
+---
+
+### Phase 4: Brain Service & Scheduler
+
+**Files to create:**
+- `server/services/brain.js` - Core business logic
+- `server/services/brainScheduler.js` - Daily/weekly job scheduler
+
+**Service functions:**
+```javascript
+// Core
+export async function captureThought(text, providerOverride?, modelOverride?)
+export async function resolveReview(inboxLogId, destination, editedExtracted?)
+export async function fixClassification(inboxLogId, newDestination, updatedFields?)
+
+// CRUD (for each entity type)
+export async function getPeople(filters?)
+export async function createPerson(data)
+export async function updatePerson(id, data)
+export async function deletePerson(id)
+// ... same pattern for projects, ideas, admin
+
+// Digest/Review
+export async function runDailyDigest()
+export async function runWeeklyReview()
+export async function getLatestDigest()
+export async function getLatestReview()
+
+// Settings
+export async function getSettings()
+export async function updateSettings(updates)
+```
+
+**Scheduler (adapt from taskSchedule.js pattern):**
+- Start scheduler on server boot
+- Check configured times against current time
+- Run daily digest at configured time (default 9:00 AM)
+- Run weekly review at configured day/time (default Sunday 4:00 PM)
+- Handle catch-up: if server was down, run at most 1 missed job per type
+- Store last run timestamps in meta.json
+
+```javascript
+// brainScheduler.js
+let schedulerInterval = null;
+
+export function startScheduler() {
+ schedulerInterval = setInterval(checkSchedule, 60000); // Check every minute
+ checkSchedule(); // Run immediately on start
+}
+
+export function stopScheduler() {
+ if (schedulerInterval) clearInterval(schedulerInterval);
+}
+
+async function checkSchedule() {
+ const settings = await getSettings();
+ const now = new Date();
+ // Check if daily digest is due
+ // Check if weekly review is due
+ // Handle missed runs (max 1 catch-up per type)
+}
+```
+
+**AI call pattern (from existing memoryClassifier.js):**
+```javascript
+async function callAI(stageName, variables) {
+ const provider = await getActiveProvider();
+ const prompt = await buildPrompt(stageName, variables);
+
+ const response = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': `Bearer ${provider.apiKey}`
+ },
+ body: JSON.stringify({
+ model: provider.defaultModel,
+ messages: [{ role: 'user', content: prompt }],
+ temperature: 0.1
+ })
+ });
+
+ const data = await response.json();
+ return parseJsonResponse(data.choices?.[0]?.message?.content);
+}
+
+function parseJsonResponse(content) {
+ // Remove markdown code blocks
+ const jsonMatch = content.match(/```(?:json)?\s*([\s\S]*?)```/) ||
+ content.match(/(\{[\s\S]*\})/);
+ if (!jsonMatch) throw new Error('No JSON found in response');
+ return JSON.parse(jsonMatch[1]);
+}
+```
+
+---
+
+### Phase 5: React UI
+
+**Files to create:**
+- `client/src/pages/Brain.jsx` - Main page with tab routing
+- `client/src/components/brain/index.js` - Re-exports
+- `client/src/components/brain/constants.js` - Tabs, states, colors
+- `client/src/components/brain/tabs/InboxTab.jsx` - Chat-like capture
+- `client/src/components/brain/tabs/MemoryTab.jsx` - CRUD views
+- `client/src/components/brain/tabs/DigestTab.jsx` - Digest & Review
+- `client/src/components/brain/tabs/TrustTab.jsx` - Audit trail
+
+**Update existing files:**
+- `client/src/App.jsx` - Add route `/brain/:tab`
+- `client/src/components/Layout.jsx` - Add Brain to nav
+- `client/src/services/api.js` - Add Brain API functions
+
+**Brain.jsx structure (follow ChiefOfStaff.jsx pattern):**
+```jsx
+import { useParams, useNavigate } from 'react-router-dom';
+
+const TABS = [
+ { id: 'inbox', label: 'Inbox', icon: MessageSquare },
+ { id: 'memory', label: 'Memory', icon: Database },
+ { id: 'digest', label: 'Digest', icon: Calendar },
+ { id: 'trust', label: 'Trust', icon: Shield }
+];
+
+export default function Brain() {
+ const { tab = 'inbox' } = useParams();
+ const navigate = useNavigate();
+
+ // Fetch data, handle socket events
+
+ return (
+
+ {/* Tab navigation */}
+ {/* Tab content based on tab param */}
+
+ );
+}
+```
+
+**InboxTab.jsx (chat-like UI):**
+- Single input field at bottom ("One thought at a time...")
+- Message list showing: capturedText, destination badge, confidence, status, timestamp
+- For filed items: link to view record
+- For needs_review: inline destination picker + "Reclassify" button
+- "Fix" button on each filed item for corrections
+
+**MemoryTab.jsx:**
+- Sub-tabs: People | Projects | Ideas | Admin | Needs Review
+- Each sub-tab: list view with filters + basic CRUD
+- Projects: filter by status (active/waiting/blocked/someday/done)
+- Admin: filter by status + sort by due date
+- Inline edit or modal edit for records
+
+**DigestTab.jsx:**
+- Latest daily digest (rendered markdown-like)
+- Latest weekly review
+- History list (collapsible)
+- "Run Now" buttons for manual trigger
+
+**TrustTab.jsx:**
+- Full inbox log with filters (status, date range, confidence range)
+- Expandable rows showing full classification JSON
+- Debug view for AI responses
+
+---
+
+### Phase 6: Sample Data & Tests
+
+**Sample data files (./data.sample/brain/):**
+- `meta.json` - Default settings
+- `inbox_log.jsonl` - 10-15 sample entries (mix of statuses)
+- `people.jsonl` - 5 sample people
+- `projects.jsonl` - 5 sample projects (various statuses)
+- `ideas.jsonl` - 3 sample ideas
+- `admin.jsonl` - 3 sample admin items
+- `digests.jsonl` - 2 sample digests
+- `reviews.jsonl` - 1 sample review
+
+**Tests (./server/tests/brain.test.js):**
+- Capture creates InboxLogRecord always
+- Confidence threshold gates filing
+- Fix/move updates records correctly
+- Digest word limit enforced
+- Review word limit enforced
+- CRUD operations work for all entity types
+- Settings update persists
+
+---
+
+## File Tree Summary
+
+```
+server/
+ lib/
+ brainValidation.js # Zod schemas
+ services/
+ brain.js # Core business logic
+ brainStorage.js # JSONL/JSON file operations
+ brainScheduler.js # Daily/weekly scheduler
+ routes/
+ brain.js # API endpoints
+ tests/
+ brain.test.js # Unit tests
+
+client/src/
+ pages/
+ Brain.jsx # Main page
+ components/
+ brain/
+ index.js # Re-exports
+ constants.js # TABS, states
+ tabs/
+ InboxTab.jsx
+ MemoryTab.jsx
+ DigestTab.jsx
+ TrustTab.jsx
+ services/
+ api.js # Add brain endpoints
+
+data/prompts/stages/
+ brain-classifier.md
+ brain-daily-digest.md
+ brain-weekly-review.md
+
+data.sample/brain/
+ meta.json
+ inbox_log.jsonl
+ people.jsonl
+ projects.jsonl
+ ideas.jsonl
+ admin.jsonl
+ digests.jsonl
+ reviews.jsonl
+```
+
+---
+
+## Verification Plan
+
+1. **Data layer**: Create sample data, verify file read/write
+2. **API**: Test each endpoint with curl/Postman
+3. **AI integration**: Test classifier with sample inputs, verify JSON parsing
+4. **UI**: Navigate to /brain, verify all tabs render
+5. **End-to-end capture flow**:
+ - Enter thought in inbox
+ - Verify classification + filing
+ - Check record appears in Memory tab
+ - Check inbox log shows entry
+6. **Fix flow**: Correct a misclassified item, verify both records update
+7. **Needs review flow**: Submit low-confidence item, resolve via UI
+8. **Digest**: Run manual digest, verify output < 150 words
+9. **Review**: Run manual review, verify output < 250 words
+10. **Scheduler**: Verify jobs run at configured times (or test with short intervals)
+11. **Offline resilience**: Stop LM Studio, capture thought, verify queued for later
+
+---
+
+## Implementation Order
+
+1. Data layer (schemas + storage)
+2. API endpoints (capture + CRUD)
+3. AI prompt templates
+4. Brain service (classification logic)
+5. Scheduler
+6. React UI (Inbox tab first)
+7. Remaining UI tabs
+8. Sample data
+9. Tests
+
+---
+
+## Key Decisions
+
+- **JSONL for append-heavy data**: inbox_log, digests, reviews use JSONL for easy appending and diffing
+- **JSON objects for entities**: people/projects/ideas/admin use JSON objects keyed by ID for efficient lookups
+- **Confidence threshold 0.6**: Default, configurable in settings
+- **No try/catch**: Per CLAUDE.md, errors bubble to asyncHandler middleware
+- **URL-based tabs**: `/brain/:tab` for deep linking (follow CoS pattern)
+- **Default to LM Studio**: Use `lmstudio:gptoss-20b` as default provider/model
+- **Word limits enforced server-side**: Truncate or request re-generation if AI exceeds limits
diff --git a/docs/features/brain/prompt.md b/docs/features/brain/prompt.md
new file mode 100644
index 0000000..13531f3
--- /dev/null
+++ b/docs/features/brain/prompt.md
@@ -0,0 +1,384 @@
+You are an AI coding agent implementing a new PortOS feature called **Brain**: an offline-first โsecond brainโ management + communication system.
+
+# 0) Core problem + product intent
+Human brains are for thinking, not storage. Forgetting creates hidden taxes: dropped relationship details, repeated project failures, and constant open-loop anxiety. The goal is a system that lets the user capture thoughts in seconds, then the system actively processes those thoughts in the background: classifies, routes, structures, stores, and proactively surfaces what mattersโwithout the user having to remember to organize or retrieve anything.
+
+This is not โa notes app.โ It is a behavior-changing loop:
+capture โ classify/route โ store โ surface daily โ review weekly โ correct easily โ maintain trust.
+
+# 1) Non-negotiable design principles (implement as requirements)
+1) **One reliable human behavior**: the user does only one thing consistentlyโcapture a thought in a single Inbox UI. No tagging, folders, taxonomy decisions at capture time.
+
+2) **Separate Interface / Compute / Memory**
+- Interface: Brain page UI (chat-like capture + dashboards).
+- Compute: local server endpoints + scheduled local jobs + AI calls.
+- Memory: local storage files as source-of-truth.
+
+3) **Treat prompts like APIs**: fixed input + fixed output JSON schemas. The model must output JSON only (no markdown, no prose). Reliability beats creativity.
+
+4) **Trust mechanisms**: always log what happened (audit trail), include confidence, make errors visible + traceable. Users abandon systems when they stop trusting them.
+
+5) **Default to safe behavior**: if uncertain, do NOT write low-quality guesses into the memory store. Hold items for review and ask for clarification.
+
+6) **Small, frequent, actionable outputs**
+- Daily digest MUST be <150 words, fit on a phone screen.
+- Weekly review MUST be <250 words.
+- Outputs must contain concrete actions, not motivational fluff.
+
+7) **Next action is the unit of execution**
+Project entries must have a single explicit โnextActionโ that is concrete and executable (e.g., โEmail Sarah to confirm copy deadlineโ).
+
+8) **Prefer routing over organizing**
+The system routes into a small set of stable buckets. Do not require ongoing folder maintenance.
+
+9) **Keep categories & fields painfully small**
+Start with only these categories:
+- People
+- Projects
+- Ideas
+- Admin
+Plus an Inbox Log (audit trail).
+Avoid expanding schema unless necessary.
+
+10) **Design for restart**
+If the user falls off, the system must be easy to resume without backlog guilt. The UI should encourage โjust restartโ rather than โcatch up.โ
+
+11) **Build one core workflow, then attach modules**
+Core loop: capture โ file โ daily digest โ weekly review โ fix/correct. Future integrations (Apple Notes, email/calendar) are out of scope for now; design modular hooks but donโt implement them.
+
+12) **Maintainability over cleverness**
+Few moving parts. Clear logs. Easy to debug. Do not add heavy dependencies or complex pipelines unless needed.
+
+# 2) Translate the classic stack into PortOS (offline)
+Classic tools equivalents:
+- โSlack capture channelโ โ Brain Inbox UI (chat-like)
+- โNotion databasesโ โ local file-based databases (People/Projects/Ideas/Admin + InboxLog)
+- โZapier automationsโ โ local server workflows + scheduler (daily/weekly)
+- โClaude/ChatGPT intelligenceโ โ PortOS AI provider abstraction (default: LM Studio โ model `openai/gpt-oss-20b`)
+
+Everything must work offline and be self-contained.
+
+# 3) Technical constraints + integration with PortOS
+- PortOS is an Express + React + Tailwind webapp.
+- Implement a new navigation entry **Brain** and route **/brain**.
+- Implement Express API routes under **/api/brain/**.
+- Use PortOSโs existing AI Provider/Model config + prompt templates system:
+ - Default Brain to LM Studio with model `openai/gpt-oss-20b`.
+ - Allow runtime override per request (provider/model).
+- Local persistence under **./data/brain/** (gitignored), with sample data under **./data.sample/brain/**.
+
+# 4) Data model (schemas + file layout)
+Use JSON Lines (preferred) for append-only audit and easy diffs. Keep records human-readable.
+
+Directory:
+- ./data/brain/meta.json
+- ./data/brain/inbox_log.jsonl
+- ./data/brain/people.jsonl
+- ./data/brain/projects.jsonl
+- ./data/brain/ideas.jsonl
+- ./data/brain/admin.jsonl
+
+IDs:
+- Use UUID for all records.
+- Timestamps: ISO strings.
+- Every record: createdAt, updatedAt.
+
+## 4.1 Inbox Log (Receipt / Audit Trail) โ ALWAYS write one per capture
+InboxLogRecord schema:
+- id: string
+- capturedText: string
+- capturedAt: string
+- source: "brain_ui" (future: notes/email/calendar)
+- ai: {
+ providerId: string,
+ modelId: string,
+ promptTemplateId: string,
+ temperature?: number,
+ maxTokens?: number
+ }
+- classification: {
+ destination: "people" | "projects" | "ideas" | "admin" | "unknown",
+ confidence: number, // 0..1
+ title: string, // AI-generated concise title
+ extracted: object, // destination-specific extracted fields (see below)
+ reasons?: string[] // short debug/trust hints
+ }
+- status: "filed" | "needs_review" | "corrected" | "error"
+- filed?: {
+ destination: "people" | "projects" | "ideas" | "admin",
+ destinationId: string
+ }
+- correction?: {
+ correctedAt: string,
+ previousDestination: string,
+ newDestination: string,
+ note?: string
+ }
+- error?: { message: string, stack?: string }
+
+## 4.2 People DB
+PeopleRecord:
+- id
+- name: string
+- context: string // who they are / how you know them
+- followUps: string[] // things to remember next time
+- lastTouched?: string // date/time of last interaction or update
+- tags?: string[]
+- createdAt, updatedAt
+
+## 4.3 Projects DB
+ProjectRecord:
+- id
+- name: string
+- status: "active" | "waiting" | "blocked" | "someday" | "done"
+- nextAction: string // MUST be concrete + executable
+- notes?: string
+- tags?: string[]
+- createdAt, updatedAt
+
+## 4.4 Ideas DB
+IdeaRecord:
+- id
+- title: string
+- oneLiner: string // core insight in 1 sentence
+- notes?: string
+- tags?: string[]
+- createdAt, updatedAt
+
+## 4.5 Admin DB
+AdminRecord:
+- id
+- title: string
+- status: "open" | "waiting" | "done"
+- dueDate?: string
+- nextAction?: string // if applicable
+- notes?: string
+- createdAt, updatedAt
+
+# 5) The โAI Loopโ workflows to implement (must ship)
+## 5.1 Capture โ Classify/Route โ Store โ Log
+User enters one thought per message in Brain Inbox.
+Server workflow:
+1) Create InboxLogRecord immediately with capturedText + capturedAt.
+2) Call AI classifier prompt with capturedText.
+3) Receive strict JSON output:
+ - destination + confidence + title + extracted fields.
+4) If confidence >= THRESHOLD (default 0.6):
+ - Write a record into the chosen DB (people/projects/ideas/admin) using extracted fields.
+ - Update InboxLogRecord status=filed + filed.destinationId.
+5) If confidence < THRESHOLD or destination="unknown":
+ - Set InboxLogRecord status=needs_review.
+ - Do NOT write to target DB.
+ - Return UI response asking for clarification and offering one-click re-route.
+
+## 5.2 Bouncer (Confidence filter)
+- Threshold is configurable in ./data/brain/meta.json and in UI settings.
+- Low confidence never pollutes the memory store.
+
+## 5.3 Fix Button (human-in-loop correction)
+Every filed item must be correctable in one step:
+- In UI, each Inbox item shows what it was filed as + confidence + โFixโ action.
+- Fix flow:
+ - User selects correct destination (and optionally edits extracted fields).
+ - System updates the destination DB record or moves it:
+ - If moving: create new record in new DB and mark old record as archived OR delete (choose a safe approach, default archive).
+ - Update InboxLogRecord status=corrected with correction info.
+ - Corrections must be trivial; no deep navigation.
+
+## 5.4 Tap on the Shoulder (daily + weekly surfacing)
+Implement scheduled jobs on the local server:
+- Daily Digest job (default 9:00 AM local time, configurable)
+ - Query: active projects, admin open items, people with followUps/lastTouched stale, and any needs_review items.
+ - Call AI summarizer prompt.
+ - Store digest output to ./data/brain/digests.jsonl (or similar) AND surface in UI notifications panel.
+- Weekly Review job (default Sunday 4:00 PM local time, configurable)
+ - Query: last 7 days inbox log + active projects + open loops (waiting/blocked) + needs_review.
+ - Call AI review prompt.
+ - Store review output and surface in UI.
+
+Outputs must be short:
+- Daily digest <150 words.
+- Weekly review <250 words.
+Also store structured metadata alongside the text, e.g. list of recommended actions.
+
+# 6) Brain Page UI requirements
+Implement /brain with 3 primary areas (tabs or layout sections):
+1) **Inbox (Chat-style)**
+ - Single input, โone thought per messageโ hint.
+ - Message list includes: capturedText, destination, confidence, status, createdAt.
+ - For filed items: link to the created record.
+ - For needs_review: inline clarification controls (choose category + optional short prefix + resubmit).
+ - โFixโ button for corrections.
+
+2) **Memory (Databases)**
+ - Views for People / Projects / Ideas / Admin with simple filters:
+ - Projects filter by status.
+ - Admin filter by status/due date.
+ - Needs review list.
+ - Basic CRUD editing for records.
+
+3) **Digest & Review**
+ - Show latest Daily Digest and Weekly Review.
+ - List previous digests/reviews (history).
+ - โRun nowโ buttons (manual trigger) for digest/review.
+
+Additional UI:
+- **Trust panel** / โInbox Logโ view:
+ - Audit trail with filters by status, date, confidence range.
+ - Inspect raw AI JSON for a record (for debugging).
+
+# 7) API endpoints (Express)
+Implement:
+- POST /api/brain/capture
+ - body: { text: string, providerOverride?, modelOverride? }
+ - returns: inboxLogRecord + uiMessage (filed/needs_review)
+- GET /api/brain/inbox?status=&limit=&offset=
+- POST /api/brain/review/resolve
+ - resolves needs_review: { inboxLogId, destination, editedExtracted? }
+- POST /api/brain/fix
+ - { inboxLogId, newDestination, updatedFields? }
+- CRUD endpoints for each DB:
+ - /api/brain/people, /projects, /ideas, /admin
+- GET /api/brain/digest/latest
+- GET /api/brain/review/latest
+- POST /api/brain/digest/run
+- POST /api/brain/review/run
+- GET /api/brain/settings
+- POST /api/brain/settings (threshold, schedule times, defaults)
+
+# 8) Scheduler
+Implement a simple in-process scheduler:
+- Reads settings from ./data/brain/meta.json.
+- Runs daily + weekly jobs at configured times.
+- Handles restarts cleanly:
+ - If server was down during scheduled time, run โcatch-upโ lightly:
+ - At most one missed daily digest.
+ - Weekly review runs next time it detects it missed the window.
+ - No backlog explosion.
+
+# 9) AI prompt templates (must implement as strict JSON contracts)
+Create 3 prompt templates stored in PortOSโs prompt system:
+1) brain_classifier_v1
+2) brain_daily_digest_v1
+3) brain_weekly_review_v1
+
+They must:
+- Demand JSON only.
+- Define allowed enum values.
+- Define how to handle ambiguity and confidence scoring.
+- Prefer safe behavior.
+
+## 9.1 brain_classifier_v1 (JSON-only)
+Input variables:
+- capturedText: string
+- now: ISO string
+
+Output JSON schema:
+{
+ "destination": "people" | "projects" | "ideas" | "admin" | "unknown",
+ "confidence": number, // 0..1
+ "title": string, // short
+ "extracted": object, // depends on destination
+ "reasons": string[] // <=3 short strings
+}
+
+Destination-specific extracted schemas:
+- people:
+ { "name": string, "context": string, "followUps": string[], "lastTouched": string|null, "tags": string[] }
+- projects:
+ { "name": string, "status": "active"|"waiting"|"blocked"|"someday"|"done", "nextAction": string, "notes": string, "tags": string[] }
+- ideas:
+ { "title": string, "oneLiner": string, "notes": string, "tags": string[] }
+- admin:
+ { "title": string, "status": "open"|"waiting"|"done", "dueDate": string|null, "nextAction": string|null, "notes": string }
+
+Rules:
+- If ambiguity is high, set destination="unknown" and confidence <0.6.
+- Confidence meaning: likelihood the chosen destination + extracted fields are correct.
+- โnextActionโ must be explicit and executable when destination=projects; if not inferable, lower confidence.
+- Keep tags optional and minimal; do not invent many tags.
+
+## 9.2 brain_daily_digest_v1 (JSON-only)
+Input variables:
+- activeProjects: ProjectRecord[]
+- openAdmin: AdminRecord[]
+- peopleFollowUps: PeopleRecord[]
+- needsReview: InboxLogRecord[]
+- now: ISO string
+
+Output JSON schema:
+{
+ "digestText": string, // MUST be <150 words
+ "topActions": string[], // 3 items max
+ "stuckThing": string, // 1 item
+ "smallWin": string // 1 item
+}
+
+Rules:
+- Must be operational, not motivational.
+- Fit on a phone screen.
+- If data is sparse, be honest and suggest a simple next step.
+
+## 9.3 brain_weekly_review_v1 (JSON-only)
+Input variables:
+- inboxLogLast7Days: InboxLogRecord[]
+- activeProjects: ProjectRecord[]
+- now: ISO string
+
+Output JSON schema:
+{
+ "reviewText": string, // MUST be <250 words
+ "whatHappened": string[], // 3-5 bullets (short)
+ "biggestOpenLoops": string[], // 1-3 items
+ "suggestedActionsNextWeek": string[],// 3 items max
+ "recurringTheme": string // 1 sentence
+}
+
+Rules:
+- Must focus on actionable follow-through and open loops.
+- No long analysis.
+
+# 10) Local storage implementation details
+Implement an append-safe storage layer:
+- Append JSONL for new records.
+- For updates, either:
+ - rewrite full file safely (atomic write), OR
+ - maintain a small โpatch logโ file and materialize views (choose simplest maintainable approach).
+Given maintainability, prefer atomic rewrite per DB file with file lock or single-threaded write queue.
+
+Include:
+- Validation of records against schemas (lightweight runtime checks).
+- Basic search/filter by fields.
+- Sorting by timestamps.
+- Robust error handling; errors logged to InboxLogRecord status=error.
+
+# 11) Offline-first behavior
+- If AI provider is unavailable (LM Studio down), capture must still work:
+ - Create InboxLogRecord with status=needs_review and destination=unknown.
+ - UI tells user โAI unavailable; queued for later classificationโ with a โRetry classificationโ action.
+- Add a small queue processor to retry classification when AI becomes available.
+
+# 12) Testing + sample data
+- Provide sample data under ./data.sample/brain to demo UI instantly.
+- Add minimal tests for:
+ - capture flow creates inbox log always
+ - confidence threshold gating
+ - fix/move behavior updates records + log
+ - digest/review prompt enforcement (word limits enforced server-side too)
+
+# 13) Implementation sequence (do in this order)
+1) Data layer + schemas + file IO + meta settings.
+2) API endpoints: capture + CRUD + digest/review run.
+3) AI prompt templates + strict JSON parsing/validation.
+4) Scheduler + manual trigger.
+5) React UI for Inbox + Memory + Digest/Review + Trust panel.
+6) Sample data + tests.
+
+# 14) Output expectations
+Return a PR-quality implementation:
+- Clean code, minimal dependencies.
+- Works end-to-end locally.
+- Default Brain uses LM Studio / `openai/gpt-oss-20b` unless overridden.
+- UI and audit trail make the system trustworthy and easy to repair.
+
+Do not implement Apple Notes/email/calendar integrations; only stub extension points.
diff --git a/docs/features/chief-of-staff.md b/docs/features/chief-of-staff.md
new file mode 100644
index 0000000..d50d4c4
--- /dev/null
+++ b/docs/features/chief-of-staff.md
@@ -0,0 +1,119 @@
+# Chief of Staff
+
+Autonomous agent manager that watches task files, spawns sub-agents, and maintains system health.
+
+## Architecture
+
+- **Task Parser** (`server/lib/taskParser.js`): Parses TASKS.md and COS-TASKS.md formats
+- **CoS Service** (`server/services/cos.js`): State management, health monitoring, task evaluation
+- **Task Watcher** (`server/services/taskWatcher.js`): File watching with chokidar
+- **Sub-Agent Spawner** (`server/services/subAgentSpawner.js`): Claude CLI execution with MCP
+- **CoS Routes** (`server/routes/cos.js`): REST API endpoints
+- **CoS UI** (`client/src/pages/ChiefOfStaff.jsx`): Tasks, Agents, Health, Config tabs
+
+## Features
+
+1. **Dual Task Lists**: User tasks (TASKS.md) and system tasks (COS-TASKS.md)
+2. **Autonomous Execution**: Auto-approved tasks run without user intervention
+3. **Approval Workflow**: Tasks marked APPROVAL require user confirmation
+4. **System Health Monitoring**: PM2 process checks, memory usage, error detection
+5. **Sub-Agent Spawning**: Claude CLI with --dangerously-skip-permissions and MCP servers
+6. **Self-Improvement**: Can analyze performance and suggest prompt/config improvements
+7. **Script Generation**: Creates automation scripts for repetitive tasks
+8. **Report Generation**: Daily summaries of completed work
+
+## Task File Format
+
+```markdown
+# Tasks
+## Pending
+- [ ] #task-001 | HIGH | Task description
+ - Context: Additional context
+ - App: app-name
+
+## In Progress
+- [~] #task-002 | MEDIUM | Another task
+ - Agent: agent-id
+ - Started: 2024-01-15T10:30:00Z
+
+## Completed
+- [x] #task-003 | LOW | Done task
+ - Completed: 2024-01-14T15:45:00Z
+```
+
+## System Task Format
+
+```markdown
+- [ ] #sys-001 | HIGH | AUTO | Auto-approved task
+- [ ] #sys-002 | MEDIUM | APPROVAL | Needs user approval
+```
+
+## Data Storage
+
+```
+./data/cos/
+โโโ state.json # Daemon state and config
+โโโ agents/{agentId}/ # Agent prompts and outputs
+โโโ reports/{date}.json # Daily reports
+โโโ scripts/ # Generated automation scripts
+```
+
+## Model Selection Rules
+
+The `selectModelForTask` function routes tasks to appropriate model tiers:
+
+| Tier | Trigger | Example Tasks |
+|------|---------|---------------|
+| **heavy** | Critical priority, visual analysis, complex reasoning | Architect, refactor, security audit, long context |
+| **medium** | Standard development tasks, default | Most coding tasks, bug fixes, feature implementation |
+| **light** | Documentation-only tasks | Update README, write docs, format text |
+
+**Important**: Light model (haiku) is NEVER used for coding tasks. Tasks containing keywords like `fix`, `bug`, `implement`, `test`, `feature`, `api`, `component`, etc. are automatically routed to medium tier or higher.
+
+## Configuration
+
+| Setting | Default | Description |
+|---------|---------|-------------|
+| evaluationIntervalMs | 60000 | Task evaluation interval (1 minute) |
+| healthCheckIntervalMs | 900000 | Health check interval (15 minutes) |
+| maxConcurrentAgents | 3 | Max parallel agents |
+| maxProcessMemoryMb | 2048 | Memory alert threshold |
+| autoStart | false | Start on server boot |
+| selfImprovementEnabled | true | Allow self-analysis |
+| proactiveMode | true | Always find work when idle |
+| comprehensiveAppImprovement | true | Apply full analysis to managed apps |
+
+## API Endpoints
+
+| Route | Description |
+|-------|-------------|
+| GET /api/cos | Get CoS status |
+| POST /api/cos/start | Start daemon |
+| POST /api/cos/stop | Stop daemon |
+| GET/PUT /api/cos/config | Configuration |
+| GET /api/cos/tasks | Get all tasks |
+| POST /api/cos/evaluate | Force evaluation |
+| GET /api/cos/health | Health status |
+| POST /api/cos/health/check | Run health check |
+| GET /api/cos/agents | List agents |
+| POST /api/cos/agents/:id/terminate | Terminate agent |
+| GET /api/cos/reports | List reports |
+| GET /api/cos/learning | Get learning insights |
+| GET /api/cos/digest | Get weekly digest |
+
+## Prompt Templates
+
+| Template | Purpose |
+|----------|---------|
+| cos-agent-briefing | Brief sub-agent on task |
+| cos-evaluate | Evaluate tasks and decide actions |
+| cos-report-summary | Generate daily summary |
+| cos-self-improvement | Analyze and suggest improvements |
+
+## Related Features
+
+- [Memory System](./memory-system.md)
+- [Task Learning](./task-learning.md)
+- [Self-Improvement](./self-improvement.md)
+- [Error Handling](./error-handling.md)
+- [Scheduled Scripts](./scheduled-scripts.md)
diff --git a/docs/features/cos-agent-runner.md b/docs/features/cos-agent-runner.md
new file mode 100644
index 0000000..40bd60e
--- /dev/null
+++ b/docs/features/cos-agent-runner.md
@@ -0,0 +1,45 @@
+# CoS Agent Runner
+
+Isolated PM2 process for spawning Claude CLI agents, preventing orphaned processes when portos-server restarts.
+
+## Problem
+
+When multiple CoS agents are running and the main portos-server restarts (due to code changes, crashes, or manual restart), child processes spawned via `child_process.spawn()` become orphaned. The parent loses track of them because the `activeAgents` Map is in memory.
+
+## Solution
+
+A separate `portos-cos` PM2 process that:
+1. Runs independently from `portos-server`
+2. Manages agent spawning via HTTP/Socket.IO bridge
+3. Doesn't restart when `portos-server` restarts
+4. Maintains its own state file for PID tracking
+
+## Architecture
+
+```
+โโโโโโโโโโโโโโโโโโโ HTTP/Socket.IO โโโโโโโโโโโโโโโโโโโ
+โ portos-server โ โโโโโโโโโโโโโโโโโโโบ โ portos-cos โ
+โ (5554) โ spawn/terminate โ (5558) โ
+โ โ โโโโโโโโโโโโโโโโโโโ โ โ
+โ subAgentSpawnerโ events/output โ cos-runner โ
+โโโโโโโโโโโโโโโโโโโ โโโโโโโโโโฌโโโโโโโโโ
+ โ
+ โ spawn
+ โผ
+ โโโโโโโโโโโโโโโโโ
+ โ Claude CLI โ
+ โ Processes โ
+ โโโโโโโโโโโโโโโโโ
+```
+
+## Features
+
+- **Process Isolation**: Agent processes survive server restarts
+- **State Persistence**: PIDs tracked in state file for recovery
+- **Bridge Communication**: HTTP/Socket.IO for cross-process messaging
+- **Orphan Detection**: Automatic cleanup of orphaned agent processes
+
+## Related Features
+
+- [Chief of Staff](./chief-of-staff.md) - Main orchestration system
+- [Error Handling](./error-handling.md) - Agent error recovery
diff --git a/docs/features/cos-enhancement.md b/docs/features/cos-enhancement.md
new file mode 100644
index 0000000..64e15d2
--- /dev/null
+++ b/docs/features/cos-enhancement.md
@@ -0,0 +1,126 @@
+# Chief of Staff Enhancement (M35)
+
+Comprehensive upgrade from reactive task executor to proactive autonomous agent with hybrid memory, missions, local model integration, and dynamic thinking levels.
+
+## Architecture
+
+### New Services
+
+| Service | Purpose |
+|---------|---------|
+| `server/lib/bm25.js` | BM25 algorithm with IDF weighting |
+| `server/services/memoryBM25.js` | BM25 index manager for memory search |
+| `server/services/sessionDelta.js` | Session delta tracking for pending bytes/messages |
+| `server/services/toolStateMachine.js` | Tool execution state machine |
+| `server/services/agentGateway.js` | Request deduplication and caching |
+| `server/services/errorRecovery.js` | Error analysis and recovery strategies |
+| `server/services/agentRunCache.js` | Agent output caching with TTL |
+| `server/services/eventScheduler.js` | Cron-based event scheduling |
+| `server/services/executionLanes.js` | Concurrent execution lane management |
+| `server/services/missions.js` | Long-term goal and mission management |
+| `server/services/lmStudioManager.js` | LM Studio model discovery and health |
+| `server/services/localThinking.js` | Local model completions |
+| `server/services/thinkingLevels.js` | Dynamic model selection |
+| `server/services/contextUpgrader.js` | Complexity analysis for model upgrade |
+| `server/services/cosEvolution.js` | Self-evolution and model changes |
+
+## Features
+
+### Phase 1: Hybrid Memory Search
+
+- BM25 algorithm with IDF weighting and inverted index
+- Reciprocal Rank Fusion (RRF) combining BM25 + vector search
+- 40% BM25 / 60% vector weighting for optimal retrieval
+- Session delta tracking for pending bytes/messages
+
+### Phase 2: Proactive Execution
+
+- Event scheduler with cron expressions and timeout-safe timers (clamps to 2^31-1)
+- Execution lanes: critical (1), standard (2), background (3) concurrent slots
+- Mission system for long-term goals with sub-tasks
+- Mission-driven task generation in evaluation loop
+
+### Phase 3: Local Model Integration
+
+- LM Studio availability checking and model discovery
+- Quick completions for local thinking without cloud costs
+- Memory classification using local models
+- Embeddings via local LM Studio
+
+### Phase 4: Dynamic Model Selection
+
+- Thinking levels: off, minimal, low, medium, high, xhigh
+- Level resolution hierarchy: task โ hooks โ agent โ provider
+- Context upgrader with complexity analysis
+- COS self-evolution with automatic model changes
+
+### Phase 5: Agent Architecture
+
+- Tool execution state machine (IDLE โ START โ RUNNING โ UPDATE โ END โ ERROR)
+- Agent gateway with request deduplication and 10-minute cache
+- Error recovery with 6 strategies: retry, escalate, fallback, decompose, defer, investigate
+- Agent run cache for outputs, tool results, and contexts
+
+## Execution Lanes
+
+| Lane | Concurrent Slots | Purpose |
+|------|------------------|---------|
+| critical | 1 | Emergency fixes, blocking issues |
+| standard | 2 | Normal development tasks |
+| background | 3 | Self-improvement, documentation |
+
+## Thinking Levels
+
+| Level | Description | Model Tier |
+|-------|-------------|------------|
+| off | No extended thinking | light |
+| minimal | Brief analysis | light |
+| low | Standard analysis | medium |
+| medium | Thorough analysis | medium |
+| high | Deep analysis | heavy |
+| xhigh | Maximum analysis | heavy |
+
+## Error Recovery Strategies
+
+| Strategy | When Used |
+|----------|-----------|
+| retry | Transient errors (rate limit, timeout) |
+| escalate | Persistent failures requiring human review |
+| fallback | Model unavailable, try alternative |
+| decompose | Complex task, break into subtasks |
+| defer | Non-urgent, schedule for later |
+| investigate | Unknown error, create investigation task |
+
+## API Endpoints
+
+| Route | Description |
+|-------|-------------|
+| GET /api/lmstudio/status | Check LM Studio availability |
+| GET /api/lmstudio/models | List loaded models |
+| POST /api/lmstudio/completion | Local model completion |
+| POST /api/lmstudio/analyze-task | Analyze task complexity |
+| POST /api/lmstudio/classify-memory | Classify memory content |
+
+## Design Decisions
+
+1. **Mission Autonomy**: Full autonomy - COS can implement changes, run tests, commit without approval for managed apps
+2. **Model Usage**: Local-first with LM Studio, no cloud API costs for thinking
+3. **Self-Modification**: Full autonomy - COS can change its own base thinking model without user approval
+
+## Test Coverage
+
+| Test File | Module |
+|-----------|--------|
+| `server/lib/bm25.test.js` | BM25 Algorithm |
+| `server/services/toolStateMachine.test.js` | Tool State Machine |
+| `server/services/thinkingLevels.test.js` | Thinking Levels |
+| `server/services/executionLanes.test.js` | Execution Lanes |
+| `server/services/errorRecovery.test.js` | Error Recovery |
+| `server/services/agentRunCache.test.js` | Agent Run Cache |
+| `server/services/missions.test.js` | Missions Service |
+
+## Related Features
+
+- [Chief of Staff](./chief-of-staff.md) - Core orchestration
+- [Memory System](./memory-system.md) - Memory search integration
+- [Error Handling](./error-handling.md) - Error recovery integration
diff --git a/docs/features/digital-twin.md b/docs/features/digital-twin.md
new file mode 100644
index 0000000..4af4b0b
--- /dev/null
+++ b/docs/features/digital-twin.md
@@ -0,0 +1,134 @@
+# Digital Twin Personality Enhancement
+
+Quantitative personality modeling and prediction system that accurately embodies a human's values, decision patterns, and communication style.
+
+## Vision
+
+Transform the Digital Twin from a document capture system into a quantitative personality modeling and prediction system.
+
+## Architecture
+
+- **Digital Twin Service** (`server/services/digitalTwin.js`): Trait analysis, confidence scoring, gap recommendations
+- **Digital Twin Routes** (`server/routes/digital-twin.js`): REST API endpoints
+- **Digital Twin Validation** (`server/lib/digitalTwinValidation.js`): Zod schemas for trait data
+
+## Features
+
+### Phase 1: Quantitative Personality Modeling (Complete)
+
+**Big Five Trait Scoring**
+- Quantified OCEAN scores (Openness, Conscientiousness, Extraversion, Agreeableness, Neuroticism)
+- Infer scores from existing documents using LLM analysis
+- Allow manual override/adjustment
+- Store in `meta.json` under `traits.bigFive`
+
+**Values Hierarchy**
+- Extract explicit values from VALUES.md and NON_NEGOTIABLES.md
+- Create ranked values list with conflict resolution rules
+- Store in `meta.json` under `traits.valuesHierarchy`
+
+**Communication Fingerprint**
+- Quantify writing style: formality (1-10), verbosity (1-10), emoji usage, sentence length avg
+- Extract from WRITING_STYLE.md and writing samples
+- Store in `meta.json` under `traits.communicationProfile`
+
+### Phase 2: Personality Confidence Scoring (Complete)
+
+**Coverage Metrics**
+- For each Big Five dimension: evidence count from documents
+- For each value: supporting document count + specificity score
+- For communication: sample diversity, consistency across samples
+
+**Confidence Algorithm**
+```
+confidence(aspect) = min(1.0,
+ (evidence_count / required_evidence) *
+ (consistency_score) *
+ (recency_weight)
+)
+```
+
+**Gap Recommendations**
+- Identify lowest-confidence aspects
+- Generate specific questions to fill gaps
+- Prioritize enrichment categories by confidence gap
+
+### Phase 4: External Data Integration (Complete)
+
+Import from external sources to reduce manual input:
+- Goodreads CSV import for reading preferences
+- Spotify/Last.fm for music profile
+- Calendar pattern analysis for routines
+
+## Data Structure
+
+```javascript
+traits: {
+ bigFive: { O: 0.75, C: 0.82, E: 0.45, A: 0.68, N: 0.32 },
+ valuesHierarchy: ["authenticity", "growth", "family", ...],
+ communicationProfile: {
+ formality: 6,
+ verbosity: 4,
+ avgSentenceLength: 18,
+ emojiUsage: "rare",
+ preferredTone: "direct-but-warm"
+ },
+ lastAnalyzed: "2026-01-21T..."
+}
+```
+
+## UI Components
+
+- `PersonalityMap.jsx` - Radar chart of Big Five with confidence coloring
+- `ConfidenceGauge.jsx` - Per-dimension confidence indicator
+- `GapRecommendations.jsx` - Prioritized enrichment suggestions
+- `TraitEditor.jsx` - Manual trait override interface
+
+## API Endpoints
+
+| Route | Description |
+|-------|-------------|
+| GET /api/digital-twin/traits | Get all trait scores |
+| POST /api/digital-twin/traits/analyze | Analyze documents to extract traits |
+| PUT /api/digital-twin/traits/:category | Manual override trait scores |
+| GET /api/digital-twin/confidence | Get confidence scores |
+| POST /api/digital-twin/confidence/calculate | Recalculate confidence |
+| GET /api/digital-twin/gaps | Get gap recommendations |
+
+## Planned Phases
+
+### Phase 3: Behavioral Feedback Loop
+- Response validation: "sounds like me" / "not quite me" ratings
+- Feedback analysis and document improvement suggestions
+- Adaptive document weighting based on feedback patterns
+
+### Phase 5: Multi-Modal Personality Capture
+- Voice analysis for speech patterns
+- Video interview for facial expressions and gestures
+- Comparison of spoken vs written style
+
+### Phase 6: Advanced Behavioral Testing
+- Complex multi-turn conversation scenarios
+- Ethical dilemma tests aligned with stated values
+- Quantitative scoring of communication style match
+- Adversarial testing of boundaries
+
+### Phase 7: Twin Personas & Context Switching
+- Named personas (Professional, Casual, Family, Creative)
+- Blending rules for trait variation per context
+- Per-persona testing
+
+## Success Metrics
+
+| Metric | Current | Target |
+|--------|---------|--------|
+| Behavioral test pass rate | ~70% | >90% |
+| Enrichment category coverage | Manual | Confidence-guided |
+| User feedback: "sounds like me" | N/A | >85% |
+| Time to usable twin | Hours | <30 min |
+| Trait confidence coverage | 0% | >80% across all dimensions |
+
+## Related Features
+
+- [Soul System](./soul-system.md) - Document-based identity management
+- [Chief of Staff](./chief-of-staff.md) - Uses twin context in agent prompts
diff --git a/docs/features/error-handling.md b/docs/features/error-handling.md
new file mode 100644
index 0000000..592499f
--- /dev/null
+++ b/docs/features/error-handling.md
@@ -0,0 +1,77 @@
+# Graceful Error Handling
+
+Enhanced error handling system with automatic recovery and UI notifications.
+
+## Architecture
+
+- **Error Handler** (`server/lib/errorHandler.js`): Centralized error normalization and Socket.IO emission
+- **Auto-Fixer** (`server/services/autoFixer.js`): Automatic agent spawning for critical errors
+- **Socket.IO Integration**: Real-time error notifications to connected clients
+- **Route Protection**: All routes use asyncHandler wrapper for consistent error handling
+
+## Features
+
+1. **Graceful Error Handling**: Server never crashes, all errors caught and handled
+2. **Socket.IO Error Events**: Real-time error notifications to UI with severity and context
+3. **Auto-Fix Tasks**: Critical errors automatically create CoS tasks for agent resolution
+4. **Error Recovery UI**: Client can request manual error recovery via Socket.IO
+5. **Process Error Handlers**: Unhandled rejections and exceptions trigger auto-fix
+6. **Error Deduplication**: Prevents duplicate auto-fix tasks within 1-minute window
+
+## Error Severity Levels
+
+| Severity | Description | Auto-Fix |
+|----------|-------------|----------|
+| warning | Non-critical issues | No |
+| error | Server errors, failures | No |
+| critical | System-threatening errors | Yes |
+
+## Socket.IO Events
+
+| Event | Direction | Payload |
+|-------|-----------|---------|
+| error:occurred | Server โ Client | Error details with severity, code, timestamp |
+| system:critical-error | Server โ Client | Critical errors only |
+| error:notified | Server โ Subscribers | Error notification to subscribed clients |
+| errors:subscribe | Client โ Server | Subscribe to error events |
+| errors:unsubscribe | Client โ Server | Unsubscribe from error events |
+| error:recover | Client โ Server | Request manual error recovery |
+| error:recover:requested | Server โ Client | Recovery task created confirmation |
+
+## Auto-Fix Flow
+
+1. Error occurs in route or service
+2. `asyncHandler` catches and normalizes error
+3. Error emitted to `errorEvents` EventEmitter
+4. `autoFixer` checks if error should trigger auto-fix
+5. If yes, creates CoS task with error context
+6. Socket.IO broadcasts error to all connected clients
+7. CoS evaluates and spawns agent to fix the error
+8. Agent analyzes, fixes, and reports back
+
+## Error Context
+
+Errors include rich context for debugging:
+- Error code and message
+- HTTP status code
+- Timestamp
+- Stack trace (for 500+ errors)
+- Custom context object
+- Severity level
+- Auto-fix flag
+
+## Implementation Files
+
+| File | Purpose |
+|------|---------|
+| `server/lib/errorHandler.js` | Error classes, asyncHandler, middleware |
+| `server/services/autoFixer.js` | Auto-fix task creation and deduplication |
+| `server/services/socket.js` | Socket.IO error event forwarding |
+| `server/routes/*.js` | All routes use asyncHandler wrapper |
+| `client/src/hooks/useErrorNotifications.js` | Client-side error event handler with toast notifications |
+| `client/src/components/Layout.jsx` | Mounts error notification hook for app-wide coverage |
+
+## Related Features
+
+- [Chief of Staff](./chief-of-staff.md)
+- [Autofixer](./autofixer.md)
diff --git a/docs/features/memory-system.md b/docs/features/memory-system.md
new file mode 100644
index 0000000..0172ce5
--- /dev/null
+++ b/docs/features/memory-system.md
@@ -0,0 +1,188 @@
+# Memory System
+
+Semantic memory system for the Chief of Staff that stores facts, learnings, observations, decisions, and user preferences with vector embeddings for intelligent retrieval.
+
+## Architecture
+
+- **Memory Service** (`server/services/memory.js`): Core CRUD, search, and lifecycle operations
+- **Embeddings Service** (`server/services/memoryEmbeddings.js`): LM Studio integration for vector generation
+- **Memory Extractor** (`server/services/memoryExtractor.js`): Extract memories from agent output
+- **Memory Classifier** (`server/services/memoryClassifier.js`): LLM-based quality filtering
+- **Memory Retriever** (`server/services/memoryRetriever.js`): Context injection for agent prompts
+- **Memory Routes** (`server/routes/memory.js`): REST API endpoints
+- **Memory Tab** (`ChiefOfStaff.jsx`): UI with list, timeline, and graph views
+
+## Features
+
+1. **Six Memory Types**: fact, learning, observation, decision, preference, context
+2. **Semantic Search**: LM Studio embeddings for similarity-based retrieval
+3. **LLM Classification**: Intelligent memory extraction with quality filtering (M31)
+4. **Auto-Extraction**: Memories extracted from successful agent task completions
+5. **Auto-Injection**: Relevant memories injected into agent prompts before execution
+6. **Importance Decay**: Time-based decay with access-based boosts
+7. **Memory Consolidation**: Merge similar memories automatically
+8. **Real-time Updates**: WebSocket events for memory changes
+9. **Graph Visualization**: D3.js relationship graph (planned)
+
+## Memory Schema
+
+```javascript
+{
+ id: string, // UUID
+ type: 'fact' | 'learning' | 'observation' | 'decision' | 'preference' | 'context',
+ content: string, // Full memory content
+ summary: string, // Short summary
+ category: string, // e.g., 'codebase', 'workflow', 'tools'
+ tags: string[], // Auto-extracted and user-defined
+ relatedMemories: string[], // Linked memory IDs
+ sourceTaskId: string, // Origin task
+ sourceAgentId: string, // Origin agent
+ embedding: number[], // Vector (768 dims for nomic-embed)
+ confidence: number, // 0.0-1.0
+ importance: number, // 0.0-1.0 (decays over time)
+ accessCount: number,
+ lastAccessed: string,
+ createdAt: string,
+ status: 'active' | 'archived' | 'expired'
+}
+```
+
+## Data Storage
+
+```
+./data/cos/memory/
+โโโ index.json # Lightweight metadata for listing/filtering
+โโโ embeddings.json # Vector storage for semantic search
+โโโ memories/ # Full memory content
+ โโโ {id}/
+ โโโ memory.json
+```
+
+## LLM-Based Classification (M31)
+
+The memory classifier uses LM Studio's gptoss-20b model to intelligently evaluate agent output and extract only genuinely useful memories.
+
+### Good Memories
+
+- Codebase facts: File locations, architecture patterns, dependencies
+- User preferences: Coding style, tool preferences, workflow patterns
+- Learnings: Discovered behaviors, gotchas, workarounds
+- Decisions: Architectural choices with reasoning
+
+### Rejected Memories
+
+- Task echoes: Just restating what the task was
+- Generic summaries: "The task was successful"
+- Temporary info: Session-specific data, timestamps
+- Truncated/incomplete content
+
+### Configuration
+
+```json
+{
+ "enabled": true,
+ "provider": "lmstudio",
+ "endpoint": "http://localhost:1234/v1/chat/completions",
+ "model": "gptoss-20b",
+ "timeout": 60000,
+ "maxOutputLength": 10000,
+ "minConfidence": 0.6,
+ "fallbackToPatterns": true
+}
+```
+
+## Memory Extraction
+
+Memories are extracted from agent output:
+
+1. **LLM Classification**: gptoss-20b analyzes task and output, extracts quality memories
+2. **Fallback Patterns**: If LLM unavailable, falls back to pattern matching
+3. **High confidence (>0.8)**: Auto-saved
+4. **Medium confidence (0.5-0.8)**: Queued for user approval
+
+## Memory Injection
+
+Before agent task execution:
+
+1. Generate embedding for task description
+2. Find semantically similar memories (>0.7 relevance)
+3. Include high-importance user preferences
+4. Include relevant codebase facts
+5. Format as markdown section in agent prompt
+
+## API Endpoints
+
+| Route | Description |
+|-------|-------------|
+| GET /api/memory | List memories with filters |
+| GET /api/memory/:id | Get single memory |
+| POST /api/memory | Create memory |
+| PUT /api/memory/:id | Update memory |
+| DELETE /api/memory/:id | Delete (soft) memory |
+| POST /api/memory/search | Semantic search |
+| GET /api/memory/categories | List categories |
+| GET /api/memory/tags | List tags |
+| GET /api/memory/timeline | Timeline view data |
+| GET /api/memory/graph | Graph visualization data |
+| GET /api/memory/stats | Memory statistics |
+| POST /api/memory/link | Link two memories |
+| POST /api/memory/consolidate | Merge similar memories |
+| POST /api/memory/decay | Apply importance decay |
+| DELETE /api/memory/expired | Clear expired memories |
+| GET /api/memory/embeddings/status | LM Studio connection status |
+
+## WebSocket Events
+
+| Event | Description |
+|-------|-------------|
+| cos:memory:created | New memory created |
+| cos:memory:updated | Memory updated |
+| cos:memory:deleted | Memory deleted |
+| cos:memory:extracted | Memories extracted from agent |
+| cos:memory:approval-needed | Medium-confidence memories pending approval |
+
+## Setup Requirements
+
+**LM Studio** must be running with an embedding model loaded:
+
+1. Download and install [LM Studio](https://lmstudio.ai/)
+2. Load an embedding model: `text-embedding-nomic-embed-text-v2-moe` (recommended)
+3. Load a classification model: `gptoss-20b` or similar
+4. Start the local server on port 1234 (default)
+5. The memory system will automatically connect
+
+## LM Studio Configuration
+
+```javascript
+memory: {
+ enabled: true,
+ embeddingProvider: 'lmstudio',
+ embeddingEndpoint: 'http://localhost:1234/v1/embeddings',
+ embeddingModel: 'text-embedding-nomic-embed-text-v2-moe',
+ embeddingDimension: 768,
+ maxContextTokens: 2000,
+ minRelevanceThreshold: 0.7,
+ autoExtractEnabled: true
+}
+```
+
+## Implementation Files
+
+| File | Purpose |
+|------|---------|
+| `server/lib/memoryValidation.js` | Zod schemas for memory operations |
+| `server/lib/vectorMath.js` | Cosine similarity, clustering helpers |
+| `server/services/memory.js` | Core CRUD, search, lifecycle |
+| `server/services/memoryEmbeddings.js` | LM Studio embedding generation |
+| `server/services/memoryExtractor.js` | Extract memories from agent output |
+| `server/services/memoryClassifier.js` | LLM-based classification service |
+| `server/services/memoryRetriever.js` | Retrieve and format for injection |
+| `server/routes/memory.js` | REST API endpoints |
+| `client/src/pages/ChiefOfStaff.jsx` | MemoryTab, MemoryTimeline, MemoryGraph |
+| `client/src/services/api.js` | Memory API client functions |
+| `data/prompts/stages/memory-evaluate.md` | Memory evaluation prompt template |
+
+## Related Features
+
+- [Chief of Staff](./chief-of-staff.md)
+- [Brain System](./brain-system.md)
diff --git a/docs/features/prompt-manager.md b/docs/features/prompt-manager.md
new file mode 100644
index 0000000..cfb7c3c
--- /dev/null
+++ b/docs/features/prompt-manager.md
@@ -0,0 +1,61 @@
+# Prompt Manager
+
+Customizable AI prompts for all backend AI operations with file-based storage and template rendering.
+
+## Architecture
+
+- **Prompt Service** (`server/services/prompts.js`): Template loading, variable substitution, stage configuration
+- **Prompt Routes** (`server/routes/prompts.js`): REST API endpoints
+- **Prompt Page** (`client/src/pages/Prompts.jsx`): Stages, Variables, Elements tabs with live preview
+
+## Features
+
+1. **Prompt Stages**: Define different prompts for different AI tasks (detection, analysis, etc.)
+2. **Variables**: Reusable content blocks (personas, formats, constraints)
+3. **Per-Stage Provider Config**: Each stage can use different AI providers/models
+4. **Web UI**: Edit prompts, variables, and preview rendered output
+5. **Template Syntax**: `{{variable}}`, `{{#condition}}...{{/condition}}`, arrays
+
+## Directory Structure
+
+```
+./data/prompts/
+โโโ stages/ # Individual prompt templates (.md files)
+โ โโโ app-detection.md
+โ โโโ code-analysis.md
+โ โโโ ...
+โโโ variables.json # Reusable prompt variables
+โโโ stage-config.json # Stage metadata and provider config
+```
+
+## Template Syntax
+
+Templates use Mustache-like syntax:
+
+- `{{variable}}` - Simple variable substitution
+- `{{#condition}}...{{/condition}}` - Conditional blocks
+- `{{#array}}...{{/array}}` - Array iteration
+
+## API Endpoints
+
+| Route | Description |
+|-------|-------------|
+| GET /api/prompts | List all prompt stages |
+| GET /api/prompts/:stage | Get stage template |
+| PUT /api/prompts/:stage | Update stage/template |
+| POST /api/prompts/:stage/preview | Preview compiled prompt |
+| GET /api/prompts/variables | List all variables |
+| PUT /api/prompts/variables/:key | Update variable |
+| POST /api/prompts/variables | Create variable |
+| DELETE /api/prompts/variables/:key | Delete variable |
+
+## UI
+
+- `/prompts` - Prompt Manager with tabs for Stages, Variables, Elements
+- Live preview with test variables
+- Insert variable references
+
+## Related Features
+
+- [Chief of Staff](./chief-of-staff.md) - Uses prompts for agent briefings
+- [Memory System](./memory-system.md) - Uses prompts for memory evaluation
diff --git a/docs/features/soul-system.md b/docs/features/soul-system.md
new file mode 100644
index 0000000..a409b9f
--- /dev/null
+++ b/docs/features/soul-system.md
@@ -0,0 +1,112 @@
+# Soul System
+
+Digital twin identity scaffold management for creating and testing aligned AI personas.
+
+## Overview
+
+LLMs can embody specific personas, but creating comprehensive identity documents and testing alignment across different models is manual and error-prone. The Soul System provides a structured approach to capturing, validating, and deploying personality models.
+
+## Architecture
+
+- **Digital Twin Service** (`server/services/digitalTwin.js`): Document management, testing, enrichment
+- **Digital Twin Routes** (`server/routes/digital-twin.js`): REST API endpoints mounted under `/api/digital-twin/*`
+- **Digital Twin Page** (`client/src/pages/DigitalTwin.jsx`): Overview, Documents, Test, Enrich, Export tabs
+
+## Features
+
+### Five-Tab Interface
+
+1. **Overview Tab**: Dashboard showing soul health score, document counts, test scores, enrichment progress, and quick actions
+2. **Documents Tab**: Sidebar-based document editor for managing soul markdown files by category (core, audio, behavioral, enrichment)
+3. **Test Tab**: Multi-model behavioral testing against 14 predefined tests, with side-by-side result comparison
+4. **Enrich Tab**: Guided questionnaire across 10 categories that generates soul document content from answers
+5. **Export Tab**: Export soul for use in external LLMs (System Prompt, CLAUDE.md, JSON, individual files)
+
+### CoS Integration
+
+- Soul context automatically injected into agent prompts when enabled
+- Settings control `autoInjectToCoS` and `maxContextTokens`
+- Prompt template `cos-agent-briefing.md` includes `{{soulSection}}`
+
+## Directory Structure
+
+```
+data/digital-twin/
+โโโ meta.json # Document metadata, test history, settings
+โโโ SOUL.md # Core identity
+โโโ Expanded.md # High-fidelity spec
+โโโ BEHAVIORAL_TEST_SUITE.md # 14 behavioral tests
+โโโ AUDIO*.md # Audio preferences
+โโโ MEMORIES.md # Generated via enrichment
+โโโ FAVORITES.md # Generated via enrichment
+โโโ PREFERENCES.md # Generated via enrichment
+```
+
+## Enrichment Categories
+
+| Category | Description |
+|----------|-------------|
+| Core Memories | Formative experiences |
+| Favorite Books | Books that shaped thinking |
+| Favorite Movies | Films that resonate |
+| Music Taste | Cognitive infrastructure |
+| Communication | How to give/receive info |
+| Decision Making | Approach to choices |
+| Values | Core principles |
+| Aesthetics | Visual preferences |
+| Daily Routines | Structure habits |
+| Career/Skills | Professional expertise |
+
+### Additional Categories (M33.1)
+
+- **non_negotiables**: Principles and boundaries that define your limits
+- **decision_heuristics**: Mental models and shortcuts for making choices
+- **error_intolerance**: What your digital twin should never do
+
+## Validation & Analysis
+
+1. **Completeness Validator**: Checks for 6 required sections (identity, values, communication, decision making, non-negotiables, error intolerance), shows percentage complete with actionable suggestions
+2. **Contradiction Detector**: AI-powered analysis to find inconsistencies between soul documents, with severity levels and resolution suggestions
+
+## Dynamic Testing
+
+- AI generates behavioral tests based on soul content
+- Targets values, communication style, non-negotiables, and decision patterns
+- Returns structured tests with prompts, expected behaviors, and failure signals
+
+## Writing Sample Analysis
+
+- Paste writing samples to extract authentic voice patterns
+- Analyzes: sentence structure, vocabulary, formality, tone, distinctive markers
+- Generates WRITING_STYLE.md document content
+
+## Context Optimization
+
+- **Document Weighting**: Priority slider (1-10) on each document
+- Higher weighted documents preserved first when context limits force truncation
+
+## API Endpoints
+
+| Route | Description |
+|-------|-------------|
+| GET /api/digital-twin | Status summary |
+| GET /api/digital-twin/documents | List documents |
+| POST /api/digital-twin/documents | Create document |
+| PUT /api/digital-twin/documents/:id | Update document |
+| DELETE /api/digital-twin/documents/:id | Delete document |
+| GET /api/digital-twin/tests | Get test suite |
+| POST /api/digital-twin/tests/run | Run single-model tests |
+| POST /api/digital-twin/tests/run-multi | Run multi-model tests |
+| GET /api/digital-twin/enrich/categories | List enrichment categories |
+| POST /api/digital-twin/enrich/question | Get next question |
+| POST /api/digital-twin/enrich/answer | Submit answer |
+| POST /api/digital-twin/export | Export soul |
+| GET /api/digital-twin/validate/completeness | Check soul completeness |
+| POST /api/digital-twin/validate/contradictions | Detect contradictions |
+| POST /api/digital-twin/tests/generate | Generate dynamic tests |
+| POST /api/digital-twin/analyze-writing | Analyze writing samples |
+
+## Related Features
+
+- [Digital Twin](./digital-twin.md) - Quantitative personality modeling
+- [Chief of Staff](./chief-of-staff.md) - Uses soul context in agent prompts
diff --git a/ecosystem.config.cjs b/ecosystem.config.cjs
index 5117689..0f47d72 100644
--- a/ecosystem.config.cjs
+++ b/ecosystem.config.cjs
@@ -53,7 +53,7 @@ module.exports = {
},
{
name: 'portos-ui',
- script: 'node_modules/.bin/vite',
+ script: `${__dirname}/node_modules/.bin/vite`,
cwd: `${__dirname}/client`,
args: `--host 0.0.0.0 --port ${PORTS.UI}`,
env: {
diff --git a/package-lock.json b/package-lock.json
index bc884bf..0b63864 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,1185 +1,6734 @@
{
"name": "portos",
- "version": "0.8.10",
+ "version": "0.9.19",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "portos",
- "version": "0.8.10",
+ "version": "0.9.19",
+ "license": "MIT",
+ "workspaces": [
+ "packages/*",
+ "server",
+ "client"
+ ],
"dependencies": {
- "express": "^5.2.1"
+ "express": "^5.2.1",
+ "portos-ai-toolkit": "github:atomantic/portos-ai-toolkit#v0.2.0"
},
"devDependencies": {
- "concurrently": "^8.2.2"
+ "@vitejs/plugin-react": "^4.3.4",
+ "autoprefixer": "^10.4.20",
+ "concurrently": "^8.2.2",
+ "postcss": "^8.4.49",
+ "tailwindcss": "^3.4.17",
+ "vite": "^6.0.6"
}
},
- "node_modules/@babel/runtime": {
- "version": "7.28.4",
- "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz",
- "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==",
+ "client": {
+ "name": "portos-client",
+ "version": "0.9.19",
+ "dependencies": {
+ "@dnd-kit/core": "^6.3.1",
+ "@dnd-kit/sortable": "^10.0.0",
+ "@dnd-kit/utilities": "^3.2.2",
+ "lucide-react": "^0.562.0",
+ "react": "^18.3.1",
+ "react-dom": "^18.3.1",
+ "react-hot-toast": "^2.6.0",
+ "react-router-dom": "^7.1.1",
+ "socket.io-client": "^4.8.3"
+ },
+ "devDependencies": {
+ "@vitejs/plugin-react": "^4.3.4",
+ "autoprefixer": "^10.4.20",
+ "postcss": "^8.4.49",
+ "tailwindcss": "^3.4.17",
+ "vite": "^6.0.6"
+ }
+ },
+ "node_modules/@alloc/quick-lru": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz",
+ "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==",
"dev": true,
"license": "MIT",
"engines": {
- "node": ">=6.9.0"
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/accepts": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz",
- "integrity": "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==",
+ "node_modules/@babel/code-frame": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz",
+ "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==",
+ "dev": true,
"license": "MIT",
"dependencies": {
- "mime-types": "^3.0.0",
- "negotiator": "^1.0.0"
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "js-tokens": "^4.0.0",
+ "picocolors": "^1.1.1"
},
"engines": {
- "node": ">= 0.6"
+ "node": ">=6.9.0"
}
},
- "node_modules/ansi-regex": {
- "version": "5.0.1",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
- "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "node_modules/@babel/compat-data": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz",
+ "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==",
"dev": true,
"license": "MIT",
"engines": {
- "node": ">=8"
+ "node": ">=6.9.0"
}
},
- "node_modules/ansi-styles": {
- "version": "4.3.0",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
- "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "node_modules/@babel/core": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz",
+ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==",
"dev": true,
"license": "MIT",
"dependencies": {
- "color-convert": "^2.0.1"
+ "@babel/code-frame": "^7.27.1",
+ "@babel/generator": "^7.28.5",
+ "@babel/helper-compilation-targets": "^7.27.2",
+ "@babel/helper-module-transforms": "^7.28.3",
+ "@babel/helpers": "^7.28.4",
+ "@babel/parser": "^7.28.5",
+ "@babel/template": "^7.27.2",
+ "@babel/traverse": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "@jridgewell/remapping": "^2.3.5",
+ "convert-source-map": "^2.0.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.3",
+ "semver": "^6.3.1"
},
"engines": {
- "node": ">=8"
+ "node": ">=6.9.0"
},
"funding": {
- "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
}
},
- "node_modules/body-parser": {
- "version": "2.2.1",
- "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.1.tgz",
- "integrity": "sha512-nfDwkulwiZYQIGwxdy0RUmowMhKcFVcYXUU7m4QlKYim1rUtg83xm2yjZ40QjDuc291AJjjeSc9b++AWHSgSHw==",
+ "node_modules/@babel/generator": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz",
+ "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==",
+ "dev": true,
"license": "MIT",
"dependencies": {
- "bytes": "^3.1.2",
- "content-type": "^1.0.5",
- "debug": "^4.4.3",
- "http-errors": "^2.0.0",
- "iconv-lite": "^0.7.0",
- "on-finished": "^2.4.1",
- "qs": "^6.14.0",
- "raw-body": "^3.0.1",
- "type-is": "^2.0.1"
+ "@babel/parser": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "@jridgewell/gen-mapping": "^0.3.12",
+ "@jridgewell/trace-mapping": "^0.3.28",
+ "jsesc": "^3.0.2"
},
"engines": {
- "node": ">=18"
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz",
+ "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/compat-data": "^7.27.2",
+ "@babel/helper-validator-option": "^7.27.1",
+ "browserslist": "^4.24.0",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.1"
},
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/express"
+ "engines": {
+ "node": ">=6.9.0"
}
},
- "node_modules/bytes": {
- "version": "3.1.2",
- "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
- "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
+ "node_modules/@babel/helper-globals": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz",
+ "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==",
+ "dev": true,
"license": "MIT",
"engines": {
- "node": ">= 0.8"
+ "node": ">=6.9.0"
}
},
- "node_modules/call-bind-apply-helpers": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
- "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz",
+ "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==",
+ "dev": true,
"license": "MIT",
"dependencies": {
- "es-errors": "^1.3.0",
- "function-bind": "^1.1.2"
+ "@babel/traverse": "^7.27.1",
+ "@babel/types": "^7.27.1"
},
"engines": {
- "node": ">= 0.4"
+ "node": ">=6.9.0"
}
},
- "node_modules/call-bound": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz",
- "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.28.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz",
+ "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==",
+ "dev": true,
"license": "MIT",
"dependencies": {
- "call-bind-apply-helpers": "^1.0.2",
- "get-intrinsic": "^1.3.0"
+ "@babel/helper-module-imports": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "@babel/traverse": "^7.28.3"
},
"engines": {
- "node": ">= 0.4"
+ "node": ">=6.9.0"
},
- "funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
}
},
- "node_modules/chalk": {
- "version": "4.1.2",
- "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
- "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "node_modules/@babel/helper-plugin-utils": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz",
+ "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==",
"dev": true,
"license": "MIT",
- "dependencies": {
- "ansi-styles": "^4.1.0",
- "supports-color": "^7.1.0"
- },
"engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/chalk/chalk?sponsor=1"
+ "node": ">=6.9.0"
}
},
- "node_modules/chalk/node_modules/supports-color": {
- "version": "7.2.0",
- "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
- "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "node_modules/@babel/helper-string-parser": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
+ "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
"dev": true,
"license": "MIT",
- "dependencies": {
- "has-flag": "^4.0.0"
- },
"engines": {
- "node": ">=8"
+ "node": ">=6.9.0"
}
},
- "node_modules/cliui": {
- "version": "8.0.1",
- "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
- "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
+ "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
"dev": true,
- "license": "ISC",
- "dependencies": {
- "string-width": "^4.2.0",
- "strip-ansi": "^6.0.1",
- "wrap-ansi": "^7.0.0"
- },
+ "license": "MIT",
"engines": {
- "node": ">=12"
+ "node": ">=6.9.0"
}
},
- "node_modules/color-convert": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
- "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz",
+ "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==",
"dev": true,
"license": "MIT",
- "dependencies": {
- "color-name": "~1.1.4"
- },
"engines": {
- "node": ">=7.0.0"
+ "node": ">=6.9.0"
}
},
- "node_modules/color-name": {
- "version": "1.1.4",
- "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
- "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "node_modules/@babel/helpers": {
+ "version": "7.28.4",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz",
+ "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==",
"dev": true,
- "license": "MIT"
+ "license": "MIT",
+ "dependencies": {
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
},
- "node_modules/concurrently": {
- "version": "8.2.2",
- "resolved": "https://registry.npmjs.org/concurrently/-/concurrently-8.2.2.tgz",
- "integrity": "sha512-1dP4gpXFhei8IOtlXRE/T/4H88ElHgTiUzh71YUmtjTEHMSRS2Z/fgOxHSxxusGHogsRfxNq1vyAwxSC+EVyDg==",
+ "node_modules/@babel/parser": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
+ "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
"dev": true,
"license": "MIT",
"dependencies": {
- "chalk": "^4.1.2",
- "date-fns": "^2.30.0",
- "lodash": "^4.17.21",
- "rxjs": "^7.8.1",
- "shell-quote": "^1.8.1",
- "spawn-command": "0.0.2",
- "supports-color": "^8.1.1",
- "tree-kill": "^1.2.2",
- "yargs": "^17.7.2"
+ "@babel/types": "^7.28.5"
},
"bin": {
- "conc": "dist/bin/concurrently.js",
- "concurrently": "dist/bin/concurrently.js"
+ "parser": "bin/babel-parser.js"
},
"engines": {
- "node": "^14.13.0 || >=16.0.0"
- },
- "funding": {
- "url": "https://github.com/open-cli-tools/concurrently?sponsor=1"
+ "node": ">=6.0.0"
}
},
- "node_modules/content-disposition": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.0.1.tgz",
- "integrity": "sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q==",
+ "node_modules/@babel/plugin-transform-react-jsx-self": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz",
+ "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==",
+ "dev": true,
"license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
"engines": {
- "node": ">=18"
+ "node": ">=6.9.0"
},
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/express"
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
}
},
- "node_modules/content-type": {
- "version": "1.0.5",
- "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
- "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
+ "node_modules/@babel/plugin-transform-react-jsx-source": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz",
+ "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==",
+ "dev": true,
"license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
"engines": {
- "node": ">= 0.6"
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
}
},
- "node_modules/cookie": {
- "version": "0.7.2",
- "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz",
- "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==",
+ "node_modules/@babel/runtime": {
+ "version": "7.28.4",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz",
+ "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==",
+ "dev": true,
"license": "MIT",
"engines": {
- "node": ">= 0.6"
+ "node": ">=6.9.0"
}
},
- "node_modules/cookie-signature": {
- "version": "1.2.2",
- "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.2.2.tgz",
- "integrity": "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==",
+ "node_modules/@babel/template": {
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
+ "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==",
+ "dev": true,
"license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/parser": "^7.27.2",
+ "@babel/types": "^7.27.1"
+ },
"engines": {
- "node": ">=6.6.0"
+ "node": ">=6.9.0"
}
},
- "node_modules/date-fns": {
- "version": "2.30.0",
- "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.30.0.tgz",
- "integrity": "sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==",
+ "node_modules/@babel/traverse": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz",
+ "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@babel/runtime": "^7.21.0"
+ "@babel/code-frame": "^7.27.1",
+ "@babel/generator": "^7.28.5",
+ "@babel/helper-globals": "^7.28.0",
+ "@babel/parser": "^7.28.5",
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.5",
+ "debug": "^4.3.1"
},
"engines": {
- "node": ">=0.11"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/date-fns"
+ "node": ">=6.9.0"
}
},
- "node_modules/debug": {
- "version": "4.4.3",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
- "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+ "node_modules/@babel/types": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
+ "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
+ "dev": true,
"license": "MIT",
"dependencies": {
- "ms": "^2.1.3"
+ "@babel/helper-string-parser": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.28.5"
},
"engines": {
- "node": ">=6.0"
- },
- "peerDependenciesMeta": {
- "supports-color": {
- "optional": true
- }
+ "node": ">=6.9.0"
}
},
- "node_modules/depd": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
- "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "node_modules/@bcoe/v8-coverage": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-1.0.2.tgz",
+ "integrity": "sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA==",
+ "dev": true,
"license": "MIT",
"engines": {
- "node": ">= 0.8"
+ "node": ">=18"
}
},
- "node_modules/dunder-proto": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
- "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "node_modules/@dnd-kit/accessibility": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/@dnd-kit/accessibility/-/accessibility-3.1.1.tgz",
+ "integrity": "sha512-2P+YgaXF+gRsIihwwY1gCsQSYnu9Zyj2py8kY5fFvUM1qm2WA2u639R6YNVfU4GWr+ZM5mqEsfHZZLoRONbemw==",
"license": "MIT",
"dependencies": {
- "call-bind-apply-helpers": "^1.0.1",
- "es-errors": "^1.3.0",
- "gopd": "^1.2.0"
+ "tslib": "^2.0.0"
},
- "engines": {
- "node": ">= 0.4"
+ "peerDependencies": {
+ "react": ">=16.8.0"
}
},
- "node_modules/ee-first": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
- "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
- "license": "MIT"
- },
- "node_modules/emoji-regex": {
- "version": "8.0.0",
- "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
- "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
- "dev": true,
- "license": "MIT"
+ "node_modules/@dnd-kit/core": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/@dnd-kit/core/-/core-6.3.1.tgz",
+ "integrity": "sha512-xkGBRQQab4RLwgXxoqETICr6S5JlogafbhNsidmrkVv2YRs5MLwpjoF2qpiGjQt8S9AoxtIV603s0GIUpY5eYQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@dnd-kit/accessibility": "^3.1.1",
+ "@dnd-kit/utilities": "^3.2.2",
+ "tslib": "^2.0.0"
+ },
+ "peerDependencies": {
+ "react": ">=16.8.0",
+ "react-dom": ">=16.8.0"
+ }
},
- "node_modules/encodeurl": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
- "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
+ "node_modules/@dnd-kit/sortable": {
+ "version": "10.0.0",
+ "resolved": "https://registry.npmjs.org/@dnd-kit/sortable/-/sortable-10.0.0.tgz",
+ "integrity": "sha512-+xqhmIIzvAYMGfBYYnbKuNicfSsk4RksY2XdmJhT+HAC01nix6fHCztU68jooFiMUB01Ky3F0FyOvhG/BZrWkg==",
"license": "MIT",
- "engines": {
- "node": ">= 0.8"
+ "dependencies": {
+ "@dnd-kit/utilities": "^3.2.2",
+ "tslib": "^2.0.0"
+ },
+ "peerDependencies": {
+ "@dnd-kit/core": "^6.3.0",
+ "react": ">=16.8.0"
}
},
- "node_modules/es-define-property": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
- "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "node_modules/@dnd-kit/utilities": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/@dnd-kit/utilities/-/utilities-3.2.2.tgz",
+ "integrity": "sha512-+MKAJEOfaBe5SmV6t34p80MMKhjvUz0vRrvVJbPT0WElzaOJ/1xs+D+KDv+tD/NE5ujfrChEcshd4fLn0wpiqg==",
"license": "MIT",
- "engines": {
- "node": ">= 0.4"
+ "dependencies": {
+ "tslib": "^2.0.0"
+ },
+ "peerDependencies": {
+ "react": ">=16.8.0"
}
},
- "node_modules/es-errors": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
- "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz",
+ "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "aix"
+ ],
"engines": {
- "node": ">= 0.4"
+ "node": ">=18"
}
},
- "node_modules/es-object-atoms": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
- "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz",
+ "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
"license": "MIT",
- "dependencies": {
- "es-errors": "^1.3.0"
- },
+ "optional": true,
+ "os": [
+ "android"
+ ],
"engines": {
- "node": ">= 0.4"
+ "node": ">=18"
}
},
- "node_modules/escalade": {
- "version": "3.2.0",
- "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
- "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz",
+ "integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==",
+ "cpu": [
+ "arm64"
+ ],
"dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
"engines": {
- "node": ">=6"
+ "node": ">=18"
}
},
- "node_modules/escape-html": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
- "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==",
- "license": "MIT"
- },
- "node_modules/etag": {
- "version": "1.8.1",
- "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
- "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz",
+ "integrity": "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
"engines": {
- "node": ">= 0.6"
+ "node": ">=18"
}
},
- "node_modules/express": {
- "version": "5.2.1",
- "resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz",
- "integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==",
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz",
+ "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
"license": "MIT",
- "dependencies": {
- "accepts": "^2.0.0",
- "body-parser": "^2.2.1",
- "content-disposition": "^1.0.0",
- "content-type": "^1.0.5",
- "cookie": "^0.7.1",
- "cookie-signature": "^1.2.1",
- "debug": "^4.4.0",
- "depd": "^2.0.0",
- "encodeurl": "^2.0.0",
- "escape-html": "^1.0.3",
- "etag": "^1.8.1",
- "finalhandler": "^2.1.0",
- "fresh": "^2.0.0",
- "http-errors": "^2.0.0",
- "merge-descriptors": "^2.0.0",
- "mime-types": "^3.0.0",
- "on-finished": "^2.4.1",
- "once": "^1.4.0",
- "parseurl": "^1.3.3",
- "proxy-addr": "^2.0.7",
- "qs": "^6.14.0",
- "range-parser": "^1.2.1",
- "router": "^2.2.0",
- "send": "^1.1.0",
- "serve-static": "^2.2.0",
- "statuses": "^2.0.1",
- "type-is": "^2.0.1",
- "vary": "^1.1.2"
- },
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
"engines": {
- "node": ">= 18"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/express"
+ "node": ">=18"
}
},
- "node_modules/finalhandler": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.1.tgz",
- "integrity": "sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==",
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz",
+ "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
"license": "MIT",
- "dependencies": {
- "debug": "^4.4.0",
- "encodeurl": "^2.0.0",
- "escape-html": "^1.0.3",
- "on-finished": "^2.4.1",
- "parseurl": "^1.3.3",
- "statuses": "^2.0.1"
- },
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
"engines": {
- "node": ">= 18.0.0"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/express"
+ "node": ">=18"
}
},
- "node_modules/forwarded": {
- "version": "0.2.0",
- "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
- "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz",
+ "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
"engines": {
- "node": ">= 0.6"
+ "node": ">=18"
}
},
- "node_modules/fresh": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/fresh/-/fresh-2.0.0.tgz",
- "integrity": "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==",
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz",
+ "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
"engines": {
- "node": ">= 0.8"
+ "node": ">=18"
}
},
- "node_modules/function-bind": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
- "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz",
+ "integrity": "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
"license": "MIT",
- "funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
}
},
- "node_modules/get-caller-file": {
- "version": "2.0.5",
- "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
- "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz",
+ "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==",
+ "cpu": [
+ "arm64"
+ ],
"dev": true,
- "license": "ISC",
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
"engines": {
- "node": "6.* || 8.* || >= 10.*"
+ "node": ">=18"
}
},
- "node_modules/get-intrinsic": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
- "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz",
+ "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
"license": "MIT",
- "dependencies": {
- "call-bind-apply-helpers": "^1.0.2",
- "es-define-property": "^1.0.1",
- "es-errors": "^1.3.0",
- "es-object-atoms": "^1.1.1",
- "function-bind": "^1.1.2",
- "get-proto": "^1.0.1",
- "gopd": "^1.2.0",
- "has-symbols": "^1.1.0",
- "hasown": "^2.0.2",
- "math-intrinsics": "^1.1.0"
- },
+ "optional": true,
+ "os": [
+ "linux"
+ ],
"engines": {
- "node": ">= 0.4"
- },
- "funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "node": ">=18"
}
},
- "node_modules/get-proto": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
- "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz",
+ "integrity": "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
"license": "MIT",
- "dependencies": {
- "dunder-proto": "^1.0.1",
- "es-object-atoms": "^1.0.0"
- },
+ "optional": true,
+ "os": [
+ "linux"
+ ],
"engines": {
- "node": ">= 0.4"
+ "node": ">=18"
}
},
- "node_modules/gopd": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
- "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz",
+ "integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
"engines": {
- "node": ">= 0.4"
- },
- "funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "node": ">=18"
}
},
- "node_modules/has-flag": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
- "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz",
+ "integrity": "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==",
+ "cpu": [
+ "ppc64"
+ ],
"dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
"engines": {
- "node": ">=8"
+ "node": ">=18"
}
},
- "node_modules/has-symbols": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
- "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz",
+ "integrity": "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
"engines": {
- "node": ">= 0.4"
- },
- "funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "node": ">=18"
}
},
- "node_modules/hasown": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
- "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz",
+ "integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
"license": "MIT",
- "dependencies": {
- "function-bind": "^1.1.2"
- },
+ "optional": true,
+ "os": [
+ "linux"
+ ],
"engines": {
- "node": ">= 0.4"
+ "node": ">=18"
}
},
- "node_modules/http-errors": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz",
- "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==",
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz",
+ "integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
"license": "MIT",
- "dependencies": {
- "depd": "~2.0.0",
- "inherits": "~2.0.4",
- "setprototypeof": "~1.2.0",
- "statuses": "~2.0.2",
- "toidentifier": "~1.0.1"
- },
+ "optional": true,
+ "os": [
+ "linux"
+ ],
"engines": {
- "node": ">= 0.8"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/express"
+ "node": ">=18"
}
},
- "node_modules/iconv-lite": {
- "version": "0.7.1",
- "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.1.tgz",
- "integrity": "sha512-2Tth85cXwGFHfvRgZWszZSvdo+0Xsqmw8k8ZwxScfcBneNUraK+dxRxRm24nszx80Y0TVio8kKLt5sLE7ZCLlw==",
+ "node_modules/@esbuild/netbsd-arm64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz",
+ "integrity": "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
"license": "MIT",
- "dependencies": {
- "safer-buffer": ">= 2.1.2 < 3.0.0"
- },
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
"engines": {
- "node": ">=0.10.0"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/express"
+ "node": ">=18"
}
},
- "node_modules/inherits": {
- "version": "2.0.4",
- "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
- "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
- "license": "ISC"
- },
- "node_modules/ipaddr.js": {
- "version": "1.9.1",
- "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
- "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz",
+ "integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
"engines": {
- "node": ">= 0.10"
+ "node": ">=18"
}
},
- "node_modules/is-fullwidth-code-point": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
- "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "node_modules/@esbuild/openbsd-arm64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz",
+ "integrity": "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==",
+ "cpu": [
+ "arm64"
+ ],
"dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
"engines": {
- "node": ">=8"
+ "node": ">=18"
}
},
- "node_modules/is-promise": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz",
- "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==",
- "license": "MIT"
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz",
+ "integrity": "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
},
- "node_modules/lodash": {
- "version": "4.17.21",
- "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
- "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
+ "node_modules/@esbuild/openharmony-arm64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz",
+ "integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==",
+ "cpu": [
+ "arm64"
+ ],
"dev": true,
- "license": "MIT"
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openharmony"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
},
- "node_modules/math-intrinsics": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
- "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz",
+ "integrity": "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
"engines": {
- "node": ">= 0.4"
+ "node": ">=18"
}
},
- "node_modules/media-typer": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-1.1.0.tgz",
- "integrity": "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==",
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz",
+ "integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
"engines": {
- "node": ">= 0.8"
+ "node": ">=18"
}
},
- "node_modules/merge-descriptors": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-2.0.0.tgz",
- "integrity": "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==",
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz",
+ "integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
"engines": {
"node": ">=18"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/mime-db": {
- "version": "1.54.0",
- "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz",
- "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==",
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz",
+ "integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
"license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
"engines": {
- "node": ">= 0.6"
+ "node": ">=18"
}
},
- "node_modules/mime-types": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz",
- "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==",
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.13",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
+ "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
+ "dev": true,
"license": "MIT",
"dependencies": {
- "mime-db": "^1.54.0"
- },
+ "@jridgewell/sourcemap-codec": "^1.5.0",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/remapping": {
+ "version": "2.3.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
+ "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.5",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+ "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
+ "dev": true,
+ "license": "MIT",
"engines": {
- "node": ">=18"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/express"
+ "node": ">=6.0.0"
}
},
- "node_modules/ms": {
- "version": "2.1.3",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
- "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
+ "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
+ "dev": true,
"license": "MIT"
},
- "node_modules/negotiator": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz",
- "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==",
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.31",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
+ "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
+ "dev": true,
"license": "MIT",
- "engines": {
- "node": ">= 0.6"
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
}
},
- "node_modules/object-inspect": {
- "version": "1.13.4",
- "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz",
- "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
+ "node_modules/@noble/hashes": {
+ "version": "1.8.0",
+ "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz",
+ "integrity": "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==",
+ "dev": true,
"license": "MIT",
"engines": {
- "node": ">= 0.4"
+ "node": "^14.21.3 || >=16"
},
"funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "url": "https://paulmillr.com/funding/"
}
},
- "node_modules/on-finished": {
- "version": "2.4.1",
- "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
- "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "node_modules/@nodelib/fs.scandir": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+ "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+ "dev": true,
"license": "MIT",
"dependencies": {
- "ee-first": "1.1.1"
+ "@nodelib/fs.stat": "2.0.5",
+ "run-parallel": "^1.1.9"
},
"engines": {
- "node": ">= 0.8"
+ "node": ">= 8"
}
},
- "node_modules/once": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
- "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
- "license": "ISC",
- "dependencies": {
- "wrappy": "1"
+ "node_modules/@nodelib/fs.stat": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+ "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 8"
}
},
- "node_modules/parseurl": {
- "version": "1.3.3",
- "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
- "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
+ "node_modules/@nodelib/fs.walk": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+ "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+ "dev": true,
"license": "MIT",
+ "dependencies": {
+ "@nodelib/fs.scandir": "2.1.5",
+ "fastq": "^1.6.0"
+ },
"engines": {
- "node": ">= 0.8"
+ "node": ">= 8"
}
},
- "node_modules/path-to-regexp": {
- "version": "8.3.0",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.3.0.tgz",
- "integrity": "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==",
+ "node_modules/@paralleldrive/cuid2": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/@paralleldrive/cuid2/-/cuid2-2.3.1.tgz",
+ "integrity": "sha512-XO7cAxhnTZl0Yggq6jOgjiOHhbgcO4NqFqwSmQpjK3b6TEE6Uj/jfSk6wzYyemh3+I0sHirKSetjQwn5cZktFw==",
+ "dev": true,
"license": "MIT",
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/express"
+ "dependencies": {
+ "@noble/hashes": "^1.1.5"
}
},
- "node_modules/proxy-addr": {
- "version": "2.0.7",
- "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
- "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
+ "node_modules/@pm2/agent": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/@pm2/agent/-/agent-2.0.4.tgz",
+ "integrity": "sha512-n7WYvvTJhHLS2oBb1PjOtgLpMhgImOq8sXkPBw6smeg9LJBWZjiEgPKOpR8mn9UJZsB5P3W4V/MyvNnp31LKeA==",
+ "license": "AGPL-3.0",
+ "dependencies": {
+ "async": "~3.2.0",
+ "chalk": "~3.0.0",
+ "dayjs": "~1.8.24",
+ "debug": "~4.3.1",
+ "eventemitter2": "~5.0.1",
+ "fast-json-patch": "^3.0.0-1",
+ "fclone": "~1.0.11",
+ "nssocket": "0.6.0",
+ "pm2-axon": "~4.0.1",
+ "pm2-axon-rpc": "~0.7.0",
+ "proxy-agent": "~6.3.0",
+ "semver": "~7.5.0",
+ "ws": "~7.5.10"
+ }
+ },
+ "node_modules/@pm2/agent/node_modules/chalk": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz",
+ "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==",
"license": "MIT",
"dependencies": {
- "forwarded": "0.2.0",
- "ipaddr.js": "1.9.1"
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
},
"engines": {
- "node": ">= 0.10"
+ "node": ">=8"
}
},
- "node_modules/qs": {
- "version": "6.14.1",
- "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.1.tgz",
- "integrity": "sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==",
- "license": "BSD-3-Clause",
+ "node_modules/@pm2/agent/node_modules/dayjs": {
+ "version": "1.8.36",
+ "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.8.36.tgz",
+ "integrity": "sha512-3VmRXEtw7RZKAf+4Tv1Ym9AGeo8r8+CjDi26x+7SYQil1UqtqdaokhzoEJohqlzt0m5kacJSDhJQkG/LWhpRBw==",
+ "license": "MIT"
+ },
+ "node_modules/@pm2/agent/node_modules/debug": {
+ "version": "4.3.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz",
+ "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==",
+ "license": "MIT",
"dependencies": {
- "side-channel": "^1.1.0"
+ "ms": "^2.1.3"
},
"engines": {
- "node": ">=0.6"
+ "node": ">=6.0"
},
- "funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
}
},
- "node_modules/range-parser": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
- "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
- "license": "MIT",
+ "node_modules/@pm2/agent/node_modules/lru-cache": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
+ "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
+ "license": "ISC",
+ "dependencies": {
+ "yallist": "^4.0.0"
+ },
"engines": {
- "node": ">= 0.6"
+ "node": ">=10"
}
},
- "node_modules/raw-body": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.2.tgz",
- "integrity": "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==",
- "license": "MIT",
+ "node_modules/@pm2/agent/node_modules/semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "license": "ISC",
"dependencies": {
- "bytes": "~3.1.2",
- "http-errors": "~2.0.1",
- "iconv-lite": "~0.7.0",
- "unpipe": "~1.0.0"
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
},
"engines": {
- "node": ">= 0.10"
+ "node": ">=10"
}
},
- "node_modules/require-directory": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
- "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
- "dev": true,
+ "node_modules/@pm2/agent/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"license": "MIT",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
"engines": {
- "node": ">=0.10.0"
+ "node": ">=8"
}
},
- "node_modules/router": {
- "version": "2.2.0",
- "resolved": "https://registry.npmjs.org/router/-/router-2.2.0.tgz",
- "integrity": "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==",
- "license": "MIT",
+ "node_modules/@pm2/agent/node_modules/yallist": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
+ "license": "ISC"
+ },
+ "node_modules/@pm2/io": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/@pm2/io/-/io-6.0.1.tgz",
+ "integrity": "sha512-KiA+shC6sULQAr9mGZ1pg+6KVW9MF8NpG99x26Lf/082/Qy8qsTCtnJy+HQReW1A9Rdf0C/404cz0RZGZro+IA==",
+ "license": "Apache-2",
"dependencies": {
- "debug": "^4.4.0",
- "depd": "^2.0.0",
- "is-promise": "^4.0.0",
- "parseurl": "^1.3.3",
- "path-to-regexp": "^8.0.0"
+ "async": "~2.6.1",
+ "debug": "~4.3.1",
+ "eventemitter2": "^6.3.1",
+ "require-in-the-middle": "^5.0.0",
+ "semver": "~7.5.4",
+ "shimmer": "^1.2.0",
+ "signal-exit": "^3.0.3",
+ "tslib": "1.9.3"
},
"engines": {
- "node": ">= 18"
+ "node": ">=6.0"
}
},
- "node_modules/rxjs": {
- "version": "7.8.2",
- "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz",
- "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==",
- "dev": true,
- "license": "Apache-2.0",
+ "node_modules/@pm2/io/node_modules/async": {
+ "version": "2.6.4",
+ "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz",
+ "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==",
+ "license": "MIT",
"dependencies": {
- "tslib": "^2.1.0"
+ "lodash": "^4.17.14"
}
},
- "node_modules/safer-buffer": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
- "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
- "license": "MIT"
- },
- "node_modules/send": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/send/-/send-1.2.1.tgz",
- "integrity": "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==",
+ "node_modules/@pm2/io/node_modules/debug": {
+ "version": "4.3.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz",
+ "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==",
"license": "MIT",
"dependencies": {
- "debug": "^4.4.3",
- "encodeurl": "^2.0.0",
- "escape-html": "^1.0.3",
- "etag": "^1.8.1",
- "fresh": "^2.0.0",
- "http-errors": "^2.0.1",
- "mime-types": "^3.0.2",
- "ms": "^2.1.3",
- "on-finished": "^2.4.1",
- "range-parser": "^1.2.1",
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@pm2/io/node_modules/eventemitter2": {
+ "version": "6.4.9",
+ "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-6.4.9.tgz",
+ "integrity": "sha512-JEPTiaOt9f04oa6NOkc4aH+nVp5I3wEjpHbIPqfgCdD5v5bUzy7xQqwcVO2aDQgOWhI28da57HksMrzK9HlRxg==",
+ "license": "MIT"
+ },
+ "node_modules/@pm2/io/node_modules/lru-cache": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
+ "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
+ "license": "ISC",
+ "dependencies": {
+ "yallist": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@pm2/io/node_modules/semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "license": "ISC",
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@pm2/io/node_modules/tslib": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.9.3.tgz",
+ "integrity": "sha512-4krF8scpejhaOgqzBEcGM7yDIEfi0/8+8zDRZhNZZ2kjmHJ4hv3zCbQWxoJGz1iw5U0Jl0nma13xzHXcncMavQ==",
+ "license": "Apache-2.0"
+ },
+ "node_modules/@pm2/io/node_modules/yallist": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
+ "license": "ISC"
+ },
+ "node_modules/@pm2/js-api": {
+ "version": "0.8.0",
+ "resolved": "https://registry.npmjs.org/@pm2/js-api/-/js-api-0.8.0.tgz",
+ "integrity": "sha512-nmWzrA/BQZik3VBz+npRcNIu01kdBhWL0mxKmP1ciF/gTcujPTQqt027N9fc1pK9ERM8RipFhymw7RcmCyOEYA==",
+ "license": "Apache-2",
+ "dependencies": {
+ "async": "^2.6.3",
+ "debug": "~4.3.1",
+ "eventemitter2": "^6.3.1",
+ "extrareqp2": "^1.0.0",
+ "ws": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/@pm2/js-api/node_modules/async": {
+ "version": "2.6.4",
+ "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz",
+ "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==",
+ "license": "MIT",
+ "dependencies": {
+ "lodash": "^4.17.14"
+ }
+ },
+ "node_modules/@pm2/js-api/node_modules/debug": {
+ "version": "4.3.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz",
+ "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@pm2/js-api/node_modules/eventemitter2": {
+ "version": "6.4.9",
+ "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-6.4.9.tgz",
+ "integrity": "sha512-JEPTiaOt9f04oa6NOkc4aH+nVp5I3wEjpHbIPqfgCdD5v5bUzy7xQqwcVO2aDQgOWhI28da57HksMrzK9HlRxg==",
+ "license": "MIT"
+ },
+ "node_modules/@pm2/pm2-version-check": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/@pm2/pm2-version-check/-/pm2-version-check-1.0.4.tgz",
+ "integrity": "sha512-SXsM27SGH3yTWKc2fKR4SYNxsmnvuBQ9dd6QHtEWmiZ/VqaOYPAIlS8+vMcn27YLtAEBGvNRSh3TPNvtjZgfqA==",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "^4.3.1"
+ }
+ },
+ "node_modules/@rolldown/pluginutils": {
+ "version": "1.0.0-beta.27",
+ "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz",
+ "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@rollup/rollup-android-arm-eabi": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.55.1.tgz",
+ "integrity": "sha512-9R0DM/ykwfGIlNu6+2U09ga0WXeZ9MRC2Ter8jnz8415VbuIykVuc6bhdrbORFZANDmTDvq26mJrEVTl8TdnDg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-android-arm64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.55.1.tgz",
+ "integrity": "sha512-eFZCb1YUqhTysgW3sj/55du5cG57S7UTNtdMjCW7LwVcj3dTTcowCsC8p7uBdzKsZYa8J7IDE8lhMI+HX1vQvg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-arm64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.55.1.tgz",
+ "integrity": "sha512-p3grE2PHcQm2e8PSGZdzIhCKbMCw/xi9XvMPErPhwO17vxtvCN5FEA2mSLgmKlCjHGMQTP6phuQTYWUnKewwGg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-x64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.55.1.tgz",
+ "integrity": "sha512-rDUjG25C9qoTm+e02Esi+aqTKSBYwVTaoS1wxcN47/Luqef57Vgp96xNANwt5npq9GDxsH7kXxNkJVEsWEOEaQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-arm64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.55.1.tgz",
+ "integrity": "sha512-+JiU7Jbp5cdxekIgdte0jfcu5oqw4GCKr6i3PJTlXTCU5H5Fvtkpbs4XJHRmWNXF+hKmn4v7ogI5OQPaupJgOg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-x64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.55.1.tgz",
+ "integrity": "sha512-V5xC1tOVWtLLmr3YUk2f6EJK4qksksOYiz/TCsFHu/R+woubcLWdC9nZQmwjOAbmExBIVKsm1/wKmEy4z4u4Bw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.55.1.tgz",
+ "integrity": "sha512-Rn3n+FUk2J5VWx+ywrG/HGPTD9jXNbicRtTM11e/uorplArnXZYsVifnPPqNNP5BsO3roI4n8332ukpY/zN7rQ==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-musleabihf": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.55.1.tgz",
+ "integrity": "sha512-grPNWydeKtc1aEdrJDWk4opD7nFtQbMmV7769hiAaYyUKCT1faPRm2av8CX1YJsZ4TLAZcg9gTR1KvEzoLjXkg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.55.1.tgz",
+ "integrity": "sha512-a59mwd1k6x8tXKcUxSyISiquLwB5pX+fJW9TkWU46lCqD/GRDe9uDN31jrMmVP3feI3mhAdvcCClhV8V5MhJFQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-musl": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.55.1.tgz",
+ "integrity": "sha512-puS1MEgWX5GsHSoiAsF0TYrpomdvkaXm0CofIMG5uVkP6IBV+ZO9xhC5YEN49nsgYo1DuuMquF9+7EDBVYu4uA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-loong64-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.55.1.tgz",
+ "integrity": "sha512-r3Wv40in+lTsULSb6nnoudVbARdOwb2u5fpeoOAZjFLznp6tDU8kd+GTHmJoqZ9lt6/Sys33KdIHUaQihFcu7g==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-loong64-musl": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.55.1.tgz",
+ "integrity": "sha512-MR8c0+UxAlB22Fq4R+aQSPBayvYa3+9DrwG/i1TKQXFYEaoW3B5b/rkSRIypcZDdWjWnpcvxbNaAJDcSbJU3Lw==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-ppc64-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.55.1.tgz",
+ "integrity": "sha512-3KhoECe1BRlSYpMTeVrD4sh2Pw2xgt4jzNSZIIPLFEsnQn9gAnZagW9+VqDqAHgm1Xc77LzJOo2LdigS5qZ+gw==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-ppc64-musl": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.55.1.tgz",
+ "integrity": "sha512-ziR1OuZx0vdYZZ30vueNZTg73alF59DicYrPViG0NEgDVN8/Jl87zkAPu4u6VjZST2llgEUjaiNl9JM6HH1Vdw==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.55.1.tgz",
+ "integrity": "sha512-uW0Y12ih2XJRERZ4jAfKamTyIHVMPQnTZcQjme2HMVDAHY4amf5u414OqNYC+x+LzRdRcnIG1YodLrrtA8xsxw==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-musl": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.55.1.tgz",
+ "integrity": "sha512-u9yZ0jUkOED1BFrqu3BwMQoixvGHGZ+JhJNkNKY/hyoEgOwlqKb62qu+7UjbPSHYjiVy8kKJHvXKv5coH4wDeg==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-s390x-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.55.1.tgz",
+ "integrity": "sha512-/0PenBCmqM4ZUd0190j7J0UsQ/1nsi735iPRakO8iPciE7BQ495Y6msPzaOmvx0/pn+eJVVlZrNrSh4WSYLxNg==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.55.1.tgz",
+ "integrity": "sha512-a8G4wiQxQG2BAvo+gU6XrReRRqj+pLS2NGXKm8io19goR+K8lw269eTrPkSdDTALwMmJp4th2Uh0D8J9bEV1vg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-musl": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.55.1.tgz",
+ "integrity": "sha512-bD+zjpFrMpP/hqkfEcnjXWHMw5BIghGisOKPj+2NaNDuVT+8Ds4mPf3XcPHuat1tz89WRL+1wbcxKY3WSbiT7w==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-openbsd-x64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.55.1.tgz",
+ "integrity": "sha512-eLXw0dOiqE4QmvikfQ6yjgkg/xDM+MdU9YJuP4ySTibXU0oAvnEWXt7UDJmD4UkYialMfOGFPJnIHSe/kdzPxg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-openharmony-arm64": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.55.1.tgz",
+ "integrity": "sha512-xzm44KgEP11te3S2HCSyYf5zIzWmx3n8HDCc7EE59+lTcswEWNpvMLfd9uJvVX8LCg9QWG67Xt75AuHn4vgsXw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openharmony"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-arm64-msvc": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.55.1.tgz",
+ "integrity": "sha512-yR6Bl3tMC/gBok5cz/Qi0xYnVbIxGx5Fcf/ca0eB6/6JwOY+SRUcJfI0OpeTpPls7f194as62thCt/2BjxYN8g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-ia32-msvc": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.55.1.tgz",
+ "integrity": "sha512-3fZBidchE0eY0oFZBnekYCfg+5wAB0mbpCBuofh5mZuzIU/4jIVkbESmd2dOsFNS78b53CYv3OAtwqkZZmU5nA==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-gnu": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.55.1.tgz",
+ "integrity": "sha512-xGGY5pXj69IxKb4yv/POoocPy/qmEGhimy/FoTpTSVju3FYXUQQMFCaZZXJVidsmGxRioZAwpThl/4zX41gRKg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-msvc": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.55.1.tgz",
+ "integrity": "sha512-SPEpaL6DX4rmcXtnhdrQYgzQ5W2uW3SCJch88lB2zImhJRhIIK44fkUrgIV/Q8yUNfw5oyZ5vkeQsZLhCb06lw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@socket.io/component-emitter": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.2.tgz",
+ "integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==",
+ "license": "MIT"
+ },
+ "node_modules/@standard-schema/spec": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz",
+ "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@tootallnate/quickjs-emscripten": {
+ "version": "0.23.0",
+ "resolved": "https://registry.npmjs.org/@tootallnate/quickjs-emscripten/-/quickjs-emscripten-0.23.0.tgz",
+ "integrity": "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/babel__core": {
+ "version": "7.20.5",
+ "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
+ "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.20.7",
+ "@babel/types": "^7.20.7",
+ "@types/babel__generator": "*",
+ "@types/babel__template": "*",
+ "@types/babel__traverse": "*"
+ }
+ },
+ "node_modules/@types/babel__generator": {
+ "version": "7.27.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz",
+ "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__template": {
+ "version": "7.4.4",
+ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
+ "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.1.0",
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__traverse": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz",
+ "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.2"
+ }
+ },
+ "node_modules/@types/chai": {
+ "version": "5.2.3",
+ "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz",
+ "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/deep-eql": "*",
+ "assertion-error": "^2.0.1"
+ }
+ },
+ "node_modules/@types/cors": {
+ "version": "2.8.19",
+ "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.19.tgz",
+ "integrity": "sha512-mFNylyeyqN93lfe/9CSxOGREz8cpzAhH+E93xJ4xWQf62V8sQ/24reV2nyzUWM6H6Xji+GGHpkbLe7pVoUEskg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/deep-eql": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz",
+ "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
+ "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/node": {
+ "version": "25.0.6",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-25.0.6.tgz",
+ "integrity": "sha512-NNu0sjyNxpoiW3YuVFfNz7mxSQ+S4X2G28uqg2s+CzoqoQjLPsWSbsFFyztIAqt2vb8kfEAsJNepMGPTxFDx3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~7.16.0"
+ }
+ },
+ "node_modules/@vitejs/plugin-react": {
+ "version": "4.7.0",
+ "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz",
+ "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.28.0",
+ "@babel/plugin-transform-react-jsx-self": "^7.27.1",
+ "@babel/plugin-transform-react-jsx-source": "^7.27.1",
+ "@rolldown/pluginutils": "1.0.0-beta.27",
+ "@types/babel__core": "^7.20.5",
+ "react-refresh": "^0.17.0"
+ },
+ "engines": {
+ "node": "^14.18.0 || >=16.0.0"
+ },
+ "peerDependencies": {
+ "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0"
+ }
+ },
+ "node_modules/@vitest/coverage-v8": {
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-4.0.16.tgz",
+ "integrity": "sha512-2rNdjEIsPRzsdu6/9Eq0AYAzYdpP6Bx9cje9tL3FE5XzXRQF1fNU9pe/1yE8fCrS0HD+fBtt6gLPh6LI57tX7A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@bcoe/v8-coverage": "^1.0.2",
+ "@vitest/utils": "4.0.16",
+ "ast-v8-to-istanbul": "^0.3.8",
+ "istanbul-lib-coverage": "^3.2.2",
+ "istanbul-lib-report": "^3.0.1",
+ "istanbul-lib-source-maps": "^5.0.6",
+ "istanbul-reports": "^3.2.0",
+ "magicast": "^0.5.1",
+ "obug": "^2.1.1",
+ "std-env": "^3.10.0",
+ "tinyrainbow": "^3.0.3"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ },
+ "peerDependencies": {
+ "@vitest/browser": "4.0.16",
+ "vitest": "4.0.16"
+ },
+ "peerDependenciesMeta": {
+ "@vitest/browser": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@vitest/expect": {
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.16.tgz",
+ "integrity": "sha512-eshqULT2It7McaJkQGLkPjPjNph+uevROGuIMJdG3V+0BSR2w9u6J9Lwu+E8cK5TETlfou8GRijhafIMhXsimA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@standard-schema/spec": "^1.0.0",
+ "@types/chai": "^5.2.2",
+ "@vitest/spy": "4.0.16",
+ "@vitest/utils": "4.0.16",
+ "chai": "^6.2.1",
+ "tinyrainbow": "^3.0.3"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/mocker": {
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.16.tgz",
+ "integrity": "sha512-yb6k4AZxJTB+q9ycAvsoxGn+j/po0UaPgajllBgt1PzoMAAmJGYFdDk0uCcRcxb3BrME34I6u8gHZTQlkqSZpg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@vitest/spy": "4.0.16",
+ "estree-walker": "^3.0.3",
+ "magic-string": "^0.30.21"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ },
+ "peerDependencies": {
+ "msw": "^2.4.9",
+ "vite": "^6.0.0 || ^7.0.0-0"
+ },
+ "peerDependenciesMeta": {
+ "msw": {
+ "optional": true
+ },
+ "vite": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@vitest/pretty-format": {
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.16.tgz",
+ "integrity": "sha512-eNCYNsSty9xJKi/UdVD8Ou16alu7AYiS2fCPRs0b1OdhJiV89buAXQLpTbe+X8V9L6qrs9CqyvU7OaAopJYPsA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "tinyrainbow": "^3.0.3"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/runner": {
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.16.tgz",
+ "integrity": "sha512-VWEDm5Wv9xEo80ctjORcTQRJ539EGPB3Pb9ApvVRAY1U/WkHXmmYISqU5E79uCwcW7xYUV38gwZD+RV755fu3Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@vitest/utils": "4.0.16",
+ "pathe": "^2.0.3"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/snapshot": {
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.16.tgz",
+ "integrity": "sha512-sf6NcrYhYBsSYefxnry+DR8n3UV4xWZwWxYbCJUt2YdvtqzSPR7VfGrY0zsv090DAbjFZsi7ZaMi1KnSRyK1XA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@vitest/pretty-format": "4.0.16",
+ "magic-string": "^0.30.21",
+ "pathe": "^2.0.3"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/spy": {
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.16.tgz",
+ "integrity": "sha512-4jIOWjKP0ZUaEmJm00E0cOBLU+5WE0BpeNr3XN6TEF05ltro6NJqHWxXD0kA8/Zc8Nh23AT8WQxwNG+WeROupw==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/utils": {
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.16.tgz",
+ "integrity": "sha512-h8z9yYhV3e1LEfaQ3zdypIrnAg/9hguReGZoS7Gl0aBG5xgA410zBqECqmaF/+RkTggRsfnzc1XaAHA6bmUufA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@vitest/pretty-format": "4.0.16",
+ "tinyrainbow": "^3.0.3"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/accepts": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz",
+ "integrity": "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-types": "^3.0.0",
+ "negotiator": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/agent-base": {
+ "version": "7.1.4",
+ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz",
+ "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/amp": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/amp/-/amp-0.3.1.tgz",
+ "integrity": "sha512-OwIuC4yZaRogHKiuU5WlMR5Xk/jAcpPtawWL05Gj8Lvm2F6mwoJt4O/bHI+DHwG79vWd+8OFYM4/BzYqyRd3qw==",
+ "license": "MIT"
+ },
+ "node_modules/amp-message": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/amp-message/-/amp-message-0.1.2.tgz",
+ "integrity": "sha512-JqutcFwoU1+jhv7ArgW38bqrE+LQdcRv4NxNw0mp0JHQyB6tXesWRjtYKlDgHRY2o3JE5UTaBGUK8kSWUdxWUg==",
+ "license": "MIT",
+ "dependencies": {
+ "amp": "0.3.1"
+ }
+ },
+ "node_modules/ansi-colors": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz",
+ "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "license": "MIT",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/any-promise": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz",
+ "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/anymatch": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
+ "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
+ "license": "ISC",
+ "dependencies": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/arg": {
+ "version": "5.0.2",
+ "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz",
+ "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+ "license": "Python-2.0"
+ },
+ "node_modules/array-flatten": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
+ "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==",
+ "license": "MIT"
+ },
+ "node_modules/asap": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz",
+ "integrity": "sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/assertion-error": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz",
+ "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/ast-types": {
+ "version": "0.13.4",
+ "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.13.4.tgz",
+ "integrity": "sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w==",
+ "license": "MIT",
+ "dependencies": {
+ "tslib": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/ast-v8-to-istanbul": {
+ "version": "0.3.10",
+ "resolved": "https://registry.npmjs.org/ast-v8-to-istanbul/-/ast-v8-to-istanbul-0.3.10.tgz",
+ "integrity": "sha512-p4K7vMz2ZSk3wN8l5o3y2bJAoZXT3VuJI5OLTATY/01CYWumWvwkUw0SqDBnNq6IiTO3qDa1eSQDibAV8g7XOQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/trace-mapping": "^0.3.31",
+ "estree-walker": "^3.0.3",
+ "js-tokens": "^9.0.1"
+ }
+ },
+ "node_modules/ast-v8-to-istanbul/node_modules/js-tokens": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz",
+ "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/async": {
+ "version": "3.2.6",
+ "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz",
+ "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==",
+ "license": "MIT"
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/autoprefixer": {
+ "version": "10.4.23",
+ "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.23.tgz",
+ "integrity": "sha512-YYTXSFulfwytnjAPlw8QHncHJmlvFKtczb8InXaAx9Q0LbfDnfEYDE55omerIJKihhmU61Ft+cAOSzQVaBUmeA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/autoprefixer"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "browserslist": "^4.28.1",
+ "caniuse-lite": "^1.0.30001760",
+ "fraction.js": "^5.3.4",
+ "picocolors": "^1.1.1",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "bin": {
+ "autoprefixer": "bin/autoprefixer"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ },
+ "peerDependencies": {
+ "postcss": "^8.1.0"
+ }
+ },
+ "node_modules/base64id": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/base64id/-/base64id-2.0.0.tgz",
+ "integrity": "sha512-lGe34o6EHj9y3Kts9R4ZYs/Gr+6N7MCaMlIFA3F1R2O5/m7K06AxfSeO5530PEERE6/WyEg3lsuyw4GHlPZHog==",
+ "license": "MIT",
+ "engines": {
+ "node": "^4.5.0 || >= 5.9"
+ }
+ },
+ "node_modules/baseline-browser-mapping": {
+ "version": "2.9.14",
+ "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.14.tgz",
+ "integrity": "sha512-B0xUquLkiGLgHhpPBqvl7GWegWBUNuujQ6kXd/r1U38ElPT6Ok8KZ8e+FpUGEc2ZoRQUzq/aUnaKFc/svWUGSg==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "bin": {
+ "baseline-browser-mapping": "dist/cli.js"
+ }
+ },
+ "node_modules/basic-ftp": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.1.0.tgz",
+ "integrity": "sha512-RkaJzeJKDbaDWTIPiJwubyljaEPwpVWkm9Rt5h9Nd6h7tEXTJ3VB4qxdZBioV7JO5yLUaOKwz7vDOzlncUsegw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/binary-extensions": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz",
+ "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/blessed": {
+ "version": "0.1.81",
+ "resolved": "https://registry.npmjs.org/blessed/-/blessed-0.1.81.tgz",
+ "integrity": "sha512-LoF5gae+hlmfORcG1M5+5XZi4LBmvlXTzwJWzUlPryN/SJdSflZvROM2TwkT0GMpq7oqT48NRd4GS7BiVBc5OQ==",
+ "license": "MIT",
+ "bin": {
+ "blessed": "bin/tput.js"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/bodec": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/bodec/-/bodec-0.1.0.tgz",
+ "integrity": "sha512-Ylo+MAo5BDUq1KA3f3R/MFhh+g8cnHmo8bz3YPGhI1znrMaf77ol1sfvYJzsw3nTE+Y2GryfDxBaR+AqpAkEHQ==",
+ "license": "MIT"
+ },
+ "node_modules/body-parser": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.1.tgz",
+ "integrity": "sha512-nfDwkulwiZYQIGwxdy0RUmowMhKcFVcYXUU7m4QlKYim1rUtg83xm2yjZ40QjDuc291AJjjeSc9b++AWHSgSHw==",
+ "license": "MIT",
+ "dependencies": {
+ "bytes": "^3.1.2",
+ "content-type": "^1.0.5",
+ "debug": "^4.4.3",
+ "http-errors": "^2.0.0",
+ "iconv-lite": "^0.7.0",
+ "on-finished": "^2.4.1",
+ "qs": "^6.14.0",
+ "raw-body": "^3.0.1",
+ "type-is": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/braces": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
+ "license": "MIT",
+ "dependencies": {
+ "fill-range": "^7.1.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.28.1",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz",
+ "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "baseline-browser-mapping": "^2.9.0",
+ "caniuse-lite": "^1.0.30001759",
+ "electron-to-chromium": "^1.5.263",
+ "node-releases": "^2.0.27",
+ "update-browserslist-db": "^1.2.0"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/buffer-from": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
+ "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
+ "license": "MIT"
+ },
+ "node_modules/bytes": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/call-bound": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz",
+ "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "get-intrinsic": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/camelcase-css": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
+ "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001764",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001764.tgz",
+ "integrity": "sha512-9JGuzl2M+vPL+pz70gtMF9sHdMFbY9FJaQBi186cHKH3pSzDvzoUJUPV6fqiKIMyXbud9ZLg4F3Yza1vJ1+93g==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "CC-BY-4.0"
+ },
+ "node_modules/chai": {
+ "version": "6.2.2",
+ "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz",
+ "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/chalk/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/charm": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/charm/-/charm-0.1.2.tgz",
+ "integrity": "sha512-syedaZ9cPe7r3hoQA9twWYKu5AIyCswN5+szkmPBe9ccdLrj4bYaCnLVPTLd2kgVRc7+zoX4tyPgRnFKCj5YjQ==",
+ "license": "MIT/X11"
+ },
+ "node_modules/chokidar": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz",
+ "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==",
+ "license": "MIT",
+ "dependencies": {
+ "anymatch": "~3.1.2",
+ "braces": "~3.0.2",
+ "glob-parent": "~5.1.2",
+ "is-binary-path": "~2.1.0",
+ "is-glob": "~4.0.1",
+ "normalize-path": "~3.0.0",
+ "readdirp": "~3.6.0"
+ },
+ "engines": {
+ "node": ">= 8.10.0"
+ },
+ "funding": {
+ "url": "https://paulmillr.com/funding/"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/cli-tableau": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/cli-tableau/-/cli-tableau-2.0.1.tgz",
+ "integrity": "sha512-he+WTicka9cl0Fg/y+YyxcN6/bfQ/1O3QmgxRXDhABKqLzvoOSM4fMzp39uMyLBulAFuywD2N7UaoQE7WaADxQ==",
+ "dependencies": {
+ "chalk": "3.0.0"
+ },
+ "engines": {
+ "node": ">=8.10.0"
+ }
+ },
+ "node_modules/cli-tableau/node_modules/chalk": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz",
+ "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cli-tableau/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cliui": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
+ "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "string-width": "^4.2.0",
+ "strip-ansi": "^6.0.1",
+ "wrap-ansi": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "license": "MIT",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "license": "MIT"
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/commander": {
+ "version": "2.15.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz",
+ "integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag==",
+ "license": "MIT"
+ },
+ "node_modules/component-emitter": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.1.tgz",
+ "integrity": "sha512-T0+barUSQRTUQASh8bx02dl+DhF54GtIDY13Y3m9oWTklKbb3Wv974meRpeZ3lp1JpLVECWWNHC4vaG2XHXouQ==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/concurrently": {
+ "version": "8.2.2",
+ "resolved": "https://registry.npmjs.org/concurrently/-/concurrently-8.2.2.tgz",
+ "integrity": "sha512-1dP4gpXFhei8IOtlXRE/T/4H88ElHgTiUzh71YUmtjTEHMSRS2Z/fgOxHSxxusGHogsRfxNq1vyAwxSC+EVyDg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "chalk": "^4.1.2",
+ "date-fns": "^2.30.0",
+ "lodash": "^4.17.21",
+ "rxjs": "^7.8.1",
+ "shell-quote": "^1.8.1",
+ "spawn-command": "0.0.2",
+ "supports-color": "^8.1.1",
+ "tree-kill": "^1.2.2",
+ "yargs": "^17.7.2"
+ },
+ "bin": {
+ "conc": "dist/bin/concurrently.js",
+ "concurrently": "dist/bin/concurrently.js"
+ },
+ "engines": {
+ "node": "^14.13.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/open-cli-tools/concurrently?sponsor=1"
+ }
+ },
+ "node_modules/content-disposition": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.0.1.tgz",
+ "integrity": "sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/content-type": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
+ "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/convert-source-map": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
+ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/cookie": {
+ "version": "0.7.2",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz",
+ "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie-signature": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.2.2.tgz",
+ "integrity": "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.6.0"
+ }
+ },
+ "node_modules/cookiejar": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/cookiejar/-/cookiejar-2.1.4.tgz",
+ "integrity": "sha512-LDx6oHrK+PhzLKJU9j5S7/Y3jM/mUHvD/DeI1WQmJn652iPC5Y4TBzC9l+5OMOXlyTTA+SmVUPm0HQUwpD5Jqw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/cors": {
+ "version": "2.8.5",
+ "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz",
+ "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==",
+ "license": "MIT",
+ "dependencies": {
+ "object-assign": "^4",
+ "vary": "^1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/croner": {
+ "version": "4.1.97",
+ "resolved": "https://registry.npmjs.org/croner/-/croner-4.1.97.tgz",
+ "integrity": "sha512-/f6gpQuxDaqXu+1kwQYSckUglPaOrHdbIlBAu0YuW8/Cdb45XwXYNUBXg3r/9Mo6n540Kn/smKcZWko5x99KrQ==",
+ "license": "MIT"
+ },
+ "node_modules/cssesc": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
+ "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "cssesc": "bin/cssesc"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/csstype": {
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz",
+ "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==",
+ "license": "MIT"
+ },
+ "node_modules/culvert": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/culvert/-/culvert-0.1.2.tgz",
+ "integrity": "sha512-yi1x3EAWKjQTreYWeSd98431AV+IEE0qoDyOoaHJ7KJ21gv6HtBXHVLX74opVSGqcR8/AbjJBHAHpcOy2bj5Gg==",
+ "license": "MIT"
+ },
+ "node_modules/data-uri-to-buffer": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-6.0.2.tgz",
+ "integrity": "sha512-7hvf7/GW8e86rW0ptuwS3OcBGDjIi6SZva7hCyWC0yYry2cOPmLIjXAUHI6DK2HsnwJd9ifmt57i8eV2n4YNpw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/date-fns": {
+ "version": "2.30.0",
+ "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.30.0.tgz",
+ "integrity": "sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/runtime": "^7.21.0"
+ },
+ "engines": {
+ "node": ">=0.11"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/date-fns"
+ }
+ },
+ "node_modules/dayjs": {
+ "version": "1.11.19",
+ "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.19.tgz",
+ "integrity": "sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==",
+ "license": "MIT"
+ },
+ "node_modules/debug": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/degenerator": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/degenerator/-/degenerator-5.0.1.tgz",
+ "integrity": "sha512-TllpMR/t0M5sqCXfj85i4XaAzxmS5tVA16dqvdkMwGmzI+dXLXnw3J+3Vdv7VKw+ThlTMboK6i9rnZ6Nntj5CQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ast-types": "^0.13.4",
+ "escodegen": "^2.1.0",
+ "esprima": "^4.0.1"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/destroy": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
+ "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8",
+ "npm": "1.2.8000 || >= 1.4.16"
+ }
+ },
+ "node_modules/dezalgo": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/dezalgo/-/dezalgo-1.0.4.tgz",
+ "integrity": "sha512-rXSP0bf+5n0Qonsb+SVVfNfIsimO4HEtmnIpPHY8Q1UCzKlQrDMfdobr8nJOOsRgWCyMRqeSBQzmWUMq7zvVig==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "asap": "^2.0.0",
+ "wrappy": "1"
+ }
+ },
+ "node_modules/didyoumean": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz",
+ "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==",
+ "dev": true,
+ "license": "Apache-2.0"
+ },
+ "node_modules/dlv": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz",
+ "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
+ "license": "MIT"
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.5.267",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz",
+ "integrity": "sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/encodeurl": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
+ "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/engine.io": {
+ "version": "6.6.5",
+ "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.6.5.tgz",
+ "integrity": "sha512-2RZdgEbXmp5+dVbRm0P7HQUImZpICccJy7rN7Tv+SFa55pH+lxnuw6/K1ZxxBfHoYpSkHLAO92oa8O4SwFXA2A==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/cors": "^2.8.12",
+ "@types/node": ">=10.0.0",
+ "accepts": "~1.3.4",
+ "base64id": "2.0.0",
+ "cookie": "~0.7.2",
+ "cors": "~2.8.5",
+ "debug": "~4.4.1",
+ "engine.io-parser": "~5.2.1",
+ "ws": "~8.18.3"
+ },
+ "engines": {
+ "node": ">=10.2.0"
+ }
+ },
+ "node_modules/engine.io-client": {
+ "version": "6.6.4",
+ "resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-6.6.4.tgz",
+ "integrity": "sha512-+kjUJnZGwzewFDw951CDWcwj35vMNf2fcj7xQWOctq1F2i1jkDdVvdFG9kM/BEChymCH36KgjnW0NsL58JYRxw==",
+ "license": "MIT",
+ "dependencies": {
+ "@socket.io/component-emitter": "~3.1.0",
+ "debug": "~4.4.1",
+ "engine.io-parser": "~5.2.1",
+ "ws": "~8.18.3",
+ "xmlhttprequest-ssl": "~2.1.1"
+ }
+ },
+ "node_modules/engine.io-client/node_modules/ws": {
+ "version": "8.18.3",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz",
+ "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": ">=5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/engine.io-parser": {
+ "version": "5.2.3",
+ "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.2.3.tgz",
+ "integrity": "sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/engine.io/node_modules/accepts": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
+ "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-types": "~2.1.34",
+ "negotiator": "0.6.3"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/engine.io/node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/engine.io/node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/engine.io/node_modules/negotiator": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
+ "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/engine.io/node_modules/ws": {
+ "version": "8.18.3",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz",
+ "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": ">=5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/enquirer": {
+ "version": "2.3.6",
+ "resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz",
+ "integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-colors": "^4.1.1"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-module-lexer": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz",
+ "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+ "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.6",
+ "has-tostringtag": "^1.0.2",
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/esbuild": {
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz",
+ "integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.25.12",
+ "@esbuild/android-arm": "0.25.12",
+ "@esbuild/android-arm64": "0.25.12",
+ "@esbuild/android-x64": "0.25.12",
+ "@esbuild/darwin-arm64": "0.25.12",
+ "@esbuild/darwin-x64": "0.25.12",
+ "@esbuild/freebsd-arm64": "0.25.12",
+ "@esbuild/freebsd-x64": "0.25.12",
+ "@esbuild/linux-arm": "0.25.12",
+ "@esbuild/linux-arm64": "0.25.12",
+ "@esbuild/linux-ia32": "0.25.12",
+ "@esbuild/linux-loong64": "0.25.12",
+ "@esbuild/linux-mips64el": "0.25.12",
+ "@esbuild/linux-ppc64": "0.25.12",
+ "@esbuild/linux-riscv64": "0.25.12",
+ "@esbuild/linux-s390x": "0.25.12",
+ "@esbuild/linux-x64": "0.25.12",
+ "@esbuild/netbsd-arm64": "0.25.12",
+ "@esbuild/netbsd-x64": "0.25.12",
+ "@esbuild/openbsd-arm64": "0.25.12",
+ "@esbuild/openbsd-x64": "0.25.12",
+ "@esbuild/openharmony-arm64": "0.25.12",
+ "@esbuild/sunos-x64": "0.25.12",
+ "@esbuild/win32-arm64": "0.25.12",
+ "@esbuild/win32-ia32": "0.25.12",
+ "@esbuild/win32-x64": "0.25.12"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==",
+ "license": "MIT"
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
+ "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/escodegen": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.1.0.tgz",
+ "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==",
+ "license": "BSD-2-Clause",
+ "dependencies": {
+ "esprima": "^4.0.1",
+ "estraverse": "^5.2.0",
+ "esutils": "^2.0.2"
+ },
+ "bin": {
+ "escodegen": "bin/escodegen.js",
+ "esgenerate": "bin/esgenerate.js"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "optionalDependencies": {
+ "source-map": "~0.6.1"
+ }
+ },
+ "node_modules/esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "license": "BSD-2-Clause",
+ "bin": {
+ "esparse": "bin/esparse.js",
+ "esvalidate": "bin/esvalidate.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/estraverse": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
+ "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
+ "license": "BSD-2-Clause",
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/estree-walker": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz",
+ "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0"
+ }
+ },
+ "node_modules/esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
+ "license": "BSD-2-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/eventemitter2": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-5.0.1.tgz",
+ "integrity": "sha512-5EM1GHXycJBS6mauYAbVKT1cVs7POKWb2NXD4Vyt8dDqeZa7LaDK1/sjtL+Zb0lzTpSNil4596Dyu97hz37QLg==",
+ "license": "MIT"
+ },
+ "node_modules/expect-type": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz",
+ "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": ">=12.0.0"
+ }
+ },
+ "node_modules/express": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz",
+ "integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==",
+ "license": "MIT",
+ "dependencies": {
+ "accepts": "^2.0.0",
+ "body-parser": "^2.2.1",
+ "content-disposition": "^1.0.0",
+ "content-type": "^1.0.5",
+ "cookie": "^0.7.1",
+ "cookie-signature": "^1.2.1",
+ "debug": "^4.4.0",
+ "depd": "^2.0.0",
+ "encodeurl": "^2.0.0",
+ "escape-html": "^1.0.3",
+ "etag": "^1.8.1",
+ "finalhandler": "^2.1.0",
+ "fresh": "^2.0.0",
+ "http-errors": "^2.0.0",
+ "merge-descriptors": "^2.0.0",
+ "mime-types": "^3.0.0",
+ "on-finished": "^2.4.1",
+ "once": "^1.4.0",
+ "parseurl": "^1.3.3",
+ "proxy-addr": "^2.0.7",
+ "qs": "^6.14.0",
+ "range-parser": "^1.2.1",
+ "router": "^2.2.0",
+ "send": "^1.1.0",
+ "serve-static": "^2.2.0",
+ "statuses": "^2.0.1",
+ "type-is": "^2.0.1",
+ "vary": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/extrareqp2": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/extrareqp2/-/extrareqp2-1.0.0.tgz",
+ "integrity": "sha512-Gum0g1QYb6wpPJCVypWP3bbIuaibcFiJcpuPM10YSXp/tzqi84x9PJageob+eN4xVRIOto4wjSGNLyMD54D2xA==",
+ "license": "MIT",
+ "dependencies": {
+ "follow-redirects": "^1.14.0"
+ }
+ },
+ "node_modules/fast-glob": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz",
+ "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@nodelib/fs.stat": "^2.0.2",
+ "@nodelib/fs.walk": "^1.2.3",
+ "glob-parent": "^5.1.2",
+ "merge2": "^1.3.0",
+ "micromatch": "^4.0.8"
+ },
+ "engines": {
+ "node": ">=8.6.0"
+ }
+ },
+ "node_modules/fast-json-patch": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/fast-json-patch/-/fast-json-patch-3.1.1.tgz",
+ "integrity": "sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ==",
+ "license": "MIT"
+ },
+ "node_modules/fast-safe-stringify": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz",
+ "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/fastq": {
+ "version": "1.20.1",
+ "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz",
+ "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "reusify": "^1.0.4"
+ }
+ },
+ "node_modules/fclone": {
+ "version": "1.0.11",
+ "resolved": "https://registry.npmjs.org/fclone/-/fclone-1.0.11.tgz",
+ "integrity": "sha512-GDqVQezKzRABdeqflsgMr7ktzgF9CyS+p2oe0jJqUY6izSSbhPIQJDpoU4PtGcD7VPM9xh/dVrTu6z1nwgmEGw==",
+ "license": "MIT"
+ },
+ "node_modules/fill-range": {
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
+ "license": "MIT",
+ "dependencies": {
+ "to-regex-range": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/finalhandler": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.1.tgz",
+ "integrity": "sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "^4.4.0",
+ "encodeurl": "^2.0.0",
+ "escape-html": "^1.0.3",
+ "on-finished": "^2.4.1",
+ "parseurl": "^1.3.3",
+ "statuses": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/follow-redirects": {
+ "version": "1.15.11",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz",
+ "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==",
+ "funding": [
+ {
+ "type": "individual",
+ "url": "https://github.com/sponsors/RubenVerborgh"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=4.0"
+ },
+ "peerDependenciesMeta": {
+ "debug": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
+ "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/form-data/node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/form-data/node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/formidable": {
+ "version": "3.5.4",
+ "resolved": "https://registry.npmjs.org/formidable/-/formidable-3.5.4.tgz",
+ "integrity": "sha512-YikH+7CUTOtP44ZTnUhR7Ic2UASBPOqmaRkRKxRbywPTe5VxF7RRCck4af9wutiZ/QKM5nME9Bie2fFaPz5Gug==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@paralleldrive/cuid2": "^2.2.2",
+ "dezalgo": "^1.0.4",
+ "once": "^1.4.0"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ },
+ "funding": {
+ "url": "https://ko-fi.com/tunnckoCore/commissions"
+ }
+ },
+ "node_modules/forwarded": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
+ "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/fraction.js": {
+ "version": "5.3.4",
+ "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz",
+ "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/rawify"
+ }
+ },
+ "node_modules/fresh": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-2.0.0.tgz",
+ "integrity": "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "hasInstallScript": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": "6.* || 8.* || >= 10.*"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "license": "MIT",
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/get-uri": {
+ "version": "6.0.5",
+ "resolved": "https://registry.npmjs.org/get-uri/-/get-uri-6.0.5.tgz",
+ "integrity": "sha512-b1O07XYq8eRuVzBNgJLstU6FYc1tS6wnMtF1I1D9lE8LxZSOGZ7LhxN54yPP6mGw5f2CkXY2BQUL9Fx41qvcIg==",
+ "license": "MIT",
+ "dependencies": {
+ "basic-ftp": "^5.0.2",
+ "data-uri-to-buffer": "^6.0.2",
+ "debug": "^4.3.4"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/git-node-fs": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/git-node-fs/-/git-node-fs-1.0.0.tgz",
+ "integrity": "sha512-bLQypt14llVXBg0S0u8q8HmU7g9p3ysH+NvVlae5vILuUvs759665HvmR5+wb04KjHyjFcDRxdYb4kyNnluMUQ==",
+ "license": "MIT"
+ },
+ "node_modules/git-sha1": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/git-sha1/-/git-sha1-0.1.2.tgz",
+ "integrity": "sha512-2e/nZezdVlyCopOCYHeW0onkbZg7xP1Ad6pndPy1rCygeRykefUS6r7oA5cJRGEFvseiaz5a/qUHFVX1dd6Isg==",
+ "license": "MIT"
+ },
+ "node_modules/glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "license": "ISC",
+ "dependencies": {
+ "is-glob": "^4.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/goober": {
+ "version": "2.1.18",
+ "resolved": "https://registry.npmjs.org/goober/-/goober-2.1.18.tgz",
+ "integrity": "sha512-2vFqsaDVIT9Gz7N6kAL++pLpp41l3PfDuusHcjnGLfR6+huZkl6ziX+zgVC3ZxpqWhzH6pyDdGrCeDhMIvwaxw==",
+ "license": "MIT",
+ "peerDependencies": {
+ "csstype": "^3.0.10"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+ "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "has-symbols": "^1.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/html-escaper": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz",
+ "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/http-errors": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz",
+ "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==",
+ "license": "MIT",
+ "dependencies": {
+ "depd": "~2.0.0",
+ "inherits": "~2.0.4",
+ "setprototypeof": "~1.2.0",
+ "statuses": "~2.0.2",
+ "toidentifier": "~1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/http-proxy-agent": {
+ "version": "7.0.2",
+ "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz",
+ "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==",
+ "license": "MIT",
+ "dependencies": {
+ "agent-base": "^7.1.0",
+ "debug": "^4.3.4"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/https-proxy-agent": {
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz",
+ "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==",
+ "license": "MIT",
+ "dependencies": {
+ "agent-base": "^7.1.2",
+ "debug": "4"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.7.1",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.1.tgz",
+ "integrity": "sha512-2Tth85cXwGFHfvRgZWszZSvdo+0Xsqmw8k8ZwxScfcBneNUraK+dxRxRm24nszx80Y0TVio8kKLt5sLE7ZCLlw==",
+ "license": "MIT",
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
+ "license": "ISC"
+ },
+ "node_modules/ini": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
+ "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==",
+ "license": "ISC"
+ },
+ "node_modules/ip-address": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.1.0.tgz",
+ "integrity": "sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 12"
+ }
+ },
+ "node_modules/ipaddr.js": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
+ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/is-binary-path": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
+ "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
+ "license": "MIT",
+ "dependencies": {
+ "binary-extensions": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-core-module": {
+ "version": "2.16.1",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz",
+ "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==",
+ "license": "MIT",
+ "dependencies": {
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-glob": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+ "license": "MIT",
+ "dependencies": {
+ "is-extglob": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/is-promise": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz",
+ "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==",
+ "license": "MIT"
+ },
+ "node_modules/istanbul-lib-coverage": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz",
+ "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/istanbul-lib-report": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz",
+ "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "istanbul-lib-coverage": "^3.0.0",
+ "make-dir": "^4.0.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-lib-report/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/istanbul-lib-source-maps": {
+ "version": "5.0.6",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-5.0.6.tgz",
+ "integrity": "sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@jridgewell/trace-mapping": "^0.3.23",
+ "debug": "^4.1.1",
+ "istanbul-lib-coverage": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-reports": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz",
+ "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "html-escaper": "^2.0.0",
+ "istanbul-lib-report": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/jiti": {
+ "version": "1.21.7",
+ "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz",
+ "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "jiti": "bin/jiti.js"
+ }
+ },
+ "node_modules/js-git": {
+ "version": "0.7.8",
+ "resolved": "https://registry.npmjs.org/js-git/-/js-git-0.7.8.tgz",
+ "integrity": "sha512-+E5ZH/HeRnoc/LW0AmAyhU+mNcWBzAKE+30+IDMLSLbbK+Tdt02AdkOKq9u15rlJsDEGFqtgckc8ZM59LhhiUA==",
+ "license": "MIT",
+ "dependencies": {
+ "bodec": "^0.1.0",
+ "culvert": "^0.1.2",
+ "git-sha1": "^0.1.2",
+ "pako": "^0.2.5"
+ }
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "license": "MIT"
+ },
+ "node_modules/js-yaml": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
+ "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
+ "license": "MIT",
+ "dependencies": {
+ "argparse": "^2.0.1"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/jsesc": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz",
+ "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/json-stringify-safe": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
+ "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==",
+ "license": "ISC",
+ "optional": true
+ },
+ "node_modules/json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/lazy": {
+ "version": "1.0.11",
+ "resolved": "https://registry.npmjs.org/lazy/-/lazy-1.0.11.tgz",
+ "integrity": "sha512-Y+CjUfLmIpoUCCRl0ub4smrYtGGr5AOa2AKOaWelGHOGz33X/Y/KizefGqbkwfz44+cnq/+9habclf8vOmu2LA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.2.0"
+ }
+ },
+ "node_modules/lilconfig": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz",
+ "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antonk52"
+ }
+ },
+ "node_modules/lines-and-columns": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
+ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
+ "license": "MIT"
+ },
+ "node_modules/loose-envify": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+ "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "js-tokens": "^3.0.0 || ^4.0.0"
+ },
+ "bin": {
+ "loose-envify": "cli.js"
+ }
+ },
+ "node_modules/lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "node_modules/lucide-react": {
+ "version": "0.562.0",
+ "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.562.0.tgz",
+ "integrity": "sha512-82hOAu7y0dbVuFfmO4bYF1XEwYk/mEbM5E+b1jgci/udUBEE/R7LF5Ip0CCEmXe8AybRM8L+04eP+LGZeDvkiw==",
+ "license": "ISC",
+ "peerDependencies": {
+ "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/magic-string": {
+ "version": "0.30.21",
+ "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz",
+ "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.5.5"
+ }
+ },
+ "node_modules/magicast": {
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.5.1.tgz",
+ "integrity": "sha512-xrHS24IxaLrvuo613F719wvOIv9xPHFWQHuvGUBmPnCA/3MQxKI3b+r7n1jAoDHmsbC5bRhTZYR77invLAxVnw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "source-map-js": "^1.2.1"
+ }
+ },
+ "node_modules/make-dir": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz",
+ "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "semver": "^7.5.3"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/make-dir/node_modules/semver": {
+ "version": "7.7.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
+ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/media-typer": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-1.1.0.tgz",
+ "integrity": "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/merge-descriptors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-2.0.0.tgz",
+ "integrity": "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/methods": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+ "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/micromatch": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "braces": "^3.0.3",
+ "picomatch": "^2.3.1"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/mime": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz",
+ "integrity": "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "mime": "cli.js"
+ },
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.54.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz",
+ "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz",
+ "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "^1.54.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/mkdirp": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz",
+ "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==",
+ "license": "MIT",
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/module-details-from-path": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/module-details-from-path/-/module-details-from-path-1.0.4.tgz",
+ "integrity": "sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w==",
+ "license": "MIT"
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/mute-stream": {
+ "version": "0.0.8",
+ "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz",
+ "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==",
+ "license": "ISC"
+ },
+ "node_modules/mz": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz",
+ "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "any-promise": "^1.0.0",
+ "object-assign": "^4.0.1",
+ "thenify-all": "^1.0.0"
+ }
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.11",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
+ "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/needle": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/needle/-/needle-2.4.0.tgz",
+ "integrity": "sha512-4Hnwzr3mi5L97hMYeNl8wRW/Onhy4nUKR/lVemJ8gJedxxUyBLm9kkrDColJvoSfwi0jCNhD+xCdOtiGDQiRZg==",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "^3.2.6",
+ "iconv-lite": "^0.4.4",
+ "sax": "^1.2.4"
+ },
+ "bin": {
+ "needle": "bin/needle"
+ },
+ "engines": {
+ "node": ">= 4.4.x"
+ }
+ },
+ "node_modules/needle/node_modules/debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.1"
+ }
+ },
+ "node_modules/needle/node_modules/iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "license": "MIT",
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/negotiator": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz",
+ "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/netmask": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/netmask/-/netmask-2.0.2.tgz",
+ "integrity": "sha512-dBpDMdxv9Irdq66304OLfEmQ9tbNRFnFTuZiLo+bD+r332bBmMJ8GBLXklIXXgxd3+v9+KUnZaUR5PJMa75Gsg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.27",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
+ "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/nssocket": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/nssocket/-/nssocket-0.6.0.tgz",
+ "integrity": "sha512-a9GSOIql5IqgWJR3F/JXG4KpJTA3Z53Cj0MeMvGpglytB1nxE4PdFNC0jINe27CS7cGivoynwc054EzCcT3M3w==",
+ "license": "MIT",
+ "dependencies": {
+ "eventemitter2": "~0.4.14",
+ "lazy": "~1.0.11"
+ },
+ "engines": {
+ "node": ">= 0.10.x"
+ }
+ },
+ "node_modules/nssocket/node_modules/eventemitter2": {
+ "version": "0.4.14",
+ "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz",
+ "integrity": "sha512-K7J4xq5xAD5jHsGM5ReWXRTFa3JRGofHiMcVgQ8PRwgWxzjHpMWCIzsmyf60+mh8KLsqYPcjUMa0AC4hd6lPyQ==",
+ "license": "MIT"
+ },
+ "node_modules/object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-hash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz",
+ "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/object-inspect": {
+ "version": "1.13.4",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz",
+ "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/obug": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz",
+ "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==",
+ "dev": true,
+ "funding": [
+ "https://github.com/sponsors/sxzz",
+ "https://opencollective.com/debug"
+ ],
+ "license": "MIT"
+ },
+ "node_modules/on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "license": "MIT",
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
+ "license": "ISC",
+ "dependencies": {
+ "wrappy": "1"
+ }
+ },
+ "node_modules/pac-proxy-agent": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-7.2.0.tgz",
+ "integrity": "sha512-TEB8ESquiLMc0lV8vcd5Ql/JAKAoyzHFXaStwjkzpOpC5Yv+pIzLfHvjTSdf3vpa2bMiUQrg9i6276yn8666aA==",
+ "license": "MIT",
+ "dependencies": {
+ "@tootallnate/quickjs-emscripten": "^0.23.0",
+ "agent-base": "^7.1.2",
+ "debug": "^4.3.4",
+ "get-uri": "^6.0.1",
+ "http-proxy-agent": "^7.0.0",
+ "https-proxy-agent": "^7.0.6",
+ "pac-resolver": "^7.0.1",
+ "socks-proxy-agent": "^8.0.5"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/pac-resolver": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/pac-resolver/-/pac-resolver-7.0.1.tgz",
+ "integrity": "sha512-5NPgf87AT2STgwa2ntRMr45jTKrYBGkVU36yT0ig/n/GMAa3oPqhZfIQ2kMEimReg0+t9kZViDVZ83qfVUlckg==",
+ "license": "MIT",
+ "dependencies": {
+ "degenerator": "^5.0.0",
+ "netmask": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/pako": {
+ "version": "0.2.9",
+ "resolved": "https://registry.npmjs.org/pako/-/pako-0.2.9.tgz",
+ "integrity": "sha512-NUcwaKxUxWrZLpDG+z/xZaCgQITkA/Dv4V/T6bw7VON6l1Xz/VnrBqrYjZQ12TamKHzITTfOEIYUj48y2KXImA==",
+ "license": "MIT"
+ },
+ "node_modules/parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/path-parse": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
+ "license": "MIT"
+ },
+ "node_modules/path-to-regexp": {
+ "version": "8.3.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.3.0.tgz",
+ "integrity": "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/pathe": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz",
+ "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/pidusage": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/pidusage/-/pidusage-3.0.2.tgz",
+ "integrity": "sha512-g0VU+y08pKw5M8EZ2rIGiEBaB8wrQMjYGFfW2QVIfyT8V+fq8YFLkvlz4bz5ljvFDJYNFCWT3PWqcRr2FKO81w==",
+ "license": "MIT",
+ "dependencies": {
+ "safe-buffer": "^5.2.1"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/pify": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz",
+ "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/pirates": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz",
+ "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/pm2": {
+ "version": "5.4.3",
+ "resolved": "https://registry.npmjs.org/pm2/-/pm2-5.4.3.tgz",
+ "integrity": "sha512-4/I1htIHzZk1Y67UgOCo4F1cJtas1kSds31N8zN0PybO230id1nigyjGuGFzUnGmUFPmrJ0On22fO1ChFlp7VQ==",
+ "license": "AGPL-3.0",
+ "dependencies": {
+ "@pm2/agent": "~2.0.0",
+ "@pm2/io": "~6.0.1",
+ "@pm2/js-api": "~0.8.0",
+ "@pm2/pm2-version-check": "latest",
+ "async": "~3.2.0",
+ "blessed": "0.1.81",
+ "chalk": "3.0.0",
+ "chokidar": "^3.5.3",
+ "cli-tableau": "^2.0.0",
+ "commander": "2.15.1",
+ "croner": "~4.1.92",
+ "dayjs": "~1.11.5",
+ "debug": "^4.3.1",
+ "enquirer": "2.3.6",
+ "eventemitter2": "5.0.1",
+ "fclone": "1.0.11",
+ "js-yaml": "~4.1.0",
+ "mkdirp": "1.0.4",
+ "needle": "2.4.0",
+ "pidusage": "~3.0",
+ "pm2-axon": "~4.0.1",
+ "pm2-axon-rpc": "~0.7.1",
+ "pm2-deploy": "~1.0.2",
+ "pm2-multimeter": "^0.1.2",
+ "promptly": "^2",
+ "semver": "^7.2",
+ "source-map-support": "0.5.21",
+ "sprintf-js": "1.1.2",
+ "vizion": "~2.2.1"
+ },
+ "bin": {
+ "pm2": "bin/pm2",
+ "pm2-dev": "bin/pm2-dev",
+ "pm2-docker": "bin/pm2-docker",
+ "pm2-runtime": "bin/pm2-runtime"
+ },
+ "engines": {
+ "node": ">=12.0.0"
+ },
+ "optionalDependencies": {
+ "pm2-sysmonit": "^1.2.8"
+ }
+ },
+ "node_modules/pm2-axon": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/pm2-axon/-/pm2-axon-4.0.1.tgz",
+ "integrity": "sha512-kES/PeSLS8orT8dR5jMlNl+Yu4Ty3nbvZRmaAtROuVm9nYYGiaoXqqKQqQYzWQzMYWUKHMQTvBlirjE5GIIxqg==",
+ "license": "MIT",
+ "dependencies": {
+ "amp": "~0.3.1",
+ "amp-message": "~0.1.1",
+ "debug": "^4.3.1",
+ "escape-string-regexp": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=5"
+ }
+ },
+ "node_modules/pm2-axon-rpc": {
+ "version": "0.7.1",
+ "resolved": "https://registry.npmjs.org/pm2-axon-rpc/-/pm2-axon-rpc-0.7.1.tgz",
+ "integrity": "sha512-FbLvW60w+vEyvMjP/xom2UPhUN/2bVpdtLfKJeYM3gwzYhoTEEChCOICfFzxkxuoEleOlnpjie+n1nue91bDQw==",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "^4.3.1"
+ },
+ "engines": {
+ "node": ">=5"
+ }
+ },
+ "node_modules/pm2-deploy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/pm2-deploy/-/pm2-deploy-1.0.2.tgz",
+ "integrity": "sha512-YJx6RXKrVrWaphEYf++EdOOx9EH18vM8RSZN/P1Y+NokTKqYAca/ejXwVLyiEpNju4HPZEk3Y2uZouwMqUlcgg==",
+ "license": "MIT",
+ "dependencies": {
+ "run-series": "^1.1.8",
+ "tv4": "^1.3.0"
+ },
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/pm2-multimeter": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/pm2-multimeter/-/pm2-multimeter-0.1.2.tgz",
+ "integrity": "sha512-S+wT6XfyKfd7SJIBqRgOctGxaBzUOmVQzTAS+cg04TsEUObJVreha7lvCfX8zzGVr871XwCSnHUU7DQQ5xEsfA==",
+ "license": "MIT/X11",
+ "dependencies": {
+ "charm": "~0.1.1"
+ }
+ },
+ "node_modules/pm2-sysmonit": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/pm2-sysmonit/-/pm2-sysmonit-1.2.8.tgz",
+ "integrity": "sha512-ACOhlONEXdCTVwKieBIQLSi2tQZ8eKinhcr9JpZSUAL8Qy0ajIgRtsLxG/lwPOW3JEKqPyw/UaHmTWhUzpP4kA==",
+ "license": "Apache",
+ "optional": true,
+ "dependencies": {
+ "async": "^3.2.0",
+ "debug": "^4.3.1",
+ "pidusage": "^2.0.21",
+ "systeminformation": "^5.7",
+ "tx2": "~1.0.4"
+ }
+ },
+ "node_modules/pm2-sysmonit/node_modules/pidusage": {
+ "version": "2.0.21",
+ "resolved": "https://registry.npmjs.org/pidusage/-/pidusage-2.0.21.tgz",
+ "integrity": "sha512-cv3xAQos+pugVX+BfXpHsbyz/dLzX+lr44zNMsYiGxUw+kV5sgQCIcLd1z+0vq+KyC7dJ+/ts2PsfgWfSC3WXA==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "safe-buffer": "^5.2.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/pm2/node_modules/chalk": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz",
+ "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/pm2/node_modules/semver": {
+ "version": "7.7.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
+ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/pm2/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/portos-ai-toolkit": {
+ "version": "0.2.0",
+ "resolved": "git+ssh://git@github.com/atomantic/portos-ai-toolkit.git#54c33f88be85b1fa301c5ccb4ca099bfda4199e2",
+ "license": "MIT",
+ "dependencies": {
+ "uuid": "^11.0.3",
+ "zod": "^3.24.1"
+ },
+ "peerDependencies": {
+ "express": "^4.21.2 || ^5.2.1",
+ "react": "^18.3.1",
+ "react-dom": "^18.3.1",
+ "socket.io": "^4.8.3",
+ "socket.io-client": "^4.8.3"
+ },
+ "peerDependenciesMeta": {
+ "express": {
+ "optional": true
+ },
+ "react": {
+ "optional": true
+ },
+ "react-dom": {
+ "optional": true
+ },
+ "socket.io": {
+ "optional": true
+ },
+ "socket.io-client": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/portos-client": {
+ "resolved": "client",
+ "link": true
+ },
+ "node_modules/portos-server": {
+ "resolved": "server",
+ "link": true
+ },
+ "node_modules/postcss": {
+ "version": "8.5.6",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz",
+ "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "nanoid": "^3.3.11",
+ "picocolors": "^1.1.1",
+ "source-map-js": "^1.2.1"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/postcss-import": {
+ "version": "15.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz",
+ "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "postcss-value-parser": "^4.0.0",
+ "read-cache": "^1.0.0",
+ "resolve": "^1.1.7"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.0.0"
+ }
+ },
+ "node_modules/postcss-js": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz",
+ "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "camelcase-css": "^2.0.1"
+ },
+ "engines": {
+ "node": "^12 || ^14 || >= 16"
+ },
+ "peerDependencies": {
+ "postcss": "^8.4.21"
+ }
+ },
+ "node_modules/postcss-load-config": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz",
+ "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "lilconfig": "^3.1.1"
+ },
+ "engines": {
+ "node": ">= 18"
+ },
+ "peerDependencies": {
+ "jiti": ">=1.21.0",
+ "postcss": ">=8.0.9",
+ "tsx": "^4.8.1",
+ "yaml": "^2.4.2"
+ },
+ "peerDependenciesMeta": {
+ "jiti": {
+ "optional": true
+ },
+ "postcss": {
+ "optional": true
+ },
+ "tsx": {
+ "optional": true
+ },
+ "yaml": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/postcss-nested": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz",
+ "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "postcss-selector-parser": "^6.1.1"
+ },
+ "engines": {
+ "node": ">=12.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.14"
+ }
+ },
+ "node_modules/postcss-selector-parser": {
+ "version": "6.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz",
+ "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "cssesc": "^3.0.0",
+ "util-deprecate": "^1.0.2"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/postcss-value-parser": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
+ "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/promptly": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/promptly/-/promptly-2.2.0.tgz",
+ "integrity": "sha512-aC9j+BZsRSSzEsXBNBwDnAxujdx19HycZoKgRgzWnS8eOHg1asuf9heuLprfbe739zY3IdUQx+Egv6Jn135WHA==",
+ "license": "MIT",
+ "dependencies": {
+ "read": "^1.0.4"
+ }
+ },
+ "node_modules/proxy-addr": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
+ "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
+ "license": "MIT",
+ "dependencies": {
+ "forwarded": "0.2.0",
+ "ipaddr.js": "1.9.1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/proxy-agent": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/proxy-agent/-/proxy-agent-6.3.1.tgz",
+ "integrity": "sha512-Rb5RVBy1iyqOtNl15Cw/llpeLH8bsb37gM1FUfKQ+Wck6xHlbAhWGUFiTRHtkjqGTA5pSHz6+0hrPW/oECihPQ==",
+ "license": "MIT",
+ "dependencies": {
+ "agent-base": "^7.0.2",
+ "debug": "^4.3.4",
+ "http-proxy-agent": "^7.0.0",
+ "https-proxy-agent": "^7.0.2",
+ "lru-cache": "^7.14.1",
+ "pac-proxy-agent": "^7.0.1",
+ "proxy-from-env": "^1.1.0",
+ "socks-proxy-agent": "^8.0.2"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/proxy-agent/node_modules/lru-cache": {
+ "version": "7.18.3",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
+ "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/proxy-from-env": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
+ "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==",
+ "license": "MIT"
+ },
+ "node_modules/qs": {
+ "version": "6.14.1",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.1.tgz",
+ "integrity": "sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "side-channel": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/queue-microtask": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+ "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/raw-body": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.2.tgz",
+ "integrity": "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==",
+ "license": "MIT",
+ "dependencies": {
+ "bytes": "~3.1.2",
+ "http-errors": "~2.0.1",
+ "iconv-lite": "~0.7.0",
+ "unpipe": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/react": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz",
+ "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-dom": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz",
+ "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0",
+ "scheduler": "^0.23.2"
+ },
+ "peerDependencies": {
+ "react": "^18.3.1"
+ }
+ },
+ "node_modules/react-hot-toast": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/react-hot-toast/-/react-hot-toast-2.6.0.tgz",
+ "integrity": "sha512-bH+2EBMZ4sdyou/DPrfgIouFpcRLCJ+HoCA32UoAYHn6T3Ur5yfcDCeSr5mwldl6pFOsiocmrXMuoCJ1vV8bWg==",
+ "license": "MIT",
+ "dependencies": {
+ "csstype": "^3.1.3",
+ "goober": "^2.1.16"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "react": ">=16",
+ "react-dom": ">=16"
+ }
+ },
+ "node_modules/react-refresh": {
+ "version": "0.17.0",
+ "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz",
+ "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-router": {
+ "version": "7.12.0",
+ "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.12.0.tgz",
+ "integrity": "sha512-kTPDYPFzDVGIIGNLS5VJykK0HfHLY5MF3b+xj0/tTyNYL1gF1qs7u67Z9jEhQk2sQ98SUaHxlG31g1JtF7IfVw==",
+ "license": "MIT",
+ "dependencies": {
+ "cookie": "^1.0.1",
+ "set-cookie-parser": "^2.6.0"
+ },
+ "engines": {
+ "node": ">=20.0.0"
+ },
+ "peerDependencies": {
+ "react": ">=18",
+ "react-dom": ">=18"
+ },
+ "peerDependenciesMeta": {
+ "react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/react-router-dom": {
+ "version": "7.12.0",
+ "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.12.0.tgz",
+ "integrity": "sha512-pfO9fiBcpEfX4Tx+iTYKDtPbrSLLCbwJ5EqP+SPYQu1VYCXdy79GSj0wttR0U4cikVdlImZuEZ/9ZNCgoaxwBA==",
+ "license": "MIT",
+ "dependencies": {
+ "react-router": "7.12.0"
+ },
+ "engines": {
+ "node": ">=20.0.0"
+ },
+ "peerDependencies": {
+ "react": ">=18",
+ "react-dom": ">=18"
+ }
+ },
+ "node_modules/react-router/node_modules/cookie": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz",
+ "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/read": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/read/-/read-1.0.7.tgz",
+ "integrity": "sha512-rSOKNYUmaxy0om1BNjMN4ezNT6VKK+2xF4GBhc81mkH7L60i6dp8qPYrkndNLT3QPphoII3maL9PVC9XmhHwVQ==",
+ "license": "ISC",
+ "dependencies": {
+ "mute-stream": "~0.0.4"
+ },
+ "engines": {
+ "node": ">=0.8"
+ }
+ },
+ "node_modules/read-cache": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz",
+ "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "pify": "^2.3.0"
+ }
+ },
+ "node_modules/readdirp": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
+ "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
+ "license": "MIT",
+ "dependencies": {
+ "picomatch": "^2.2.1"
+ },
+ "engines": {
+ "node": ">=8.10.0"
+ }
+ },
+ "node_modules/require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/require-in-the-middle": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/require-in-the-middle/-/require-in-the-middle-5.2.0.tgz",
+ "integrity": "sha512-efCx3b+0Z69/LGJmm9Yvi4cqEdxnoGnxYxGxBghkkTTFeXRtTCmmhO0AnAfHz59k957uTSuy8WaHqOs8wbYUWg==",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "^4.1.1",
+ "module-details-from-path": "^1.0.3",
+ "resolve": "^1.22.1"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/resolve": {
+ "version": "1.22.11",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz",
+ "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==",
+ "license": "MIT",
+ "dependencies": {
+ "is-core-module": "^2.16.1",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/reusify": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz",
+ "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "iojs": ">=1.0.0",
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/rollup": {
+ "version": "4.55.1",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.55.1.tgz",
+ "integrity": "sha512-wDv/Ht1BNHB4upNbK74s9usvl7hObDnvVzknxqY/E/O3X6rW1U1rV1aENEfJ54eFZDTNo7zv1f5N4edCluH7+A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "1.0.8"
+ },
+ "bin": {
+ "rollup": "dist/bin/rollup"
+ },
+ "engines": {
+ "node": ">=18.0.0",
+ "npm": ">=8.0.0"
+ },
+ "optionalDependencies": {
+ "@rollup/rollup-android-arm-eabi": "4.55.1",
+ "@rollup/rollup-android-arm64": "4.55.1",
+ "@rollup/rollup-darwin-arm64": "4.55.1",
+ "@rollup/rollup-darwin-x64": "4.55.1",
+ "@rollup/rollup-freebsd-arm64": "4.55.1",
+ "@rollup/rollup-freebsd-x64": "4.55.1",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.55.1",
+ "@rollup/rollup-linux-arm-musleabihf": "4.55.1",
+ "@rollup/rollup-linux-arm64-gnu": "4.55.1",
+ "@rollup/rollup-linux-arm64-musl": "4.55.1",
+ "@rollup/rollup-linux-loong64-gnu": "4.55.1",
+ "@rollup/rollup-linux-loong64-musl": "4.55.1",
+ "@rollup/rollup-linux-ppc64-gnu": "4.55.1",
+ "@rollup/rollup-linux-ppc64-musl": "4.55.1",
+ "@rollup/rollup-linux-riscv64-gnu": "4.55.1",
+ "@rollup/rollup-linux-riscv64-musl": "4.55.1",
+ "@rollup/rollup-linux-s390x-gnu": "4.55.1",
+ "@rollup/rollup-linux-x64-gnu": "4.55.1",
+ "@rollup/rollup-linux-x64-musl": "4.55.1",
+ "@rollup/rollup-openbsd-x64": "4.55.1",
+ "@rollup/rollup-openharmony-arm64": "4.55.1",
+ "@rollup/rollup-win32-arm64-msvc": "4.55.1",
+ "@rollup/rollup-win32-ia32-msvc": "4.55.1",
+ "@rollup/rollup-win32-x64-gnu": "4.55.1",
+ "@rollup/rollup-win32-x64-msvc": "4.55.1",
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/router": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/router/-/router-2.2.0.tgz",
+ "integrity": "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "^4.4.0",
+ "depd": "^2.0.0",
+ "is-promise": "^4.0.0",
+ "parseurl": "^1.3.3",
+ "path-to-regexp": "^8.0.0"
+ },
+ "engines": {
+ "node": ">= 18"
+ }
+ },
+ "node_modules/run-parallel": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+ "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "queue-microtask": "^1.2.2"
+ }
+ },
+ "node_modules/run-series": {
+ "version": "1.1.9",
+ "resolved": "https://registry.npmjs.org/run-series/-/run-series-1.1.9.tgz",
+ "integrity": "sha512-Arc4hUN896vjkqCYrUXquBFtRZdv1PfLbTYP71efP6butxyQ0kWpiNJyAgsxscmQg1cqvHY32/UCBzXedTpU2g==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/rxjs": {
+ "version": "7.8.2",
+ "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz",
+ "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "tslib": "^2.1.0"
+ }
+ },
+ "node_modules/safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "license": "MIT"
+ },
+ "node_modules/sax": {
+ "version": "1.4.4",
+ "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.4.tgz",
+ "integrity": "sha512-1n3r/tGXO6b6VXMdFT54SHzT9ytu9yr7TaELowdYpMqY/Ao7EnlQGmAQ1+RatX7Tkkdm6hONI2owqNx2aZj5Sw==",
+ "license": "BlueOak-1.0.0",
+ "engines": {
+ "node": ">=11.0.0"
+ }
+ },
+ "node_modules/scheduler": {
+ "version": "0.23.2",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz",
+ "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ }
+ },
+ "node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/send": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/send/-/send-1.2.1.tgz",
+ "integrity": "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "^4.4.3",
+ "encodeurl": "^2.0.0",
+ "escape-html": "^1.0.3",
+ "etag": "^1.8.1",
+ "fresh": "^2.0.0",
+ "http-errors": "^2.0.1",
+ "mime-types": "^3.0.2",
+ "ms": "^2.1.3",
+ "on-finished": "^2.4.1",
+ "range-parser": "^1.2.1",
"statuses": "^2.0.2"
},
"engines": {
- "node": ">= 18"
+ "node": ">= 18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/serve-static": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-2.2.1.tgz",
+ "integrity": "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==",
+ "license": "MIT",
+ "dependencies": {
+ "encodeurl": "^2.0.0",
+ "escape-html": "^1.0.3",
+ "parseurl": "^1.3.3",
+ "send": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
+ "node_modules/set-cookie-parser": {
+ "version": "2.7.2",
+ "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz",
+ "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==",
+ "license": "MIT"
+ },
+ "node_modules/setprototypeof": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
+ "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
+ "license": "ISC"
+ },
+ "node_modules/shell-quote": {
+ "version": "1.8.3",
+ "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.3.tgz",
+ "integrity": "sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/shimmer": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/shimmer/-/shimmer-1.2.1.tgz",
+ "integrity": "sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==",
+ "license": "BSD-2-Clause"
+ },
+ "node_modules/side-channel": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz",
+ "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "object-inspect": "^1.13.3",
+ "side-channel-list": "^1.0.0",
+ "side-channel-map": "^1.0.1",
+ "side-channel-weakmap": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-list": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz",
+ "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "object-inspect": "^1.13.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-map": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz",
+ "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bound": "^1.0.2",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.5",
+ "object-inspect": "^1.13.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel-weakmap": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
+ "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bound": "^1.0.2",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.5",
+ "object-inspect": "^1.13.3",
+ "side-channel-map": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/siginfo": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz",
+ "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/signal-exit": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
+ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
+ "license": "ISC"
+ },
+ "node_modules/smart-buffer": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz",
+ "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 6.0.0",
+ "npm": ">= 3.0.0"
+ }
+ },
+ "node_modules/socket.io": {
+ "version": "4.8.3",
+ "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.8.3.tgz",
+ "integrity": "sha512-2Dd78bqzzjE6KPkD5fHZmDAKRNe3J15q+YHDrIsy9WEkqttc7GY+kT9OBLSMaPbQaEd0x1BjcmtMtXkfpc+T5A==",
+ "license": "MIT",
+ "dependencies": {
+ "accepts": "~1.3.4",
+ "base64id": "~2.0.0",
+ "cors": "~2.8.5",
+ "debug": "~4.4.1",
+ "engine.io": "~6.6.0",
+ "socket.io-adapter": "~2.5.2",
+ "socket.io-parser": "~4.2.4"
+ },
+ "engines": {
+ "node": ">=10.2.0"
+ }
+ },
+ "node_modules/socket.io-adapter": {
+ "version": "2.5.6",
+ "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.5.6.tgz",
+ "integrity": "sha512-DkkO/dz7MGln0dHn5bmN3pPy+JmywNICWrJqVWiVOyvXjWQFIv9c2h24JrQLLFJ2aQVQf/Cvl1vblnd4r2apLQ==",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "~4.4.1",
+ "ws": "~8.18.3"
+ }
+ },
+ "node_modules/socket.io-adapter/node_modules/ws": {
+ "version": "8.18.3",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz",
+ "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": ">=5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/socket.io-client": {
+ "version": "4.8.3",
+ "resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-4.8.3.tgz",
+ "integrity": "sha512-uP0bpjWrjQmUt5DTHq9RuoCBdFJF10cdX9X+a368j/Ft0wmaVgxlrjvK3kjvgCODOMMOz9lcaRzxmso0bTWZ/g==",
+ "license": "MIT",
+ "dependencies": {
+ "@socket.io/component-emitter": "~3.1.0",
+ "debug": "~4.4.1",
+ "engine.io-client": "~6.6.1",
+ "socket.io-parser": "~4.2.4"
+ },
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/socket.io-parser": {
+ "version": "4.2.5",
+ "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.5.tgz",
+ "integrity": "sha512-bPMmpy/5WWKHea5Y/jYAP6k74A+hvmRCQaJuJB6I/ML5JZq/KfNieUVo/3Mh7SAqn7TyFdIo6wqYHInG1MU1bQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@socket.io/component-emitter": "~3.1.0",
+ "debug": "~4.4.1"
+ },
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/socket.io/node_modules/accepts": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
+ "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-types": "~2.1.34",
+ "negotiator": "0.6.3"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/socket.io/node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/socket.io/node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/socket.io/node_modules/negotiator": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
+ "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/socks": {
+ "version": "2.8.7",
+ "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.7.tgz",
+ "integrity": "sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A==",
+ "license": "MIT",
+ "dependencies": {
+ "ip-address": "^10.0.1",
+ "smart-buffer": "^4.2.0"
+ },
+ "engines": {
+ "node": ">= 10.0.0",
+ "npm": ">= 3.0.0"
+ }
+ },
+ "node_modules/socks-proxy-agent": {
+ "version": "8.0.5",
+ "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz",
+ "integrity": "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==",
+ "license": "MIT",
+ "dependencies": {
+ "agent-base": "^7.1.2",
+ "debug": "^4.3.4",
+ "socks": "^2.8.3"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/source-map-support": {
+ "version": "0.5.21",
+ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
+ "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==",
+ "license": "MIT",
+ "dependencies": {
+ "buffer-from": "^1.0.0",
+ "source-map": "^0.6.0"
+ }
+ },
+ "node_modules/spawn-command": {
+ "version": "0.0.2",
+ "resolved": "https://registry.npmjs.org/spawn-command/-/spawn-command-0.0.2.tgz",
+ "integrity": "sha512-zC8zGoGkmc8J9ndvml8Xksr1Amk9qBujgbF0JAIWO7kXr43w0h/0GJNM/Vustixu+YE8N/MTrQ7N31FvHUACxQ==",
+ "dev": true
+ },
+ "node_modules/sprintf-js": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz",
+ "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==",
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/stackback": {
+ "version": "0.0.2",
+ "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz",
+ "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/statuses": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz",
+ "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/std-env": {
+ "version": "3.10.0",
+ "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz",
+ "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/sucrase": {
+ "version": "3.35.1",
+ "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz",
+ "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.2",
+ "commander": "^4.0.0",
+ "lines-and-columns": "^1.1.6",
+ "mz": "^2.7.0",
+ "pirates": "^4.0.1",
+ "tinyglobby": "^0.2.11",
+ "ts-interface-checker": "^0.1.9"
+ },
+ "bin": {
+ "sucrase": "bin/sucrase",
+ "sucrase-node": "bin/sucrase-node"
+ },
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ }
+ },
+ "node_modules/sucrase/node_modules/commander": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
+ "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/superagent": {
+ "version": "10.3.0",
+ "resolved": "https://registry.npmjs.org/superagent/-/superagent-10.3.0.tgz",
+ "integrity": "sha512-B+4Ik7ROgVKrQsXTV0Jwp2u+PXYLSlqtDAhYnkkD+zn3yg8s/zjA2MeGayPoY/KICrbitwneDHrjSotxKL+0XQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "component-emitter": "^1.3.1",
+ "cookiejar": "^2.1.4",
+ "debug": "^4.3.7",
+ "fast-safe-stringify": "^2.1.1",
+ "form-data": "^4.0.5",
+ "formidable": "^3.5.4",
+ "methods": "^1.1.2",
+ "mime": "2.6.0",
+ "qs": "^6.14.1"
+ },
+ "engines": {
+ "node": ">=14.18.0"
+ }
+ },
+ "node_modules/supertest": {
+ "version": "7.2.2",
+ "resolved": "https://registry.npmjs.org/supertest/-/supertest-7.2.2.tgz",
+ "integrity": "sha512-oK8WG9diS3DlhdUkcFn4tkNIiIbBx9lI2ClF8K+b2/m8Eyv47LSawxUzZQSNKUrVb2KsqeTDCcjAAVPYaSLVTA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "cookie-signature": "^1.2.2",
+ "methods": "^1.1.2",
+ "superagent": "^10.3.0"
+ },
+ "engines": {
+ "node": ">=14.18.0"
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "8.1.1",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
+ "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/supports-color?sponsor=1"
+ }
+ },
+ "node_modules/supports-preserve-symlinks-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/systeminformation": {
+ "version": "5.30.2",
+ "resolved": "https://registry.npmjs.org/systeminformation/-/systeminformation-5.30.2.tgz",
+ "integrity": "sha512-Rrt5oFTWluUVuPlbtn3o9ja+nvjdF3Um4DG0KxqfYvpzcx7Q9plZBTjJiJy9mAouua4+OI7IUGBaG9Zyt9NgxA==",
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin",
+ "linux",
+ "win32",
+ "freebsd",
+ "openbsd",
+ "netbsd",
+ "sunos",
+ "android"
+ ],
+ "bin": {
+ "systeminformation": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ },
+ "funding": {
+ "type": "Buy me a coffee",
+ "url": "https://www.buymeacoffee.com/systeminfo"
+ }
+ },
+ "node_modules/tailwindcss": {
+ "version": "3.4.19",
+ "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz",
+ "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@alloc/quick-lru": "^5.2.0",
+ "arg": "^5.0.2",
+ "chokidar": "^3.6.0",
+ "didyoumean": "^1.2.2",
+ "dlv": "^1.1.3",
+ "fast-glob": "^3.3.2",
+ "glob-parent": "^6.0.2",
+ "is-glob": "^4.0.3",
+ "jiti": "^1.21.7",
+ "lilconfig": "^3.1.3",
+ "micromatch": "^4.0.8",
+ "normalize-path": "^3.0.0",
+ "object-hash": "^3.0.0",
+ "picocolors": "^1.1.1",
+ "postcss": "^8.4.47",
+ "postcss-import": "^15.1.0",
+ "postcss-js": "^4.0.1",
+ "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0",
+ "postcss-nested": "^6.2.0",
+ "postcss-selector-parser": "^6.1.2",
+ "resolve": "^1.22.8",
+ "sucrase": "^3.35.0"
+ },
+ "bin": {
+ "tailwind": "lib/cli.js",
+ "tailwindcss": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "node_modules/tailwindcss/node_modules/glob-parent": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
+ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "is-glob": "^4.0.3"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/thenify": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz",
+ "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "any-promise": "^1.0.0"
+ }
+ },
+ "node_modules/thenify-all": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz",
+ "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "thenify": ">= 3.1.0 < 4"
+ },
+ "engines": {
+ "node": ">=0.8"
+ }
+ },
+ "node_modules/tinybench": {
+ "version": "2.9.0",
+ "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz",
+ "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/tinyexec": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz",
+ "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/tinyglobby": {
+ "version": "0.2.15",
+ "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz",
+ "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "fdir": "^6.5.0",
+ "picomatch": "^4.0.3"
+ },
+ "engines": {
+ "node": ">=12.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/SuperchupuDev"
+ }
+ },
+ "node_modules/tinyglobby/node_modules/fdir": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
+ "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12.0.0"
+ },
+ "peerDependencies": {
+ "picomatch": "^3 || ^4"
+ },
+ "peerDependenciesMeta": {
+ "picomatch": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/tinyglobby/node_modules/picomatch": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
+ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/tinyrainbow": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz",
+ "integrity": "sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "node_modules/to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "license": "MIT",
+ "dependencies": {
+ "is-number": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=8.0"
+ }
+ },
+ "node_modules/toidentifier": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
+ "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/tree-kill": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz",
+ "integrity": "sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "tree-kill": "cli.js"
+ }
+ },
+ "node_modules/ts-interface-checker": {
+ "version": "0.1.13",
+ "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz",
+ "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==",
+ "dev": true,
+ "license": "Apache-2.0"
+ },
+ "node_modules/tslib": {
+ "version": "2.8.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
+ "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
+ "license": "0BSD"
+ },
+ "node_modules/tv4": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/tv4/-/tv4-1.3.0.tgz",
+ "integrity": "sha512-afizzfpJgvPr+eDkREK4MxJ/+r8nEEHcmitwgnPUqpaP+FpwQyadnxNoSACbgc/b1LsZYtODGoPiFxQrgJgjvw==",
+ "license": [
+ {
+ "type": "Public Domain",
+ "url": "http://geraintluff.github.io/tv4/LICENSE.txt"
+ },
+ {
+ "type": "MIT",
+ "url": "http://jsonary.com/LICENSE.txt"
+ }
+ ],
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/tx2": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/tx2/-/tx2-1.0.5.tgz",
+ "integrity": "sha512-sJ24w0y03Md/bxzK4FU8J8JveYYUbSs2FViLJ2D/8bytSiyPRbuE3DyL/9UKYXTZlV3yXq0L8GLlhobTnekCVg==",
+ "license": "MIT",
+ "optional": true,
+ "dependencies": {
+ "json-stringify-safe": "^5.0.1"
+ }
+ },
+ "node_modules/type-is": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz",
+ "integrity": "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==",
+ "license": "MIT",
+ "dependencies": {
+ "content-type": "^1.0.5",
+ "media-typer": "^1.1.0",
+ "mime-types": "^3.0.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/undici-types": {
+ "version": "7.16.0",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz",
+ "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==",
+ "license": "MIT"
+ },
+ "node_modules/unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz",
+ "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "escalade": "^3.2.0",
+ "picocolors": "^1.1.1"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/utils-merge": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
+ "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/uuid": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz",
+ "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==",
+ "funding": [
+ "https://github.com/sponsors/broofa",
+ "https://github.com/sponsors/ctavan"
+ ],
+ "license": "MIT",
+ "bin": {
+ "uuid": "dist/esm/bin/uuid"
+ }
+ },
+ "node_modules/vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/vite": {
+ "version": "6.4.1",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-6.4.1.tgz",
+ "integrity": "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "esbuild": "^0.25.0",
+ "fdir": "^6.4.4",
+ "picomatch": "^4.0.2",
+ "postcss": "^8.5.3",
+ "rollup": "^4.34.9",
+ "tinyglobby": "^0.2.13"
+ },
+ "bin": {
+ "vite": "bin/vite.js"
+ },
+ "engines": {
+ "node": "^18.0.0 || ^20.0.0 || >=22.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/vitejs/vite?sponsor=1"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ },
+ "peerDependencies": {
+ "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0",
+ "jiti": ">=1.21.0",
+ "less": "*",
+ "lightningcss": "^1.21.0",
+ "sass": "*",
+ "sass-embedded": "*",
+ "stylus": "*",
+ "sugarss": "*",
+ "terser": "^5.16.0",
+ "tsx": "^4.8.1",
+ "yaml": "^2.4.2"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "jiti": {
+ "optional": true
+ },
+ "less": {
+ "optional": true
+ },
+ "lightningcss": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ },
+ "sass-embedded": {
+ "optional": true
+ },
+ "stylus": {
+ "optional": true
+ },
+ "sugarss": {
+ "optional": true
+ },
+ "terser": {
+ "optional": true
+ },
+ "tsx": {
+ "optional": true
+ },
+ "yaml": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/vite/node_modules/fdir": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
+ "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12.0.0"
+ },
+ "peerDependencies": {
+ "picomatch": "^3 || ^4"
+ },
+ "peerDependenciesMeta": {
+ "picomatch": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/vite/node_modules/picomatch": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
+ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/vitest": {
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.16.tgz",
+ "integrity": "sha512-E4t7DJ9pESL6E3I8nFjPa4xGUd3PmiWDLsDztS2qXSJWfHtbQnwAWylaBvSNY48I3vr8PTqIZlyK8TE3V3CA4Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@vitest/expect": "4.0.16",
+ "@vitest/mocker": "4.0.16",
+ "@vitest/pretty-format": "4.0.16",
+ "@vitest/runner": "4.0.16",
+ "@vitest/snapshot": "4.0.16",
+ "@vitest/spy": "4.0.16",
+ "@vitest/utils": "4.0.16",
+ "es-module-lexer": "^1.7.0",
+ "expect-type": "^1.2.2",
+ "magic-string": "^0.30.21",
+ "obug": "^2.1.1",
+ "pathe": "^2.0.3",
+ "picomatch": "^4.0.3",
+ "std-env": "^3.10.0",
+ "tinybench": "^2.9.0",
+ "tinyexec": "^1.0.2",
+ "tinyglobby": "^0.2.15",
+ "tinyrainbow": "^3.0.3",
+ "vite": "^6.0.0 || ^7.0.0",
+ "why-is-node-running": "^2.3.0"
+ },
+ "bin": {
+ "vitest": "vitest.mjs"
+ },
+ "engines": {
+ "node": "^20.0.0 || ^22.0.0 || >=24.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ },
+ "peerDependencies": {
+ "@edge-runtime/vm": "*",
+ "@opentelemetry/api": "^1.9.0",
+ "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0",
+ "@vitest/browser-playwright": "4.0.16",
+ "@vitest/browser-preview": "4.0.16",
+ "@vitest/browser-webdriverio": "4.0.16",
+ "@vitest/ui": "4.0.16",
+ "happy-dom": "*",
+ "jsdom": "*"
+ },
+ "peerDependenciesMeta": {
+ "@edge-runtime/vm": {
+ "optional": true
+ },
+ "@opentelemetry/api": {
+ "optional": true
+ },
+ "@types/node": {
+ "optional": true
+ },
+ "@vitest/browser-playwright": {
+ "optional": true
+ },
+ "@vitest/browser-preview": {
+ "optional": true
+ },
+ "@vitest/browser-webdriverio": {
+ "optional": true
+ },
+ "@vitest/ui": {
+ "optional": true
+ },
+ "happy-dom": {
+ "optional": true
+ },
+ "jsdom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/vitest/node_modules/picomatch": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
+ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/vizion": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/vizion/-/vizion-2.2.1.tgz",
+ "integrity": "sha512-sfAcO2yeSU0CSPFI/DmZp3FsFE9T+8913nv1xWBOyzODv13fwkn6Vl7HqxGpkr9F608M+8SuFId3s+BlZqfXww==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "async": "^2.6.3",
+ "git-node-fs": "^1.0.0",
+ "ini": "^1.3.5",
+ "js-git": "^0.7.8"
+ },
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/vizion/node_modules/async": {
+ "version": "2.6.4",
+ "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz",
+ "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==",
+ "license": "MIT",
+ "dependencies": {
+ "lodash": "^4.17.14"
+ }
+ },
+ "node_modules/why-is-node-running": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz",
+ "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "siginfo": "^2.0.0",
+ "stackback": "0.0.2"
+ },
+ "bin": {
+ "why-is-node-running": "cli.js"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/wrap-ansi": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
+ "license": "ISC"
+ },
+ "node_modules/ws": {
+ "version": "7.5.10",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz",
+ "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=8.3.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": "^5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/xmlhttprequest-ssl": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-2.1.2.tgz",
+ "integrity": "sha512-TEU+nJVUUnA4CYJFLvK5X9AOeH4KvDvhIfm0vV1GaQRtchnG0hgK5p8hw/xjv8cunWYCsiPCSDzObPyhEwq3KQ==",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/y18n": {
+ "version": "5.0.8",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
+ "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/yargs": {
+ "version": "17.7.2",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
+ "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "cliui": "^8.0.1",
+ "escalade": "^3.1.1",
+ "get-caller-file": "^2.0.5",
+ "require-directory": "^2.1.1",
+ "string-width": "^4.2.3",
+ "y18n": "^5.0.5",
+ "yargs-parser": "^21.1.1"
},
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/yargs-parser": {
+ "version": "21.1.1",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
+ "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/zod": {
+ "version": "3.25.76",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz",
+ "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==",
+ "license": "MIT",
"funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/express"
+ "url": "https://github.com/sponsors/colinhacks"
}
},
- "node_modules/serve-static": {
- "version": "2.2.1",
- "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-2.2.1.tgz",
- "integrity": "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==",
+ "packages/ai-toolkit": {
+ "name": "@portos/ai-toolkit",
+ "version": "0.1.0",
+ "extraneous": true,
"license": "MIT",
"dependencies": {
- "encodeurl": "^2.0.0",
- "escape-html": "^1.0.3",
- "parseurl": "^1.3.3",
- "send": "^1.2.0"
+ "uuid": "^11.0.3",
+ "zod": "^3.24.1"
},
- "engines": {
- "node": ">= 18"
+ "devDependencies": {
+ "@vitest/coverage-v8": "^4.0.16",
+ "supertest": "^7.1.4",
+ "vitest": "^4.0.16"
},
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/express"
+ "peerDependencies": {
+ "express": "^4.21.2 || ^5.2.1",
+ "react": "^18.3.1",
+ "react-dom": "^18.3.1",
+ "socket.io": "^4.8.3"
+ },
+ "peerDependenciesMeta": {
+ "socket.io": {
+ "optional": true
+ }
}
},
- "node_modules/setprototypeof": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
- "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
- "license": "ISC"
- },
- "node_modules/shell-quote": {
- "version": "1.8.3",
- "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.3.tgz",
- "integrity": "sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 0.4"
+ "server": {
+ "name": "portos-server",
+ "version": "0.9.19",
+ "dependencies": {
+ "cors": "^2.8.5",
+ "express": "^4.21.2",
+ "pm2": "^5.4.3",
+ "portos-ai-toolkit": "github:atomantic/portos-ai-toolkit#v0.2.0",
+ "socket.io": "^4.8.3",
+ "socket.io-client": "^4.8.3",
+ "uuid": "^11.0.3",
+ "zod": "^3.24.1"
},
- "funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "devDependencies": {
+ "@vitest/coverage-v8": "^4.0.16",
+ "supertest": "^7.1.4",
+ "vitest": "^4.0.16"
}
},
- "node_modules/side-channel": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz",
- "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
+ "server/node_modules/accepts": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
+ "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
"license": "MIT",
"dependencies": {
- "es-errors": "^1.3.0",
- "object-inspect": "^1.13.3",
- "side-channel-list": "^1.0.0",
- "side-channel-map": "^1.0.1",
- "side-channel-weakmap": "^1.0.2"
+ "mime-types": "~2.1.34",
+ "negotiator": "0.6.3"
},
"engines": {
- "node": ">= 0.4"
- },
- "funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "node": ">= 0.6"
}
},
- "node_modules/side-channel-list": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz",
- "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
+ "server/node_modules/body-parser": {
+ "version": "1.20.4",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.4.tgz",
+ "integrity": "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==",
"license": "MIT",
"dependencies": {
- "es-errors": "^1.3.0",
- "object-inspect": "^1.13.3"
+ "bytes": "~3.1.2",
+ "content-type": "~1.0.5",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "~1.2.0",
+ "http-errors": "~2.0.1",
+ "iconv-lite": "~0.4.24",
+ "on-finished": "~2.4.1",
+ "qs": "~6.14.0",
+ "raw-body": "~2.5.3",
+ "type-is": "~1.6.18",
+ "unpipe": "~1.0.0"
},
"engines": {
- "node": ">= 0.4"
- },
- "funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "node": ">= 0.8",
+ "npm": "1.2.8000 || >= 1.4.16"
}
},
- "node_modules/side-channel-map": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz",
- "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
+ "server/node_modules/content-disposition": {
+ "version": "0.5.4",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
+ "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
"license": "MIT",
"dependencies": {
- "call-bound": "^1.0.2",
- "es-errors": "^1.3.0",
- "get-intrinsic": "^1.2.5",
- "object-inspect": "^1.13.3"
+ "safe-buffer": "5.2.1"
},
"engines": {
- "node": ">= 0.4"
- },
- "funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "node": ">= 0.6"
}
},
- "node_modules/side-channel-weakmap": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
- "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
+ "server/node_modules/cookie-signature": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.7.tgz",
+ "integrity": "sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA==",
+ "license": "MIT"
+ },
+ "server/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"license": "MIT",
"dependencies": {
- "call-bound": "^1.0.2",
- "es-errors": "^1.3.0",
- "get-intrinsic": "^1.2.5",
- "object-inspect": "^1.13.3",
- "side-channel-map": "^1.0.1"
+ "ms": "2.0.0"
+ }
+ },
+ "server/node_modules/debug/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "license": "MIT"
+ },
+ "server/node_modules/express": {
+ "version": "4.22.1",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.22.1.tgz",
+ "integrity": "sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==",
+ "license": "MIT",
+ "dependencies": {
+ "accepts": "~1.3.8",
+ "array-flatten": "1.1.1",
+ "body-parser": "~1.20.3",
+ "content-disposition": "~0.5.4",
+ "content-type": "~1.0.4",
+ "cookie": "~0.7.1",
+ "cookie-signature": "~1.0.6",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "encodeurl": "~2.0.0",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "finalhandler": "~1.3.1",
+ "fresh": "~0.5.2",
+ "http-errors": "~2.0.0",
+ "merge-descriptors": "1.0.3",
+ "methods": "~1.1.2",
+ "on-finished": "~2.4.1",
+ "parseurl": "~1.3.3",
+ "path-to-regexp": "~0.1.12",
+ "proxy-addr": "~2.0.7",
+ "qs": "~6.14.0",
+ "range-parser": "~1.2.1",
+ "safe-buffer": "5.2.1",
+ "send": "~0.19.0",
+ "serve-static": "~1.16.2",
+ "setprototypeof": "1.2.0",
+ "statuses": "~2.0.1",
+ "type-is": "~1.6.18",
+ "utils-merge": "1.0.1",
+ "vary": "~1.1.2"
},
"engines": {
- "node": ">= 0.4"
+ "node": ">= 0.10.0"
},
"funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
}
},
- "node_modules/spawn-command": {
- "version": "0.0.2",
- "resolved": "https://registry.npmjs.org/spawn-command/-/spawn-command-0.0.2.tgz",
- "integrity": "sha512-zC8zGoGkmc8J9ndvml8Xksr1Amk9qBujgbF0JAIWO7kXr43w0h/0GJNM/Vustixu+YE8N/MTrQ7N31FvHUACxQ==",
- "dev": true
- },
- "node_modules/statuses": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz",
- "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==",
+ "server/node_modules/finalhandler": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.2.tgz",
+ "integrity": "sha512-aA4RyPcd3badbdABGDuTXCMTtOneUCAYH/gxoYRTZlIJdF0YPWuGqiAsIrhNnnqdXGswYk6dGujem4w80UJFhg==",
"license": "MIT",
+ "dependencies": {
+ "debug": "2.6.9",
+ "encodeurl": "~2.0.0",
+ "escape-html": "~1.0.3",
+ "on-finished": "~2.4.1",
+ "parseurl": "~1.3.3",
+ "statuses": "~2.0.2",
+ "unpipe": "~1.0.0"
+ },
"engines": {
"node": ">= 0.8"
}
},
- "node_modules/string-width": {
- "version": "4.2.3",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
- "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
- "dev": true,
+ "server/node_modules/fresh": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+ "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
"license": "MIT",
- "dependencies": {
- "emoji-regex": "^8.0.0",
- "is-fullwidth-code-point": "^3.0.0",
- "strip-ansi": "^6.0.1"
- },
"engines": {
- "node": ">=8"
+ "node": ">= 0.6"
}
},
- "node_modules/strip-ansi": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "dev": true,
+ "server/node_modules/iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"license": "MIT",
"dependencies": {
- "ansi-regex": "^5.0.1"
+ "safer-buffer": ">= 2.1.2 < 3"
},
"engines": {
- "node": ">=8"
+ "node": ">=0.10.0"
}
},
- "node_modules/supports-color": {
- "version": "8.1.1",
- "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
- "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
- "dev": true,
+ "server/node_modules/media-typer": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
"license": "MIT",
- "dependencies": {
- "has-flag": "^4.0.0"
- },
"engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/chalk/supports-color?sponsor=1"
+ "node": ">= 0.6"
}
},
- "node_modules/toidentifier": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
- "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
+ "server/node_modules/merge-descriptors": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
+ "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==",
"license": "MIT",
- "engines": {
- "node": ">=0.6"
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/tree-kill": {
- "version": "1.2.2",
- "resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz",
- "integrity": "sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==",
- "dev": true,
+ "server/node_modules/mime": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
+ "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
"license": "MIT",
"bin": {
- "tree-kill": "cli.js"
+ "mime": "cli.js"
+ },
+ "engines": {
+ "node": ">=4"
}
},
- "node_modules/tslib": {
- "version": "2.8.1",
- "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
- "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
- "dev": true,
- "license": "0BSD"
+ "server/node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
},
- "node_modules/type-is": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz",
- "integrity": "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==",
+ "server/node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"license": "MIT",
"dependencies": {
- "content-type": "^1.0.5",
- "media-typer": "^1.1.0",
- "mime-types": "^3.0.0"
+ "mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
- "node_modules/unpipe": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
- "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
+ "server/node_modules/negotiator": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
+ "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
"license": "MIT",
"engines": {
- "node": ">= 0.8"
+ "node": ">= 0.6"
}
},
- "node_modules/vary": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
- "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
+ "server/node_modules/path-to-regexp": {
+ "version": "0.1.12",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
+ "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
+ "license": "MIT"
+ },
+ "server/node_modules/raw-body": {
+ "version": "2.5.3",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.3.tgz",
+ "integrity": "sha512-s4VSOf6yN0rvbRZGxs8Om5CWj6seneMwK3oDb4lWDH0UPhWcxwOWw5+qk24bxq87szX1ydrwylIOp2uG1ojUpA==",
"license": "MIT",
+ "dependencies": {
+ "bytes": "~3.1.2",
+ "http-errors": "~2.0.1",
+ "iconv-lite": "~0.4.24",
+ "unpipe": "~1.0.0"
+ },
"engines": {
"node": ">= 0.8"
}
},
- "node_modules/wrap-ansi": {
- "version": "7.0.0",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
- "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
- "dev": true,
+ "server/node_modules/send": {
+ "version": "0.19.2",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.19.2.tgz",
+ "integrity": "sha512-VMbMxbDeehAxpOtWJXlcUS5E8iXh6QmN+BkRX1GARS3wRaXEEgzCcB10gTQazO42tpNIya8xIyNx8fll1OFPrg==",
"license": "MIT",
"dependencies": {
- "ansi-styles": "^4.0.0",
- "string-width": "^4.1.0",
- "strip-ansi": "^6.0.0"
- },
- "engines": {
- "node": ">=10"
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "encodeurl": "~2.0.0",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "~0.5.2",
+ "http-errors": "~2.0.1",
+ "mime": "1.6.0",
+ "ms": "2.1.3",
+ "on-finished": "~2.4.1",
+ "range-parser": "~1.2.1",
+ "statuses": "~2.0.2"
},
- "funding": {
- "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
- }
- },
- "node_modules/wrappy": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
- "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
- "license": "ISC"
- },
- "node_modules/y18n": {
- "version": "5.0.8",
- "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
- "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
- "dev": true,
- "license": "ISC",
"engines": {
- "node": ">=10"
+ "node": ">= 0.8.0"
}
},
- "node_modules/yargs": {
- "version": "17.7.2",
- "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
- "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
- "dev": true,
+ "server/node_modules/serve-static": {
+ "version": "1.16.3",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.3.tgz",
+ "integrity": "sha512-x0RTqQel6g5SY7Lg6ZreMmsOzncHFU7nhnRWkKgWuMTu5NN0DR5oruckMqRvacAN9d5w6ARnRBXl9xhDCgfMeA==",
"license": "MIT",
"dependencies": {
- "cliui": "^8.0.1",
- "escalade": "^3.1.1",
- "get-caller-file": "^2.0.5",
- "require-directory": "^2.1.1",
- "string-width": "^4.2.3",
- "y18n": "^5.0.5",
- "yargs-parser": "^21.1.1"
+ "encodeurl": "~2.0.0",
+ "escape-html": "~1.0.3",
+ "parseurl": "~1.3.3",
+ "send": "~0.19.1"
},
"engines": {
- "node": ">=12"
+ "node": ">= 0.8.0"
}
},
- "node_modules/yargs-parser": {
- "version": "21.1.1",
- "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
- "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
- "dev": true,
- "license": "ISC",
+ "server/node_modules/type-is": {
+ "version": "1.6.18",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+ "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
+ "license": "MIT",
+ "dependencies": {
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.24"
+ },
"engines": {
- "node": ">=12"
+ "node": ">= 0.6"
}
}
}
diff --git a/package.json b/package.json
index 9f35f6c..03966d4 100644
--- a/package.json
+++ b/package.json
@@ -1,9 +1,16 @@
{
"name": "portos",
- "version": "0.8.10",
+ "version": "0.9.19",
"private": true,
"description": "Local dev machine App OS portal",
+ "author": "Adam Eivy (@antic|@atomantic)",
+ "license": "MIT",
"type": "module",
+ "workspaces": [
+ "packages/*",
+ "server",
+ "client"
+ ],
"scripts": {
"dev": "concurrently \"npm run dev:server\" \"npm run dev:client\"",
"dev:server": "cd server && npm run dev",
@@ -17,9 +24,15 @@
"test": "cd server && npm test"
},
"devDependencies": {
- "concurrently": "^8.2.2"
+ "@vitejs/plugin-react": "^4.3.4",
+ "autoprefixer": "^10.4.20",
+ "concurrently": "^8.2.2",
+ "postcss": "^8.4.49",
+ "tailwindcss": "^3.4.17",
+ "vite": "^6.0.6"
},
"dependencies": {
- "express": "^5.2.1"
+ "express": "^5.2.1",
+ "portos-ai-toolkit": "github:atomantic/portos-ai-toolkit#v0.2.0"
}
}
diff --git a/server/cos-runner/index.js b/server/cos-runner/index.js
index 65d65d7..3430412 100644
--- a/server/cos-runner/index.js
+++ b/server/cos-runner/index.js
@@ -10,7 +10,7 @@
import express from 'express';
import { spawn } from 'child_process';
-import { join, dirname } from 'path';
+import { join, dirname, basename } from 'path';
import { fileURLToPath } from 'url';
import { writeFile, mkdir, readFile } from 'fs/promises';
import { existsSync } from 'fs';
@@ -27,6 +27,32 @@ const PORT = process.env.PORT || 5558;
const HOST = process.env.HOST || '127.0.0.1';
const RUNS_DIR = join(ROOT_DIR, 'data/runs');
+// Allowlist of permitted CLI commands to prevent arbitrary code execution.
+// Only commands in this list can be spawned by the runner.
+const ALLOWED_COMMANDS = new Set([
+ 'claude',
+ 'aider',
+ 'codex',
+ 'copilot'
+]);
+
+/**
+ * Validate that a command is in the allowlist.
+ * Extracts the base command name from the full path using path.basename for cross-platform support.
+ * Handles Windows .exe extensions by stripping them before checking.
+ */
+function isAllowedCommand(command) {
+ if (!command || typeof command !== 'string') return false;
+ // Extract base command name from full path (e.g., /usr/bin/claude -> claude)
+ // Uses path.basename for correct handling on both Unix and Windows
+ let baseName = basename(command);
+ // Normalize for Windows: strip trailing .exe (case-insensitive)
+ if (baseName.toLowerCase().endsWith('.exe')) {
+ baseName = baseName.slice(0, -4);
+ }
+ return ALLOWED_COMMANDS.has(baseName);
+}
+
// Active agent processes (in memory)
const activeAgents = new Map();
@@ -205,6 +231,10 @@ app.post('/spawn', async (req, res) => {
workspacePath,
model,
envVars = {},
+ // New: CLI-agnostic parameters
+ cliCommand,
+ cliArgs,
+ // Legacy: Claude-specific (deprecated)
claudePath = '/Users/antic/.nvm/versions/node/v25.2.1/bin/claude'
} = req.body;
@@ -212,23 +242,47 @@ app.post('/spawn', async (req, res) => {
return res.status(400).json({ error: 'Missing required fields: agentId, taskId, prompt' });
}
- // Build spawn arguments
- const spawnArgs = [
- '--dangerously-skip-permissions',
- '--print'
- ];
-
- if (model) {
- spawnArgs.push('--model', model);
+ // Use new CLI params if provided, otherwise fallback to legacy Claude defaults
+ let command, spawnArgs;
+ if (cliCommand) {
+ // Validate command against allowlist to prevent arbitrary code execution
+ if (!isAllowedCommand(cliCommand)) {
+ return res.status(400).json({
+ error: `Command not allowed: ${cliCommand}. Permitted commands: ${[...ALLOWED_COMMANDS].join(', ')}`
+ });
+ }
+ command = cliCommand;
+ // Default to empty args if cliArgs not provided
+ const args = cliArgs ?? [];
+ // Normalize cliArgs to an array
+ if (Array.isArray(args)) {
+ spawnArgs = args;
+ } else if (typeof args === 'string') {
+ spawnArgs = [args];
+ } else {
+ return res.status(400).json({
+ error: 'Invalid cliArgs: expected an array or string'
+ });
+ }
+ } else {
+ // Legacy: Claude-specific args
+ command = claudePath;
+ spawnArgs = [
+ '--dangerously-skip-permissions',
+ '--print'
+ ];
+ if (model) {
+ spawnArgs.push('--model', model);
+ }
}
- console.log(`๐ค Spawning agent ${agentId} for task ${taskId}`);
+ console.log(`๐ค Spawning agent ${agentId} for task ${taskId} (CLI: ${command})`);
// Ensure workspacePath is valid
const cwd = workspacePath && typeof workspacePath === 'string' ? workspacePath : ROOT_DIR;
- // Spawn the Claude CLI process
- const claudeProcess = spawn(claudePath, spawnArgs, {
+ // Spawn the CLI process
+ const claudeProcess = spawn(command, spawnArgs, {
cwd,
shell: false,
stdio: ['pipe', 'pipe', 'pipe'],
diff --git a/server/index.js b/server/index.js
index 3459792..c24b62e 100644
--- a/server/index.js
+++ b/server/index.js
@@ -4,7 +4,7 @@ import cors from 'cors';
import { createServer } from 'http';
import { Server } from 'socket.io';
import { fileURLToPath } from 'url';
-import { dirname } from 'path';
+import { dirname, join } from 'path';
import healthRoutes from './routes/health.js';
import appsRoutes from './routes/apps.js';
@@ -12,11 +12,8 @@ import portsRoutes from './routes/ports.js';
import logsRoutes from './routes/logs.js';
import detectRoutes from './routes/detect.js';
import scaffoldRoutes from './routes/scaffold.js';
-import providersRoutes from './routes/providers.js';
-import runsRoutes from './routes/runs.js';
import historyRoutes from './routes/history.js';
import commandsRoutes from './routes/commands.js';
-import promptsRoutes from './routes/prompts.js';
import gitRoutes from './routes/git.js';
import usageRoutes from './routes/usage.js';
import screenshotsRoutes from './routes/screenshots.js';
@@ -26,12 +23,25 @@ import scriptsRoutes from './routes/scripts.js';
import memoryRoutes from './routes/memory.js';
import notificationsRoutes from './routes/notifications.js';
import standardizeRoutes from './routes/standardize.js';
+import brainRoutes from './routes/brain.js';
+import mediaRoutes from './routes/media.js';
+import digitalTwinRoutes from './routes/digital-twin.js';
+import lmstudioRoutes from './routes/lmstudio.js';
import { initSocket } from './services/socket.js';
import { initScriptRunner } from './services/scriptRunner.js';
-import { errorMiddleware, setupProcessErrorHandlers } from './lib/errorHandler.js';
+import { errorMiddleware, setupProcessErrorHandlers, asyncHandler } from './lib/errorHandler.js';
import { initAutoFixer } from './services/autoFixer.js';
import { initTaskLearning } from './services/taskLearning.js';
+import { recordSession, recordMessages } from './services/usage.js';
+import { errorEvents } from './lib/errorHandler.js';
import './services/subAgentSpawner.js'; // Initialize CoS agent spawner
+import { createAIToolkit } from 'portos-ai-toolkit/server';
+import { createPortOSProviderRoutes } from './routes/providers.js';
+import { createPortOSRunsRoutes } from './routes/runs.js';
+import { createPortOSPromptsRoutes } from './routes/prompts.js';
+import { setAIToolkit as setProvidersToolkit } from './services/providers.js';
+import { setAIToolkit as setRunnerToolkit } from './services/runner.js';
+import { setAIToolkit as setPromptsToolkit } from './services/promptService.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
@@ -53,6 +63,67 @@ const io = new Server(httpServer, {
// Initialize socket handlers
initSocket(io);
+// Build absolute paths from __dirname to ensure consistency regardless of cwd
+const DATA_DIR = join(__dirname, '..', 'data');
+const DATA_SAMPLE_DIR = join(__dirname, '..', 'data.sample');
+
+// Initialize AI Toolkit with PortOS configuration and hooks
+const aiToolkit = createAIToolkit({
+ dataDir: DATA_DIR,
+ providersFile: 'providers.json',
+ runsDir: 'runs',
+ promptsDir: 'prompts',
+ screenshotsDir: join(DATA_DIR, 'screenshots'),
+ sampleProvidersFile: join(DATA_SAMPLE_DIR, 'providers.json'),
+ io,
+ asyncHandler,
+ hooks: {
+ onRunCreated: (metadata) => {
+ recordSession(metadata.providerId, metadata.providerName, metadata.model).catch(err => {
+ console.error(`โ Failed to record usage session: ${err.message}`);
+ });
+ },
+ onRunCompleted: (metadata, output) => {
+ const estimatedTokens = Math.ceil(output.length / 4);
+ recordMessages(metadata.providerId, metadata.model, 1, estimatedTokens).catch(err => {
+ console.error(`โ Failed to record usage: ${err.message}`);
+ });
+ },
+ onRunFailed: (metadata, error, output) => {
+ const errorMessage = error?.message ?? String(error);
+ errorEvents.emit('error', {
+ code: 'AI_PROVIDER_EXECUTION_FAILED',
+ message: `AI provider ${metadata.providerName} execution failed: ${errorMessage}`,
+ severity: 'error',
+ canAutoFix: true,
+ timestamp: Date.now(),
+ context: {
+ runId: metadata.id,
+ provider: metadata.providerName,
+ providerId: metadata.providerId,
+ model: metadata.model,
+ exitCode: metadata.exitCode,
+ duration: metadata.duration,
+ workspacePath: metadata.workspacePath,
+ workspaceName: metadata.workspaceName,
+ errorDetails: errorMessage,
+ // Note: promptPreview and outputTail intentionally omitted to avoid leaking sensitive data
+ }
+ });
+ }
+ }
+});
+
+// Initialize compatibility shims for services that import from old service files
+setProvidersToolkit(aiToolkit);
+setRunnerToolkit(aiToolkit);
+setPromptsToolkit(aiToolkit);
+
+// Initialize prompts service to load stage configurations
+aiToolkit.services.prompts.init().catch(err => {
+ console.error(`โ Failed to initialize prompts: ${err.message}`);
+});
+
// Initialize auto-fixer for error recovery
initAutoFixer();
@@ -78,11 +149,14 @@ app.use('/api/logs', logsRoutes);
app.use('/api/detect', detectRoutes);
app.use('/api/scaffold', scaffoldRoutes);
app.use('/api', scaffoldRoutes); // Also mount at /api for /api/templates
-app.use('/api/providers', providersRoutes);
-app.use('/api/runs', runsRoutes);
+
+// AI Toolkit routes with PortOS extensions
+app.use('/api/providers', createPortOSProviderRoutes(aiToolkit));
+app.use('/api/runs', createPortOSRunsRoutes(aiToolkit));
+app.use('/api/prompts', createPortOSPromptsRoutes(aiToolkit));
+
app.use('/api/history', historyRoutes);
app.use('/api/commands', commandsRoutes);
-app.use('/api/prompts', promptsRoutes);
app.use('/api/git', gitRoutes);
app.use('/api/usage', usageRoutes);
app.use('/api/screenshots', screenshotsRoutes);
@@ -92,6 +166,10 @@ app.use('/api/cos', cosRoutes);
app.use('/api/memory', memoryRoutes);
app.use('/api/notifications', notificationsRoutes);
app.use('/api/standardize', standardizeRoutes);
+app.use('/api/brain', brainRoutes);
+app.use('/api/media', mediaRoutes);
+app.use('/api/digital-twin', digitalTwinRoutes);
+app.use('/api/lmstudio', lmstudioRoutes);
// Initialize script runner
initScriptRunner().catch(err => console.error(`โ Script runner init failed: ${err.message}`));
diff --git a/server/lib/bm25.js b/server/lib/bm25.js
new file mode 100644
index 0000000..283e470
--- /dev/null
+++ b/server/lib/bm25.js
@@ -0,0 +1,354 @@
+/**
+ * BM25 (Best Matching 25) Algorithm Implementation
+ *
+ * A ranking function used for text retrieval that considers:
+ * - Term frequency (TF): How often a term appears in a document
+ * - Inverse document frequency (IDF): How rare/important a term is across all documents
+ * - Document length normalization: Penalizes longer documents fairly
+ *
+ * @see https://en.wikipedia.org/wiki/Okapi_BM25
+ */
+
+// Default BM25 parameters
+const DEFAULT_K1 = 1.5 // Term frequency saturation parameter (1.2-2.0 typical)
+const DEFAULT_B = 0.75 // Document length normalization (0 = no normalization, 1 = full)
+
+/**
+ * Tokenize text into terms for indexing
+ * @param {string} text - Text to tokenize
+ * @returns {string[]} - Array of lowercase terms
+ */
+function tokenize(text) {
+ if (!text || typeof text !== 'string') return []
+
+ return text
+ .toLowerCase()
+ .replace(/[^\w\s]/g, ' ') // Replace punctuation with spaces
+ .split(/\s+/) // Split on whitespace
+ .filter(term => term.length > 1) // Remove single characters
+ .filter(term => !STOP_WORDS.has(term)) // Remove stop words
+}
+
+/**
+ * Common English stop words to exclude from indexing
+ */
+const STOP_WORDS = new Set([
+ 'a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'for', 'from',
+ 'has', 'he', 'in', 'is', 'it', 'its', 'of', 'on', 'or', 'that',
+ 'the', 'to', 'was', 'were', 'will', 'with', 'this', 'but', 'they',
+ 'have', 'had', 'what', 'when', 'where', 'who', 'which', 'why', 'how',
+ 'all', 'each', 'every', 'both', 'few', 'more', 'most', 'other',
+ 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so',
+ 'than', 'too', 'very', 'just', 'can', 'should', 'now', 'into',
+ 'would', 'could', 'also', 'been', 'being', 'do', 'does', 'did'
+])
+
+/**
+ * Calculate IDF (Inverse Document Frequency) for a term
+ * Uses the BM25 IDF formula: log((N - n + 0.5) / (n + 0.5) + 1)
+ *
+ * @param {number} N - Total number of documents
+ * @param {number} n - Number of documents containing the term
+ * @returns {number} - IDF value
+ */
+function calculateIDF(N, n) {
+ if (n === 0 || N === 0) return 0
+ return Math.log((N - n + 0.5) / (n + 0.5) + 1)
+}
+
+/**
+ * Build an inverted index from documents
+ *
+ * @param {Array<{id: string, text: string}>} documents - Documents to index
+ * @returns {Object} - Inverted index structure
+ */
+function buildInvertedIndex(documents) {
+ const index = {
+ terms: {}, // term -> { docFreq, postings: { docId -> termFreq } }
+ docLengths: {}, // docId -> number of terms
+ avgDocLength: 0, // Average document length
+ totalDocs: 0, // Total number of documents
+ docIds: new Set() // Set of all document IDs
+ }
+
+ let totalTerms = 0
+
+ for (const doc of documents) {
+ if (!doc.id || !doc.text) continue
+
+ const terms = tokenize(doc.text)
+ const docId = doc.id
+
+ index.docIds.add(docId)
+ index.docLengths[docId] = terms.length
+ totalTerms += terms.length
+
+ // Count term frequencies in this document
+ const termFreqs = {}
+ for (const term of terms) {
+ termFreqs[term] = (termFreqs[term] || 0) + 1
+ }
+
+ // Update inverted index
+ for (const [term, freq] of Object.entries(termFreqs)) {
+ if (!index.terms[term]) {
+ index.terms[term] = { docFreq: 0, postings: {} }
+ }
+
+ if (!index.terms[term].postings[docId]) {
+ index.terms[term].docFreq++
+ }
+ index.terms[term].postings[docId] = freq
+ }
+ }
+
+ index.totalDocs = index.docIds.size
+ index.avgDocLength = index.totalDocs > 0 ? totalTerms / index.totalDocs : 0
+
+ return index
+}
+
+/**
+ * Add a single document to an existing index
+ *
+ * @param {Object} index - Existing inverted index
+ * @param {string} docId - Document ID
+ * @param {string} text - Document text
+ * @returns {Object} - Updated index
+ */
+function addDocument(index, docId, text) {
+ if (!docId || !text) return index
+
+ // Remove existing if updating
+ if (index.docIds.has(docId)) {
+ removeDocument(index, docId)
+ }
+
+ const terms = tokenize(text)
+
+ index.docIds.add(docId)
+ index.docLengths[docId] = terms.length
+
+ // Update average document length
+ const oldTotal = index.avgDocLength * index.totalDocs
+ index.totalDocs = index.docIds.size
+ index.avgDocLength = (oldTotal + terms.length) / index.totalDocs
+
+ // Count term frequencies
+ const termFreqs = {}
+ for (const term of terms) {
+ termFreqs[term] = (termFreqs[term] || 0) + 1
+ }
+
+ // Update inverted index
+ for (const [term, freq] of Object.entries(termFreqs)) {
+ if (!index.terms[term]) {
+ index.terms[term] = { docFreq: 0, postings: {} }
+ }
+
+ if (!index.terms[term].postings[docId]) {
+ index.terms[term].docFreq++
+ }
+ index.terms[term].postings[docId] = freq
+ }
+
+ return index
+}
+
+/**
+ * Remove a document from the index
+ *
+ * @param {Object} index - Existing inverted index
+ * @param {string} docId - Document ID to remove
+ * @returns {Object} - Updated index
+ */
+function removeDocument(index, docId) {
+ if (!index.docIds.has(docId)) return index
+
+ const docLength = index.docLengths[docId] || 0
+
+ // Update average document length
+ const oldTotal = index.avgDocLength * index.totalDocs
+ index.docIds.delete(docId)
+ index.totalDocs = index.docIds.size
+
+ if (index.totalDocs > 0) {
+ index.avgDocLength = (oldTotal - docLength) / index.totalDocs
+ } else {
+ index.avgDocLength = 0
+ }
+
+ delete index.docLengths[docId]
+
+ // Remove from inverted index
+ for (const term of Object.keys(index.terms)) {
+ if (index.terms[term].postings[docId]) {
+ delete index.terms[term].postings[docId]
+ index.terms[term].docFreq--
+
+ // Clean up empty terms
+ if (index.terms[term].docFreq === 0) {
+ delete index.terms[term]
+ }
+ }
+ }
+
+ return index
+}
+
+/**
+ * Calculate BM25 score for a query against a document
+ *
+ * @param {string} query - Search query
+ * @param {string} docId - Document ID to score
+ * @param {Object} index - Inverted index
+ * @param {Object} options - BM25 parameters
+ * @returns {number} - BM25 score
+ */
+function score(query, docId, index, options = {}) {
+ const k1 = options.k1 ?? DEFAULT_K1
+ const b = options.b ?? DEFAULT_B
+
+ if (!index.docIds.has(docId)) return 0
+
+ const queryTerms = tokenize(query)
+ const docLength = index.docLengths[docId] || 0
+ const avgDocLength = index.avgDocLength || 1
+ const N = index.totalDocs
+
+ let totalScore = 0
+
+ for (const term of queryTerms) {
+ const termData = index.terms[term]
+ if (!termData) continue
+
+ const tf = termData.postings[docId] || 0
+ if (tf === 0) continue
+
+ const idf = calculateIDF(N, termData.docFreq)
+
+ // BM25 term score formula
+ const numerator = tf * (k1 + 1)
+ const denominator = tf + k1 * (1 - b + b * (docLength / avgDocLength))
+
+ totalScore += idf * (numerator / denominator)
+ }
+
+ return totalScore
+}
+
+/**
+ * Search the index for documents matching a query
+ *
+ * @param {string} query - Search query
+ * @param {Object} index - Inverted index
+ * @param {Object} options - Search options
+ * @param {number} options.limit - Maximum results to return
+ * @param {number} options.threshold - Minimum score threshold
+ * @param {number} options.k1 - BM25 k1 parameter
+ * @param {number} options.b - BM25 b parameter
+ * @returns {Array<{docId: string, score: number}>} - Ranked results
+ */
+function search(query, index, options = {}) {
+ const { limit = 10, threshold = 0, k1 = DEFAULT_K1, b = DEFAULT_B } = options
+
+ const queryTerms = tokenize(query)
+ if (queryTerms.length === 0) return []
+
+ // Find candidate documents (those containing at least one query term)
+ const candidates = new Set()
+ for (const term of queryTerms) {
+ const termData = index.terms[term]
+ if (termData) {
+ for (const docId of Object.keys(termData.postings)) {
+ candidates.add(docId)
+ }
+ }
+ }
+
+ // Score all candidates
+ const results = []
+ for (const docId of candidates) {
+ const docScore = score(query, docId, index, { k1, b })
+ if (docScore > threshold) {
+ results.push({ docId, score: docScore })
+ }
+ }
+
+ // Sort by score descending and apply limit
+ results.sort((a, b) => b.score - a.score)
+ return results.slice(0, limit)
+}
+
+/**
+ * Create an empty index structure
+ * @returns {Object} - Empty index
+ */
+function createEmptyIndex() {
+ return {
+ terms: {},
+ docLengths: {},
+ avgDocLength: 0,
+ totalDocs: 0,
+ docIds: new Set()
+ }
+}
+
+/**
+ * Serialize index for persistence (converts Set to Array)
+ * @param {Object} index - Index to serialize
+ * @returns {Object} - Serializable index
+ */
+function serializeIndex(index) {
+ return {
+ ...index,
+ docIds: Array.from(index.docIds)
+ }
+}
+
+/**
+ * Deserialize index from storage (converts Array back to Set)
+ * @param {Object} data - Serialized index data
+ * @returns {Object} - Usable index
+ */
+function deserializeIndex(data) {
+ if (!data) return createEmptyIndex()
+
+ return {
+ terms: data.terms || {},
+ docLengths: data.docLengths || {},
+ avgDocLength: data.avgDocLength || 0,
+ totalDocs: data.totalDocs || 0,
+ docIds: new Set(data.docIds || [])
+ }
+}
+
+/**
+ * Get index statistics
+ * @param {Object} index - The index
+ * @returns {Object} - Statistics about the index
+ */
+function getIndexStats(index) {
+ return {
+ totalDocuments: index.totalDocs,
+ totalTerms: Object.keys(index.terms).length,
+ avgDocumentLength: Math.round(index.avgDocLength * 100) / 100
+ }
+}
+
+export {
+ tokenize,
+ calculateIDF,
+ buildInvertedIndex,
+ addDocument,
+ removeDocument,
+ score,
+ search,
+ createEmptyIndex,
+ serializeIndex,
+ deserializeIndex,
+ getIndexStats,
+ STOP_WORDS,
+ DEFAULT_K1,
+ DEFAULT_B
+}
diff --git a/server/lib/bm25.test.js b/server/lib/bm25.test.js
new file mode 100644
index 0000000..af3eace
--- /dev/null
+++ b/server/lib/bm25.test.js
@@ -0,0 +1,345 @@
+import { describe, it, expect } from 'vitest';
+import {
+ tokenize,
+ calculateIDF,
+ buildInvertedIndex,
+ addDocument,
+ removeDocument,
+ score,
+ search,
+ createEmptyIndex,
+ serializeIndex,
+ deserializeIndex,
+ getIndexStats,
+ STOP_WORDS
+} from './bm25.js';
+
+describe('BM25 Algorithm', () => {
+ describe('tokenize', () => {
+ it('should tokenize text into lowercase terms', () => {
+ const result = tokenize('Hello World');
+ expect(result).toContain('hello');
+ expect(result).toContain('world');
+ });
+
+ it('should remove punctuation', () => {
+ const result = tokenize('Hello, World! How are you?');
+ expect(result.every(t => !/[,!?]/.test(t))).toBe(true);
+ });
+
+ it('should remove single character tokens', () => {
+ const result = tokenize('I am a test');
+ expect(result).not.toContain('i');
+ expect(result).not.toContain('a');
+ });
+
+ it('should remove stop words', () => {
+ const result = tokenize('the quick brown fox jumps over the lazy dog');
+ expect(result).not.toContain('the');
+ expect(result).not.toContain('is');
+ expect(result).toContain('quick');
+ expect(result).toContain('brown');
+ expect(result).toContain('fox');
+ expect(result).toContain('over'); // 'over' is not a stop word in our list
+ });
+
+ it('should handle empty input', () => {
+ expect(tokenize('')).toEqual([]);
+ expect(tokenize(null)).toEqual([]);
+ expect(tokenize(undefined)).toEqual([]);
+ });
+
+ it('should handle non-string input', () => {
+ expect(tokenize(123)).toEqual([]);
+ expect(tokenize({})).toEqual([]);
+ });
+ });
+
+ describe('calculateIDF', () => {
+ it('should return 0 when n is 0', () => {
+ expect(calculateIDF(100, 0)).toBe(0);
+ });
+
+ it('should return 0 when N is 0', () => {
+ expect(calculateIDF(0, 5)).toBe(0);
+ });
+
+ it('should return higher values for rarer terms', () => {
+ const commonIDF = calculateIDF(1000, 500);
+ const rareIDF = calculateIDF(1000, 10);
+ expect(rareIDF).toBeGreaterThan(commonIDF);
+ });
+
+ it('should return positive values for valid inputs', () => {
+ expect(calculateIDF(100, 10)).toBeGreaterThan(0);
+ });
+ });
+
+ describe('buildInvertedIndex', () => {
+ it('should build index from documents', () => {
+ const docs = [
+ { id: 'doc1', text: 'quick brown fox' },
+ { id: 'doc2', text: 'lazy brown dog' }
+ ];
+ const index = buildInvertedIndex(docs);
+
+ expect(index.totalDocs).toBe(2);
+ expect(index.docIds.has('doc1')).toBe(true);
+ expect(index.docIds.has('doc2')).toBe(true);
+ expect(index.terms['brown'].docFreq).toBe(2);
+ expect(index.terms['quick'].docFreq).toBe(1);
+ });
+
+ it('should track document lengths', () => {
+ const docs = [
+ { id: 'doc1', text: 'word word word' },
+ { id: 'doc2', text: 'single' }
+ ];
+ const index = buildInvertedIndex(docs);
+
+ // Document length is total term count (including duplicates)
+ expect(index.docLengths['doc1']).toBe(3); // "word" appears 3 times
+ expect(index.docLengths['doc2']).toBe(1);
+ });
+
+ it('should calculate average document length', () => {
+ const docs = [
+ { id: 'doc1', text: 'quick brown fox jumps' },
+ { id: 'doc2', text: 'lazy dog' }
+ ];
+ const index = buildInvertedIndex(docs);
+
+ expect(index.avgDocLength).toBeGreaterThan(0);
+ });
+
+ it('should handle empty documents array', () => {
+ const index = buildInvertedIndex([]);
+ expect(index.totalDocs).toBe(0);
+ expect(index.avgDocLength).toBe(0);
+ });
+
+ it('should skip documents without id or text', () => {
+ const docs = [
+ { id: 'doc1', text: 'valid document' },
+ { text: 'no id' },
+ { id: 'doc3' },
+ {}
+ ];
+ const index = buildInvertedIndex(docs);
+
+ expect(index.totalDocs).toBe(1);
+ });
+ });
+
+ describe('addDocument', () => {
+ it('should add a document to existing index', () => {
+ const index = createEmptyIndex();
+ addDocument(index, 'doc1', 'hello world');
+
+ expect(index.totalDocs).toBe(1);
+ expect(index.docIds.has('doc1')).toBe(true);
+ });
+
+ it('should update average doc length', () => {
+ const index = createEmptyIndex();
+ addDocument(index, 'doc1', 'word word word');
+ const avgAfterFirst = index.avgDocLength;
+
+ addDocument(index, 'doc2', 'single');
+ expect(index.avgDocLength).not.toBe(avgAfterFirst);
+ });
+
+ it('should replace existing document with same id', () => {
+ const index = createEmptyIndex();
+ addDocument(index, 'doc1', 'original content');
+ addDocument(index, 'doc1', 'updated content');
+
+ expect(index.totalDocs).toBe(1);
+ expect(index.terms['updated']).toBeDefined();
+ });
+
+ it('should handle empty text', () => {
+ const index = createEmptyIndex();
+ addDocument(index, 'doc1', '');
+
+ expect(index.totalDocs).toBe(0);
+ });
+ });
+
+ describe('removeDocument', () => {
+ it('should remove a document from index', () => {
+ const docs = [
+ { id: 'doc1', text: 'quick brown fox' },
+ { id: 'doc2', text: 'lazy brown dog' }
+ ];
+ const index = buildInvertedIndex(docs);
+
+ removeDocument(index, 'doc1');
+
+ expect(index.totalDocs).toBe(1);
+ expect(index.docIds.has('doc1')).toBe(false);
+ expect(index.terms['quick']).toBeUndefined();
+ });
+
+ it('should update term frequencies', () => {
+ const docs = [
+ { id: 'doc1', text: 'brown' },
+ { id: 'doc2', text: 'brown' }
+ ];
+ const index = buildInvertedIndex(docs);
+
+ expect(index.terms['brown'].docFreq).toBe(2);
+ removeDocument(index, 'doc1');
+ expect(index.terms['brown'].docFreq).toBe(1);
+ });
+
+ it('should handle non-existent document', () => {
+ const index = createEmptyIndex();
+ addDocument(index, 'doc1', 'test');
+
+ removeDocument(index, 'nonexistent');
+ expect(index.totalDocs).toBe(1);
+ });
+ });
+
+ describe('score', () => {
+ it('should return 0 for non-existent document', () => {
+ const index = createEmptyIndex();
+ addDocument(index, 'doc1', 'test document');
+
+ expect(score('test', 'nonexistent', index)).toBe(0);
+ });
+
+ it('should return higher score for more matches', () => {
+ const docs = [
+ { id: 'doc1', text: 'quick brown fox' },
+ { id: 'doc2', text: 'quick quick quick' }
+ ];
+ const index = buildInvertedIndex(docs);
+
+ const score1 = score('quick', 'doc1', index);
+ const score2 = score('quick', 'doc2', index);
+
+ expect(score2).toBeGreaterThan(score1);
+ });
+
+ it('should return 0 when no query terms match', () => {
+ const index = createEmptyIndex();
+ addDocument(index, 'doc1', 'hello world');
+
+ expect(score('xyz', 'doc1', index)).toBe(0);
+ });
+ });
+
+ describe('search', () => {
+ it('should return ranked results', () => {
+ const docs = [
+ { id: 'doc1', text: 'python programming language' },
+ { id: 'doc2', text: 'javascript programming web' },
+ { id: 'doc3', text: 'python python python scripts' }
+ ];
+ const index = buildInvertedIndex(docs);
+
+ const results = search('python programming', index);
+
+ expect(results.length).toBeGreaterThan(0);
+ expect(results[0].score).toBeGreaterThanOrEqual(results[results.length - 1].score);
+ });
+
+ it('should respect limit parameter', () => {
+ const docs = Array.from({ length: 20 }, (_, i) => ({
+ id: `doc${i}`,
+ text: `document ${i} content test`
+ }));
+ const index = buildInvertedIndex(docs);
+
+ const results = search('content', index, { limit: 5 });
+ expect(results.length).toBe(5);
+ });
+
+ it('should respect threshold parameter', () => {
+ const docs = [
+ { id: 'doc1', text: 'exact match query' },
+ { id: 'doc2', text: 'partial match' }
+ ];
+ const index = buildInvertedIndex(docs);
+
+ const allResults = search('exact match', index, { threshold: 0 });
+ const highResults = search('exact match', index, { threshold: 1.0 });
+
+ expect(highResults.length).toBeLessThanOrEqual(allResults.length);
+ });
+
+ it('should return empty array for empty query', () => {
+ const index = createEmptyIndex();
+ addDocument(index, 'doc1', 'test document');
+
+ expect(search('', index)).toEqual([]);
+ });
+
+ it('should return empty array for stop-word-only query', () => {
+ const index = createEmptyIndex();
+ addDocument(index, 'doc1', 'test document');
+
+ expect(search('the and is', index)).toEqual([]);
+ });
+ });
+
+ describe('serialization', () => {
+ it('should serialize index with Set to Array', () => {
+ const index = createEmptyIndex();
+ addDocument(index, 'doc1', 'test');
+
+ const serialized = serializeIndex(index);
+ expect(Array.isArray(serialized.docIds)).toBe(true);
+ });
+
+ it('should deserialize index back to usable form', () => {
+ const original = createEmptyIndex();
+ addDocument(original, 'doc1', 'test document');
+ addDocument(original, 'doc2', 'another test');
+
+ const serialized = serializeIndex(original);
+ const deserialized = deserializeIndex(serialized);
+
+ expect(deserialized.docIds instanceof Set).toBe(true);
+ expect(deserialized.docIds.has('doc1')).toBe(true);
+ expect(deserialized.docIds.has('doc2')).toBe(true);
+ expect(deserialized.totalDocs).toBe(2);
+ });
+
+ it('should handle null data in deserialize', () => {
+ const result = deserializeIndex(null);
+ expect(result.totalDocs).toBe(0);
+ expect(result.docIds instanceof Set).toBe(true);
+ });
+ });
+
+ describe('getIndexStats', () => {
+ it('should return correct statistics', () => {
+ const docs = [
+ { id: 'doc1', text: 'hello world test' },
+ { id: 'doc2', text: 'another test document' },
+ { id: 'doc3', text: 'third document here' }
+ ];
+ const index = buildInvertedIndex(docs);
+ const stats = getIndexStats(index);
+
+ expect(stats.totalDocuments).toBe(3);
+ expect(stats.totalTerms).toBeGreaterThan(0);
+ expect(stats.avgDocumentLength).toBeGreaterThan(0);
+ });
+ });
+
+ describe('STOP_WORDS', () => {
+ it('should be a Set', () => {
+ expect(STOP_WORDS instanceof Set).toBe(true);
+ });
+
+ it('should contain common English stop words', () => {
+ expect(STOP_WORDS.has('the')).toBe(true);
+ expect(STOP_WORDS.has('and')).toBe(true);
+ expect(STOP_WORDS.has('is')).toBe(true);
+ });
+ });
+});
diff --git a/server/lib/brainValidation.js b/server/lib/brainValidation.js
new file mode 100644
index 0000000..6033f0c
--- /dev/null
+++ b/server/lib/brainValidation.js
@@ -0,0 +1,285 @@
+import { z } from 'zod';
+
+// Destination enum
+export const destinationEnum = z.enum(['people', 'projects', 'ideas', 'admin', 'unknown']);
+
+// Project status enum
+export const projectStatusEnum = z.enum(['active', 'waiting', 'blocked', 'someday', 'done']);
+
+// Admin status enum
+export const adminStatusEnum = z.enum(['open', 'waiting', 'done']);
+
+// Inbox log status enum
+export const inboxStatusEnum = z.enum(['filed', 'needs_review', 'corrected', 'error']);
+
+// AI configuration schema
+export const aiConfigSchema = z.object({
+ providerId: z.string(),
+ modelId: z.string(),
+ promptTemplateId: z.string(),
+ temperature: z.number().min(0).max(2).optional(),
+ maxTokens: z.number().int().positive().optional()
+});
+
+// Classification result schema
+export const classificationSchema = z.object({
+ destination: destinationEnum,
+ confidence: z.number().min(0).max(1),
+ title: z.string().min(1).max(200),
+ extracted: z.record(z.unknown()),
+ reasons: z.array(z.string()).max(5).optional()
+});
+
+// Filed info schema
+export const filedSchema = z.object({
+ destination: destinationEnum.exclude(['unknown']),
+ destinationId: z.string().uuid()
+});
+
+// Correction schema
+export const correctionSchema = z.object({
+ correctedAt: z.string().datetime(),
+ previousDestination: destinationEnum,
+ newDestination: destinationEnum.exclude(['unknown']),
+ note: z.string().max(500).optional()
+});
+
+// Error schema
+export const errorSchema = z.object({
+ message: z.string(),
+ stack: z.string().optional()
+});
+
+// Inbox Log Record schema
+export const inboxLogRecordSchema = z.object({
+ id: z.string().uuid(),
+ capturedText: z.string().min(1).max(10000),
+ capturedAt: z.string().datetime(),
+ source: z.literal('brain_ui'),
+ ai: aiConfigSchema.optional(),
+ classification: classificationSchema.optional(),
+ status: inboxStatusEnum,
+ filed: filedSchema.optional(),
+ correction: correctionSchema.optional(),
+ error: errorSchema.optional()
+});
+
+// People Record schema
+export const peopleRecordSchema = z.object({
+ id: z.string().uuid(),
+ name: z.string().min(1).max(200),
+ context: z.string().max(2000).optional().default(''),
+ followUps: z.array(z.string().max(500)).optional().default([]),
+ lastTouched: z.string().datetime().optional(),
+ tags: z.array(z.string().max(50)).optional().default([]),
+ createdAt: z.string().datetime(),
+ updatedAt: z.string().datetime()
+});
+
+// Project Record schema
+export const projectRecordSchema = z.object({
+ id: z.string().uuid(),
+ name: z.string().min(1).max(200),
+ status: projectStatusEnum,
+ nextAction: z.string().min(1).max(500),
+ notes: z.string().max(5000).optional(),
+ tags: z.array(z.string().max(50)).optional().default([]),
+ createdAt: z.string().datetime(),
+ updatedAt: z.string().datetime()
+});
+
+// Idea Record schema
+export const ideaRecordSchema = z.object({
+ id: z.string().uuid(),
+ title: z.string().min(1).max(200),
+ oneLiner: z.string().min(1).max(500),
+ notes: z.string().max(5000).optional(),
+ tags: z.array(z.string().max(50)).optional().default([]),
+ createdAt: z.string().datetime(),
+ updatedAt: z.string().datetime()
+});
+
+// Admin Record schema
+export const adminRecordSchema = z.object({
+ id: z.string().uuid(),
+ title: z.string().min(1).max(200),
+ status: adminStatusEnum,
+ dueDate: z.string().datetime().optional(),
+ nextAction: z.string().max(500).optional(),
+ notes: z.string().max(5000).optional(),
+ createdAt: z.string().datetime(),
+ updatedAt: z.string().datetime()
+});
+
+// Meta/Settings schema
+export const brainSettingsSchema = z.object({
+ version: z.number().int().positive().default(1),
+ confidenceThreshold: z.number().min(0).max(1).default(0.6),
+ dailyDigestTime: z.string().regex(/^\d{2}:\d{2}$/).default('09:00'),
+ weeklyReviewTime: z.string().regex(/^\d{2}:\d{2}$/).default('16:00'),
+ weeklyReviewDay: z.enum(['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']).default('sunday'),
+ defaultProvider: z.string().default('lmstudio'),
+ defaultModel: z.string().default('gptoss-20b'),
+ lastDailyDigest: z.string().datetime().optional(),
+ lastWeeklyReview: z.string().datetime().optional()
+});
+
+// Digest Record schema
+export const digestRecordSchema = z.object({
+ id: z.string().uuid(),
+ generatedAt: z.string().datetime(),
+ digestText: z.string().max(2000),
+ topActions: z.array(z.string().max(200)).max(3),
+ stuckThing: z.string().max(200),
+ smallWin: z.string().max(200),
+ ai: aiConfigSchema.optional()
+});
+
+// Weekly Review Record schema
+export const reviewRecordSchema = z.object({
+ id: z.string().uuid(),
+ generatedAt: z.string().datetime(),
+ reviewText: z.string().max(3000),
+ whatHappened: z.array(z.string().max(200)).max(5),
+ biggestOpenLoops: z.array(z.string().max(200)).max(3),
+ suggestedActionsNextWeek: z.array(z.string().max(200)).max(3),
+ recurringTheme: z.string().max(500),
+ ai: aiConfigSchema.optional()
+});
+
+// --- Input schemas for API endpoints ---
+
+// Capture input schema
+export const captureInputSchema = z.object({
+ text: z.string().min(1).max(10000),
+ providerOverride: z.string().optional(),
+ modelOverride: z.string().optional()
+});
+
+// Resolve review input schema
+export const resolveReviewInputSchema = z.object({
+ inboxLogId: z.string().uuid(),
+ destination: destinationEnum.exclude(['unknown']),
+ editedExtracted: z.record(z.unknown()).optional()
+});
+
+// Fix classification input schema
+export const fixInputSchema = z.object({
+ inboxLogId: z.string().uuid(),
+ newDestination: destinationEnum.exclude(['unknown']),
+ updatedFields: z.record(z.unknown()).optional(),
+ note: z.string().max(500).optional()
+});
+
+// Update inbox entry input schema
+export const updateInboxInputSchema = z.object({
+ capturedText: z.string().min(1).max(10000)
+});
+
+// Create/Update People input schema
+export const peopleInputSchema = z.object({
+ name: z.string().min(1).max(200),
+ context: z.string().max(2000).optional(),
+ followUps: z.array(z.string().max(500)).optional(),
+ lastTouched: z.string().datetime().optional(),
+ tags: z.array(z.string().max(50)).optional()
+});
+
+// Create/Update Project input schema
+export const projectInputSchema = z.object({
+ name: z.string().min(1).max(200),
+ status: projectStatusEnum.optional().default('active'),
+ nextAction: z.string().min(1).max(500),
+ notes: z.string().max(5000).optional(),
+ tags: z.array(z.string().max(50)).optional()
+});
+
+// Create/Update Idea input schema
+export const ideaInputSchema = z.object({
+ title: z.string().min(1).max(200),
+ oneLiner: z.string().min(1).max(500),
+ notes: z.string().max(5000).optional(),
+ tags: z.array(z.string().max(50)).optional()
+});
+
+// Create/Update Admin input schema
+export const adminInputSchema = z.object({
+ title: z.string().min(1).max(200),
+ status: adminStatusEnum.optional().default('open'),
+ dueDate: z.string().datetime().optional(),
+ nextAction: z.string().max(500).optional(),
+ notes: z.string().max(5000).optional()
+});
+
+// Settings update input schema
+export const settingsUpdateInputSchema = brainSettingsSchema.partial().omit({ version: true, lastDailyDigest: true, lastWeeklyReview: true });
+
+// Inbox query schema
+export const inboxQuerySchema = z.object({
+ status: inboxStatusEnum.optional(),
+ limit: z.coerce.number().int().min(1).max(100).optional().default(50),
+ offset: z.coerce.number().int().min(0).optional().default(0)
+});
+
+// --- Extracted field schemas for AI classification ---
+
+// Extracted People fields
+export const extractedPeopleSchema = z.object({
+ name: z.string().min(1).max(200),
+ context: z.string().max(2000).optional().default(''),
+ followUps: z.array(z.string().max(500)).optional().default([]),
+ lastTouched: z.string().datetime().nullable().optional(),
+ tags: z.array(z.string().max(50)).optional().default([])
+});
+
+// Extracted Project fields
+export const extractedProjectSchema = z.object({
+ name: z.string().min(1).max(200),
+ status: projectStatusEnum.optional().default('active'),
+ nextAction: z.string().min(1).max(500),
+ notes: z.string().max(5000).optional().default(''),
+ tags: z.array(z.string().max(50)).optional().default([])
+});
+
+// Extracted Idea fields
+export const extractedIdeaSchema = z.object({
+ title: z.string().min(1).max(200),
+ oneLiner: z.string().min(1).max(500),
+ notes: z.string().max(5000).optional().default(''),
+ tags: z.array(z.string().max(50)).optional().default([])
+});
+
+// Extracted Admin fields
+export const extractedAdminSchema = z.object({
+ title: z.string().min(1).max(200),
+ status: adminStatusEnum.optional().default('open'),
+ dueDate: z.string().datetime().nullable().optional(),
+ nextAction: z.string().max(500).nullable().optional(),
+ notes: z.string().max(5000).optional().default('')
+});
+
+// AI Classifier output schema (what we expect from the AI)
+export const classifierOutputSchema = z.object({
+ destination: destinationEnum,
+ confidence: z.number().min(0).max(1),
+ title: z.string().min(1).max(200),
+ extracted: z.record(z.unknown()),
+ reasons: z.array(z.string()).max(5).optional().default([])
+});
+
+// Daily digest AI output schema
+export const digestOutputSchema = z.object({
+ digestText: z.string(),
+ topActions: z.array(z.string()).max(3),
+ stuckThing: z.string(),
+ smallWin: z.string()
+});
+
+// Weekly review AI output schema
+export const reviewOutputSchema = z.object({
+ reviewText: z.string(),
+ whatHappened: z.array(z.string()).max(5),
+ biggestOpenLoops: z.array(z.string()).max(3),
+ suggestedActionsNextWeek: z.array(z.string()).max(3),
+ recurringTheme: z.string()
+});
diff --git a/server/lib/digitalTwinValidation.js b/server/lib/digitalTwinValidation.js
new file mode 100644
index 0000000..6be5cf9
--- /dev/null
+++ b/server/lib/digitalTwinValidation.js
@@ -0,0 +1,380 @@
+import { z } from 'zod';
+
+// Document category enum
+export const documentCategoryEnum = z.enum([
+ 'core', // Core identity, values, philosophy
+ 'audio', // Music, audio preferences
+ 'behavioral', // Behavioral test suites
+ 'enrichment', // Generated from enrichment Q&A
+ 'entertainment', // Movies, books, TV, games
+ 'professional', // Career, skills, work style
+ 'lifestyle', // Routines, health, habits
+ 'social', // Communication, relationships
+ 'creative' // Aesthetic preferences, creative interests
+]);
+
+// Test result enum
+export const testResultEnum = z.enum(['passed', 'partial', 'failed', 'pending']);
+
+// Export format enum
+export const exportFormatEnum = z.enum(['system_prompt', 'claude_md', 'json', 'individual']);
+
+// Enrichment category enum
+export const enrichmentCategoryEnum = z.enum([
+ 'core_memories',
+ 'favorite_books',
+ 'favorite_movies',
+ 'music_taste',
+ 'communication',
+ 'decision_making',
+ 'values',
+ 'aesthetics',
+ 'daily_routines',
+ 'career_skills',
+ 'non_negotiables',
+ 'decision_heuristics',
+ 'error_intolerance',
+ 'personality_assessments'
+]);
+
+// Document metadata schema
+export const documentMetaSchema = z.object({
+ id: z.string().min(1),
+ filename: z.string().min(1),
+ title: z.string().min(1).max(200),
+ category: documentCategoryEnum,
+ version: z.string().optional(),
+ enabled: z.boolean().default(true),
+ priority: z.number().int().min(0).default(0),
+ weight: z.number().int().min(1).max(10).default(5)
+});
+
+// Test history entry schema
+export const testHistoryEntrySchema = z.object({
+ runId: z.string().uuid(),
+ providerId: z.string(),
+ model: z.string(),
+ score: z.number().min(0).max(1),
+ passed: z.number().int().min(0),
+ failed: z.number().int().min(0),
+ partial: z.number().int().min(0),
+ total: z.number().int().min(0),
+ timestamp: z.string().datetime()
+});
+
+// Individual test result schema
+export const testResultSchema = z.object({
+ testId: z.number().int().min(1),
+ testName: z.string(),
+ prompt: z.string(),
+ expectedBehavior: z.string(),
+ failureSignals: z.string(),
+ response: z.string().optional(),
+ result: testResultEnum,
+ reasoning: z.string().optional()
+});
+
+// Enrichment progress schema
+export const enrichmentProgressSchema = z.object({
+ completedCategories: z.array(enrichmentCategoryEnum).default([]),
+ lastSession: z.string().datetime().nullable().optional(),
+ questionsAnswered: z.record(enrichmentCategoryEnum, z.number().int().min(0)).optional()
+});
+
+// Digital Twin settings schema
+export const digitalTwinSettingsSchema = z.object({
+ autoInjectToCoS: z.boolean().default(true),
+ maxContextTokens: z.number().int().min(1000).max(100000).default(4000)
+});
+export const soulSettingsSchema = digitalTwinSettingsSchema; // Alias for backwards compatibility
+
+// --- Phase 1: Quantitative Personality Modeling Schemas ---
+
+// Big Five personality traits (OCEAN model)
+export const bigFiveSchema = z.object({
+ O: z.number().min(0).max(1).describe('Openness to experience'),
+ C: z.number().min(0).max(1).describe('Conscientiousness'),
+ E: z.number().min(0).max(1).describe('Extraversion'),
+ A: z.number().min(0).max(1).describe('Agreeableness'),
+ N: z.number().min(0).max(1).describe('Neuroticism')
+});
+
+// Communication profile schema
+export const communicationProfileSchema = z.object({
+ formality: z.number().int().min(1).max(10).describe('1=very casual, 10=very formal'),
+ verbosity: z.number().int().min(1).max(10).describe('1=terse, 10=elaborate'),
+ avgSentenceLength: z.number().min(5).max(50).optional(),
+ emojiUsage: z.enum(['never', 'rare', 'occasional', 'frequent']).default('rare'),
+ preferredTone: z.string().max(100).optional(),
+ distinctiveMarkers: z.array(z.string().max(200)).max(10).optional()
+});
+
+// Valued trait with priority
+export const valuedTraitSchema = z.object({
+ value: z.string().min(1).max(100),
+ priority: z.number().int().min(1).max(10),
+ description: z.string().max(500).optional(),
+ conflictsWith: z.array(z.string()).optional()
+});
+
+// Full traits schema
+export const traitsSchema = z.object({
+ bigFive: bigFiveSchema.optional(),
+ valuesHierarchy: z.array(valuedTraitSchema).max(20).optional(),
+ communicationProfile: communicationProfileSchema.optional(),
+ lastAnalyzed: z.string().datetime().optional(),
+ analysisVersion: z.string().optional()
+});
+
+// --- Phase 2: Confidence Scoring Schemas ---
+
+// Confidence dimension enum
+export const confidenceDimensionEnum = z.enum([
+ 'openness', 'conscientiousness', 'extraversion', 'agreeableness', 'neuroticism',
+ 'values', 'communication', 'decision_making', 'boundaries', 'identity'
+]);
+
+// Gap recommendation
+export const gapRecommendationSchema = z.object({
+ dimension: confidenceDimensionEnum,
+ confidence: z.number().min(0).max(1),
+ evidenceCount: z.number().int().min(0),
+ requiredEvidence: z.number().int().min(1),
+ suggestedQuestions: z.array(z.string().max(500)).max(5),
+ suggestedCategory: enrichmentCategoryEnum.optional()
+});
+
+// Full confidence schema
+export const confidenceSchema = z.object({
+ overall: z.number().min(0).max(1),
+ dimensions: z.record(confidenceDimensionEnum, z.number().min(0).max(1)),
+ gaps: z.array(gapRecommendationSchema),
+ lastCalculated: z.string().datetime().optional()
+});
+
+// Full meta.json schema
+export const digitalTwinMetaSchema = z.object({
+ version: z.string().default('1.0.0'),
+ documents: z.array(documentMetaSchema).default([]),
+ testHistory: z.array(testHistoryEntrySchema).default([]),
+ enrichment: enrichmentProgressSchema.default({ completedCategories: [], lastSession: null }),
+ settings: digitalTwinSettingsSchema.default({ autoInjectToCoS: true, maxContextTokens: 4000 }),
+ traits: traitsSchema.optional(),
+ confidence: confidenceSchema.optional()
+});
+export const soulMetaSchema = digitalTwinMetaSchema; // Alias for backwards compatibility
+
+// --- Input schemas for API endpoints ---
+
+// Create document input
+export const createDocumentInputSchema = z.object({
+ filename: z.string().min(1).max(100).regex(/^[\w\-]+\.md$/, 'Filename must be a valid markdown filename'),
+ title: z.string().min(1).max(200),
+ category: documentCategoryEnum,
+ content: z.string().min(1).max(1000000),
+ enabled: z.boolean().optional().default(true),
+ priority: z.number().int().min(0).optional().default(0)
+});
+
+// Update document input
+export const updateDocumentInputSchema = z.object({
+ content: z.string().min(1).max(1000000).optional(),
+ title: z.string().min(1).max(200).optional(),
+ enabled: z.boolean().optional(),
+ priority: z.number().int().min(0).optional(),
+ weight: z.number().int().min(1).max(10).optional()
+});
+
+// Run tests input
+export const runTestsInputSchema = z.object({
+ providerId: z.string().min(1),
+ model: z.string().min(1),
+ testIds: z.array(z.number().int().min(1)).optional()
+});
+
+// Run multi-model tests input
+export const runMultiTestsInputSchema = z.object({
+ providers: z.array(z.object({
+ providerId: z.string().min(1),
+ model: z.string().min(1)
+ })).min(1).max(10),
+ testIds: z.array(z.number().int().min(1)).optional()
+});
+
+// Enrichment question input
+export const enrichmentQuestionInputSchema = z.object({
+ category: enrichmentCategoryEnum,
+ providerOverride: z.string().optional(),
+ modelOverride: z.string().optional()
+});
+
+// Enrichment answer input
+export const enrichmentAnswerInputSchema = z.object({
+ questionId: z.string().uuid(),
+ category: enrichmentCategoryEnum,
+ question: z.string().min(1),
+ answer: z.string().min(1).max(10000),
+ providerOverride: z.string().optional(),
+ modelOverride: z.string().optional()
+});
+
+// Export input
+export const exportInputSchema = z.object({
+ format: exportFormatEnum,
+ documentIds: z.array(z.string()).optional(),
+ includeDisabled: z.boolean().optional().default(false)
+});
+
+// Settings update input
+export const settingsUpdateInputSchema = soulSettingsSchema.partial();
+
+// Test history query
+export const testHistoryQuerySchema = z.object({
+ limit: z.coerce.number().int().min(1).max(100).optional().default(10)
+});
+
+// Contradiction detection input
+export const contradictionInputSchema = z.object({
+ providerId: z.string().min(1),
+ model: z.string().min(1)
+});
+
+// Dynamic test generation input
+export const generateTestsInputSchema = z.object({
+ providerId: z.string().min(1),
+ model: z.string().min(1)
+});
+
+// Writing sample analysis input
+export const writingAnalysisInputSchema = z.object({
+ samples: z.array(z.string().min(10)).min(1).max(10),
+ providerId: z.string().min(1),
+ model: z.string().min(1)
+});
+
+// List-based enrichment item
+export const listItemSchema = z.object({
+ title: z.string().min(1).max(500),
+ note: z.string().max(2000).optional()
+});
+
+// Analyze list input
+export const analyzeListInputSchema = z.object({
+ category: enrichmentCategoryEnum,
+ items: z.array(listItemSchema).min(1).max(50),
+ providerId: z.string().min(1),
+ model: z.string().min(1)
+});
+
+// Save list document input
+export const saveListDocumentInputSchema = z.object({
+ category: enrichmentCategoryEnum,
+ content: z.string().min(1).max(100000),
+ items: z.array(listItemSchema).min(1).max(50)
+});
+
+// Get list items input
+export const getListItemsInputSchema = z.object({
+ category: enrichmentCategoryEnum
+});
+
+// --- Input schemas for trait and confidence endpoints ---
+
+// Analyze traits input
+export const analyzeTraitsInputSchema = z.object({
+ providerId: z.string().min(1),
+ model: z.string().min(1),
+ forceReanalyze: z.boolean().optional().default(false)
+});
+
+// Update traits input (manual override)
+export const updateTraitsInputSchema = z.object({
+ bigFive: bigFiveSchema.partial().optional(),
+ valuesHierarchy: z.array(valuedTraitSchema).max(20).optional(),
+ communicationProfile: communicationProfileSchema.partial().optional()
+});
+
+// Calculate confidence input
+export const calculateConfidenceInputSchema = z.object({
+ providerId: z.string().min(1).optional(),
+ model: z.string().min(1).optional()
+});
+
+// --- Phase 4: External Data Import Schemas ---
+
+// Import source enum
+export const importSourceEnum = z.enum([
+ 'goodreads',
+ 'spotify',
+ 'lastfm',
+ 'letterboxd',
+ 'ical'
+]);
+
+// Goodreads book entry (parsed from CSV)
+export const goodreadsBookSchema = z.object({
+ title: z.string(),
+ author: z.string().optional(),
+ rating: z.number().min(0).max(5).optional(),
+ dateRead: z.string().optional(),
+ shelves: z.array(z.string()).optional(),
+ review: z.string().optional()
+});
+
+// Spotify track/artist entry (parsed from JSON export)
+export const spotifyEntrySchema = z.object({
+ trackName: z.string().optional(),
+ artistName: z.string(),
+ albumName: z.string().optional(),
+ playCount: z.number().int().optional(),
+ msPlayed: z.number().int().optional()
+});
+
+// Letterboxd film entry
+export const letterboxdFilmSchema = z.object({
+ title: z.string(),
+ year: z.number().int().optional(),
+ rating: z.number().min(0).max(5).optional(),
+ watchedDate: z.string().optional(),
+ review: z.string().optional(),
+ tags: z.array(z.string()).optional()
+});
+
+// Calendar event for pattern analysis
+export const calendarEventSchema = z.object({
+ summary: z.string(),
+ start: z.string(),
+ end: z.string().optional(),
+ recurring: z.boolean().optional(),
+ categories: z.array(z.string()).optional()
+});
+
+// Import data input (raw data to parse)
+export const importDataInputSchema = z.object({
+ source: importSourceEnum,
+ data: z.string().min(1).max(10000000), // Up to 10MB of text data
+ providerId: z.string().min(1),
+ model: z.string().min(1)
+});
+
+// Import analysis result
+export const importAnalysisResultSchema = z.object({
+ source: importSourceEnum,
+ itemCount: z.number().int(),
+ insights: z.object({
+ patterns: z.array(z.string()).optional(),
+ preferences: z.array(z.string()).optional(),
+ personalityInferences: z.object({
+ bigFive: bigFiveSchema.partial().optional(),
+ values: z.array(z.string()).optional(),
+ interests: z.array(z.string()).optional()
+ }).optional()
+ }),
+ suggestedDocuments: z.array(z.object({
+ filename: z.string(),
+ title: z.string(),
+ category: documentCategoryEnum,
+ content: z.string()
+ })).optional(),
+ rawSummary: z.string().optional()
+});
diff --git a/server/lib/fileUtils.js b/server/lib/fileUtils.js
new file mode 100644
index 0000000..e65a08b
--- /dev/null
+++ b/server/lib/fileUtils.js
@@ -0,0 +1,250 @@
+/**
+ * File System Utilities
+ *
+ * Shared utilities for file operations used across services.
+ */
+
+import { mkdir, readFile } from 'fs/promises';
+import { join, dirname } from 'path';
+import { fileURLToPath } from 'url';
+
+// Cache __dirname calculation for services importing this module
+const __lib_filename = fileURLToPath(import.meta.url);
+const __lib_dirname = dirname(__lib_filename);
+
+/**
+ * Base directories relative to project root
+ */
+export const PATHS = {
+ root: join(__lib_dirname, '../..'),
+ data: join(__lib_dirname, '../../data'),
+ cos: join(__lib_dirname, '../../data/cos'),
+ brain: join(__lib_dirname, '../../data/brain'),
+ digitalTwin: join(__lib_dirname, '../../data/digital-twin'),
+ runs: join(__lib_dirname, '../../data/runs'),
+ memory: join(__lib_dirname, '../../data/cos/memory'),
+ agents: join(__lib_dirname, '../../data/cos/agents'),
+ scripts: join(__lib_dirname, '../../data/cos/scripts'),
+ reports: join(__lib_dirname, '../../data/cos/reports')
+};
+
+/**
+ * Ensure a directory exists, creating it recursively if needed.
+ * Uses mkdir with recursive: true which is idempotent and avoids TOCTOU races.
+ *
+ * @param {string} dir - Directory path to ensure exists
+ * @returns {Promise
}
+ *
+ * @example
+ * await ensureDir(PATHS.data);
+ * await ensureDir('/custom/path/to/dir');
+ */
+export async function ensureDir(dir) {
+ // mkdir with recursive: true is idempotent - it succeeds if dir exists
+ await mkdir(dir, { recursive: true });
+}
+
+/**
+ * Ensure multiple directories exist.
+ *
+ * @param {string[]} dirs - Array of directory paths to ensure exist
+ * @returns {Promise}
+ *
+ * @example
+ * await ensureDirs([PATHS.data, PATHS.cos, PATHS.memory]);
+ */
+export async function ensureDirs(dirs) {
+ for (const dir of dirs) {
+ await ensureDir(dir);
+ }
+}
+
+/**
+ * Get a path relative to the data directory.
+ *
+ * @param {...string} segments - Path segments to join
+ * @returns {string} Full path under data directory
+ *
+ * @example
+ * const filePath = dataPath('cos', 'state.json');
+ * // Returns: /path/to/project/data/cos/state.json
+ */
+export function dataPath(...segments) {
+ return join(PATHS.data, ...segments);
+}
+
+/**
+ * Get a path relative to the project root.
+ *
+ * @param {...string} segments - Path segments to join
+ * @returns {string} Full path under project root
+ *
+ * @example
+ * const filePath = rootPath('data', 'TASKS.md');
+ * // Returns: /path/to/project/data/TASKS.md
+ */
+export function rootPath(...segments) {
+ return join(PATHS.root, ...segments);
+}
+
+/**
+ * Check if a string is potentially valid JSON.
+ * Performs quick structural validation before parsing.
+ *
+ * @param {string} str - String to validate
+ * @param {Object} options - Validation options
+ * @param {boolean} [options.allowArray=true] - Allow array JSON (default: true)
+ * @returns {boolean} True if the string appears to be valid JSON
+ *
+ * @example
+ * isValidJSON('{"key": "value"}') // true
+ * isValidJSON('[1, 2, 3]') // true
+ * isValidJSON('') // false
+ * isValidJSON('{"incomplete":') // false
+ */
+export function isValidJSON(str, { allowArray = true } = {}) {
+ if (!str || !str.trim()) return false;
+ const trimmed = str.trim();
+
+ // Check for basic JSON structure (object or array)
+ const isObject = trimmed.startsWith('{') && trimmed.endsWith('}');
+ const isArray = trimmed.startsWith('[') && trimmed.endsWith(']');
+
+ if (!isObject && !(allowArray && isArray)) return false;
+
+ return true;
+}
+
+/**
+ * Safely parse JSON with validation and fallback.
+ * Avoids "Unexpected end of JSON input" errors from empty/corrupted files.
+ *
+ * @param {string} str - JSON string to parse
+ * @param {*} defaultValue - Default value if parsing fails (default: null)
+ * @param {Object} options - Parse options
+ * @param {boolean} [options.allowArray=true] - Allow array JSON
+ * @param {boolean} [options.logError=false] - Log parsing errors
+ * @param {string} [options.context=''] - Context for error logging
+ * @returns {*} Parsed JSON or default value
+ *
+ * @example
+ * safeJSONParse('{"key": "value"}', {}) // { key: "value" }
+ * safeJSONParse('', {}) // {}
+ * safeJSONParse('invalid', []) // []
+ * safeJSONParse(null, { default: true }) // { default: true }
+ */
+export function safeJSONParse(str, defaultValue = null, { allowArray = true, logError = false, context = '' } = {}) {
+ if (!isValidJSON(str, { allowArray })) {
+ if (logError && str) {
+ console.warn(`Invalid JSON${context ? ` in ${context}` : ''}: empty or malformed content`);
+ }
+ return defaultValue;
+ }
+
+ // Attempt actual parse - the validation above catches structural issues
+ // but syntax errors like trailing commas still need handling
+ try {
+ return JSON.parse(str);
+ } catch (err) {
+ if (logError) {
+ console.warn(`Failed to parse JSON${context ? ` in ${context}` : ''}: ${err.message}`);
+ }
+ return defaultValue;
+ }
+}
+
+/**
+ * Read a JSON file safely with validation and default fallback.
+ * Combines file reading with safe JSON parsing.
+ *
+ * @param {string} filePath - Path to JSON file
+ * @param {*} defaultValue - Default value if file doesn't exist or is invalid
+ * @param {Object} options - Options
+ * @param {boolean} [options.allowArray=true] - Allow array JSON
+ * @param {boolean} [options.logError=true] - Log errors
+ * @returns {Promise<*>} Parsed JSON or default value
+ *
+ * @example
+ * const config = await readJSONFile('./config.json', { port: 3000 });
+ * const items = await readJSONFile('./items.json', []);
+ */
+export async function readJSONFile(filePath, defaultValue = null, { allowArray = true, logError = true } = {}) {
+ let content;
+ try {
+ content = await readFile(filePath, 'utf-8');
+ } catch (err) {
+ // ENOENT = file doesn't exist, return default silently
+ if (err.code === 'ENOENT') {
+ return defaultValue;
+ }
+ // Log other I/O errors if requested
+ if (logError) {
+ console.warn(`Failed to read file ${filePath}: ${err.message}`);
+ }
+ return defaultValue;
+ }
+ return safeJSONParse(content, defaultValue, { allowArray, logError, context: filePath });
+}
+
+/**
+ * Parse JSONL (JSON Lines) content safely.
+ * Handles empty lines, whitespace, and malformed lines gracefully.
+ *
+ * @param {string} content - JSONL content (newline-separated JSON objects)
+ * @param {Object} options - Options
+ * @param {boolean} [options.logErrors=false] - Log individual line parsing errors
+ * @param {string} [options.context=''] - Context for error logging
+ * @returns {Array} Array of parsed objects (invalid lines are skipped)
+ *
+ * @example
+ * const lines = safeJSONLParse('{"a":1}\n{"b":2}\n'); // [{ a: 1 }, { b: 2 }]
+ * const lines = safeJSONLParse('{"a":1}\ninvalid\n{"b":2}'); // [{ a: 1 }, { b: 2 }]
+ */
+export function safeJSONLParse(content, { logErrors = false, context = '' } = {}) {
+ if (!content || !content.trim()) return [];
+
+ // Split on CRLF or LF to handle both Windows and Unix line endings
+ const lines = content
+ .split(/\r?\n/)
+ .map(line => line.trim())
+ .filter(Boolean);
+ const results = [];
+
+ for (const line of lines) {
+ const parsed = safeJSONParse(line, null, { allowArray: false, logError: logErrors, context });
+ if (parsed !== null) {
+ results.push(parsed);
+ }
+ }
+
+ return results;
+}
+
+/**
+ * Read a JSONL file safely.
+ *
+ * @param {string} filePath - Path to JSONL file
+ * @param {Object} options - Options
+ * @param {boolean} [options.logErrors=false] - Log individual line parsing errors
+ * @returns {Promise} Array of parsed objects
+ *
+ * @example
+ * const entries = await readJSONLFile('./logs.jsonl');
+ */
+export async function readJSONLFile(filePath, { logErrors = false } = {}) {
+ let content;
+ try {
+ content = await readFile(filePath, 'utf-8');
+ } catch (err) {
+ // ENOENT = file doesn't exist, return empty array silently
+ if (err.code === 'ENOENT') {
+ return [];
+ }
+ // Log other I/O errors if requested
+ if (logErrors) {
+ console.warn(`Failed to read file ${filePath}: ${err.message}`);
+ }
+ return [];
+ }
+ return safeJSONLParse(content, { logErrors, context: filePath });
+}
diff --git a/server/lib/fileUtils.test.js b/server/lib/fileUtils.test.js
new file mode 100644
index 0000000..19147c5
--- /dev/null
+++ b/server/lib/fileUtils.test.js
@@ -0,0 +1,306 @@
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import { writeFile, rm, mkdir } from 'fs/promises';
+import { join } from 'path';
+import { tmpdir } from 'os';
+import {
+ isValidJSON,
+ safeJSONParse,
+ safeJSONLParse,
+ readJSONFile,
+ readJSONLFile
+} from './fileUtils.js';
+
+describe('fileUtils', () => {
+ describe('isValidJSON', () => {
+ it('should return true for valid JSON object', () => {
+ expect(isValidJSON('{"key": "value"}')).toBe(true);
+ });
+
+ it('should return true for valid JSON array when allowed', () => {
+ expect(isValidJSON('[1, 2, 3]')).toBe(true);
+ });
+
+ it('should return false for JSON array when not allowed', () => {
+ expect(isValidJSON('[1, 2, 3]', { allowArray: false })).toBe(false);
+ });
+
+ it('should return false for empty string', () => {
+ expect(isValidJSON('')).toBe(false);
+ });
+
+ it('should return false for whitespace-only string', () => {
+ expect(isValidJSON(' ')).toBe(false);
+ });
+
+ it('should return false for null', () => {
+ expect(isValidJSON(null)).toBe(false);
+ });
+
+ it('should return false for undefined', () => {
+ expect(isValidJSON(undefined)).toBe(false);
+ });
+
+ it('should return false for string not starting with { or [', () => {
+ expect(isValidJSON('hello')).toBe(false);
+ });
+
+ it('should return false for incomplete object (missing end)', () => {
+ expect(isValidJSON('{"key":')).toBe(false);
+ });
+
+ it('should return false for incomplete array (missing end)', () => {
+ expect(isValidJSON('[1, 2')).toBe(false);
+ });
+
+ it('should handle whitespace around valid JSON', () => {
+ expect(isValidJSON(' {"key": "value"} ')).toBe(true);
+ });
+
+ it('should handle nested objects', () => {
+ expect(isValidJSON('{"outer": {"inner": "value"}}')).toBe(true);
+ });
+ });
+
+ describe('safeJSONParse', () => {
+ it('should parse valid JSON object', () => {
+ const result = safeJSONParse('{"key": "value"}', {});
+ expect(result).toEqual({ key: 'value' });
+ });
+
+ it('should parse valid JSON array', () => {
+ const result = safeJSONParse('[1, 2, 3]', []);
+ expect(result).toEqual([1, 2, 3]);
+ });
+
+ it('should return default value for empty string', () => {
+ const result = safeJSONParse('', { default: true });
+ expect(result).toEqual({ default: true });
+ });
+
+ it('should return default value for null input', () => {
+ const result = safeJSONParse(null, []);
+ expect(result).toEqual([]);
+ });
+
+ it('should return default value for invalid JSON', () => {
+ const result = safeJSONParse('not json', { fallback: 'value' });
+ expect(result).toEqual({ fallback: 'value' });
+ });
+
+ it('should return default value for JSON with trailing comma', () => {
+ const result = safeJSONParse('{"a": 1,}', {});
+ expect(result).toEqual({});
+ });
+
+ it('should return default value for truncated JSON', () => {
+ const result = safeJSONParse('{"key": "value', {});
+ expect(result).toEqual({});
+ });
+
+ it('should return null as default when no defaultValue provided', () => {
+ const result = safeJSONParse('invalid');
+ expect(result).toBe(null);
+ });
+
+ it('should reject arrays when allowArray is false', () => {
+ const result = safeJSONParse('[1, 2, 3]', {}, { allowArray: false });
+ expect(result).toEqual({});
+ });
+
+ it('should log warning when logError is true', () => {
+ const consoleSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
+ safeJSONParse('invalid', {}, { logError: true });
+ expect(consoleSpy).toHaveBeenCalled();
+ consoleSpy.mockRestore();
+ });
+
+ it('should include context in log message', () => {
+ const consoleSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
+ safeJSONParse('invalid', {}, { logError: true, context: 'test-file.json' });
+ expect(consoleSpy).toHaveBeenCalledWith(expect.stringContaining('test-file.json'));
+ consoleSpy.mockRestore();
+ });
+
+ it('should not log for empty input even with logError true', () => {
+ const consoleSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
+ safeJSONParse('', {}, { logError: true });
+ expect(consoleSpy).not.toHaveBeenCalled();
+ consoleSpy.mockRestore();
+ });
+
+ it('should handle syntax error in structurally valid JSON', () => {
+ // Passes structural check but fails JSON.parse
+ const consoleWarnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
+ const result = safeJSONParse('{"key": undefined}', { fallback: true }, { logError: true });
+ expect(result).toEqual({ fallback: true });
+ expect(consoleWarnSpy).toHaveBeenCalled();
+ consoleWarnSpy.mockRestore();
+ });
+ });
+
+ describe('safeJSONLParse', () => {
+ it('should parse valid JSONL content', () => {
+ const content = '{"a": 1}\n{"b": 2}\n{"c": 3}';
+ const result = safeJSONLParse(content);
+ expect(result).toEqual([{ a: 1 }, { b: 2 }, { c: 3 }]);
+ });
+
+ it('should skip empty lines', () => {
+ const content = '{"a": 1}\n\n{"b": 2}\n';
+ const result = safeJSONLParse(content);
+ expect(result).toEqual([{ a: 1 }, { b: 2 }]);
+ });
+
+ it('should skip whitespace-only lines', () => {
+ const content = '{"a": 1}\n \n{"b": 2}';
+ const result = safeJSONLParse(content);
+ expect(result).toEqual([{ a: 1 }, { b: 2 }]);
+ });
+
+ it('should skip invalid lines and continue parsing', () => {
+ const content = '{"a": 1}\ninvalid json\n{"b": 2}';
+ const result = safeJSONLParse(content);
+ expect(result).toEqual([{ a: 1 }, { b: 2 }]);
+ });
+
+ it('should return empty array for empty content', () => {
+ expect(safeJSONLParse('')).toEqual([]);
+ });
+
+ it('should return empty array for null content', () => {
+ expect(safeJSONLParse(null)).toEqual([]);
+ });
+
+ it('should return empty array for whitespace-only content', () => {
+ expect(safeJSONLParse(' \n ')).toEqual([]);
+ });
+
+ it('should handle single line without trailing newline', () => {
+ const result = safeJSONLParse('{"single": "line"}');
+ expect(result).toEqual([{ single: 'line' }]);
+ });
+
+ it('should reject array values in lines (JSONL expects objects)', () => {
+ const content = '{"a": 1}\n[1, 2, 3]\n{"b": 2}';
+ const result = safeJSONLParse(content);
+ // Arrays are rejected because allowArray: false is passed internally
+ expect(result).toEqual([{ a: 1 }, { b: 2 }]);
+ });
+
+ it('should handle lines with only truncated JSON', () => {
+ const content = '{"complete": true}\n{"incomplete":';
+ const result = safeJSONLParse(content);
+ expect(result).toEqual([{ complete: true }]);
+ });
+
+ it('should handle CRLF line endings (Windows)', () => {
+ const content = '{"a": 1}\r\n{"b": 2}\r\n{"c": 3}';
+ const result = safeJSONLParse(content);
+ expect(result).toEqual([{ a: 1 }, { b: 2 }, { c: 3 }]);
+ });
+
+ it('should handle mixed LF and CRLF line endings', () => {
+ const content = '{"a": 1}\n{"b": 2}\r\n{"c": 3}';
+ const result = safeJSONLParse(content);
+ expect(result).toEqual([{ a: 1 }, { b: 2 }, { c: 3 }]);
+ });
+ });
+
+ describe('readJSONFile', () => {
+ const testDir = join(tmpdir(), 'fileutils-test-' + Date.now());
+
+ beforeEach(async () => {
+ await mkdir(testDir, { recursive: true });
+ });
+
+ afterEach(async () => {
+ await rm(testDir, { recursive: true, force: true });
+ });
+
+ it('should read and parse valid JSON file', async () => {
+ const filePath = join(testDir, 'valid.json');
+ await writeFile(filePath, '{"key": "value"}');
+
+ const result = await readJSONFile(filePath, {});
+ expect(result).toEqual({ key: 'value' });
+ });
+
+ it('should return default value for non-existent file', async () => {
+ const result = await readJSONFile('/nonexistent/path.json', { default: true });
+ expect(result).toEqual({ default: true });
+ });
+
+ it('should return default value for empty file', async () => {
+ const filePath = join(testDir, 'empty.json');
+ await writeFile(filePath, '');
+
+ const result = await readJSONFile(filePath, { empty: true });
+ expect(result).toEqual({ empty: true });
+ });
+
+ it('should return default value for corrupted file', async () => {
+ const filePath = join(testDir, 'corrupted.json');
+ await writeFile(filePath, '{"incomplete":');
+
+ const result = await readJSONFile(filePath, { fallback: true });
+ expect(result).toEqual({ fallback: true });
+ });
+
+ it('should handle arrays when allowArray is true', async () => {
+ const filePath = join(testDir, 'array.json');
+ await writeFile(filePath, '[1, 2, 3]');
+
+ const result = await readJSONFile(filePath, []);
+ expect(result).toEqual([1, 2, 3]);
+ });
+
+ it('should reject arrays when allowArray is false', async () => {
+ const filePath = join(testDir, 'array.json');
+ await writeFile(filePath, '[1, 2, 3]');
+
+ const result = await readJSONFile(filePath, {}, { allowArray: false });
+ expect(result).toEqual({});
+ });
+ });
+
+ describe('readJSONLFile', () => {
+ const testDir = join(tmpdir(), 'fileutils-jsonl-test-' + Date.now());
+
+ beforeEach(async () => {
+ await mkdir(testDir, { recursive: true });
+ });
+
+ afterEach(async () => {
+ await rm(testDir, { recursive: true, force: true });
+ });
+
+ it('should read and parse valid JSONL file', async () => {
+ const filePath = join(testDir, 'valid.jsonl');
+ await writeFile(filePath, '{"a": 1}\n{"b": 2}\n{"c": 3}');
+
+ const result = await readJSONLFile(filePath);
+ expect(result).toEqual([{ a: 1 }, { b: 2 }, { c: 3 }]);
+ });
+
+ it('should return empty array for non-existent file', async () => {
+ const result = await readJSONLFile('/nonexistent/path.jsonl');
+ expect(result).toEqual([]);
+ });
+
+ it('should return empty array for empty file', async () => {
+ const filePath = join(testDir, 'empty.jsonl');
+ await writeFile(filePath, '');
+
+ const result = await readJSONLFile(filePath);
+ expect(result).toEqual([]);
+ });
+
+ it('should skip invalid lines in JSONL file', async () => {
+ const filePath = join(testDir, 'mixed.jsonl');
+ await writeFile(filePath, '{"valid": 1}\nnot json\n{"also": "valid"}');
+
+ const result = await readJSONLFile(filePath);
+ expect(result).toEqual([{ valid: 1 }, { also: 'valid' }]);
+ });
+ });
+});
diff --git a/server/lib/taskParser.js b/server/lib/taskParser.js
index 90bcf6d..3c3441a 100644
--- a/server/lib/taskParser.js
+++ b/server/lib/taskParser.js
@@ -87,6 +87,49 @@ function parseTaskLine(line) {
};
}
+// Sentinel prefix for JSON-encoded metadata values
+const JSON_SENTINEL = '__json__:';
+
+/**
+ * Unescape newlines in metadata values.
+ *
+ * For values prefixed with the JSON sentinel (produced by escapeNewlines),
+ * this uses JSON.parse to correctly restore backslashes, newlines, etc.
+ * For legacy or simple values, falls back to simple replacement for backwards compatibility.
+ */
+function unescapeNewlines(value) {
+ if (typeof value !== 'string') return value;
+ // Check for explicit JSON sentinel prefix
+ if (value.startsWith(JSON_SENTINEL)) {
+ const jsonPart = value.slice(JSON_SENTINEL.length);
+ try {
+ return JSON.parse(jsonPart);
+ } catch {
+ // Fall through to legacy behavior if parsing fails
+ }
+ }
+ // Legacy fallback for backwards compatibility with pre-sentinel data only.
+ // New values with special characters always use the sentinel prefix (see escapeNewlines),
+ // so this branch only runs on historical data that was escaped with the old method.
+ // Values that were never intended to be newline-escaped won't have \\n sequences.
+ return value.replace(/\\n/g, '\n');
+}
+
+/**
+ * Escape newlines in metadata values.
+ *
+ * For values containing special characters (newlines, backslashes), uses JSON
+ * string escaping with a sentinel prefix for reversibility. Simple values are stored as-is.
+ */
+function escapeNewlines(value) {
+ if (typeof value !== 'string') return String(value);
+ // Only use JSON encoding if the value contains characters that need escaping
+ if (value.includes('\n') || value.includes('\\')) {
+ return JSON_SENTINEL + JSON.stringify(value);
+ }
+ return value;
+}
+
/**
* Parse metadata line (indented under task)
* Format: - Key: Value
@@ -97,7 +140,7 @@ function parseMetadataLine(line) {
return {
key: match[1].toLowerCase(),
- value: match[2].trim()
+ value: unescapeNewlines(match[2].trim())
};
}
@@ -205,10 +248,11 @@ export function generateTasksMarkdown(tasks, includeApprovalFlags = false) {
: '';
lines.push(`- ${checkbox} #${task.id} | ${task.priority}${approvalFlag} | ${task.description}`);
- // Add metadata
+ // Add metadata (escape newlines in values for single-line storage)
for (const [key, value] of Object.entries(task.metadata)) {
const capitalizedKey = key.charAt(0).toUpperCase() + key.slice(1);
- lines.push(` - ${capitalizedKey}: ${value}`);
+ const escapedValue = escapeNewlines(String(value));
+ lines.push(` - ${capitalizedKey}: ${escapedValue}`);
}
}
diff --git a/server/lib/taskParser.test.js b/server/lib/taskParser.test.js
index 6e01189..d1093d3 100644
--- a/server/lib/taskParser.test.js
+++ b/server/lib/taskParser.test.js
@@ -265,6 +265,23 @@ describe('Task Parser', () => {
expect(markdown).toContain('- App: my-app');
});
+ it('should escape newlines in metadata values for round-trip preservation', () => {
+ const multiLineContext = '## Additional Instructions\nFix the bug\n\n## Previous Context\nAgent ID: agent-123';
+ const tasks = [
+ { id: 'task-001', status: 'pending', priority: 'MEDIUM', priorityValue: 2, description: 'Resume task', metadata: { context: multiLineContext } }
+ ];
+
+ const markdown = generateTasksMarkdown(tasks);
+
+ // Should contain escaped newlines
+ expect(markdown).toContain('\\n');
+ expect(markdown).not.toContain('\n## Additional');
+
+ // Round-trip test: parse it back and verify context is preserved
+ const parsed = parseTasksMarkdown(markdown);
+ expect(parsed[0].metadata.context).toBe(multiLineContext);
+ });
+
it('should sort tasks by priority within sections', () => {
const tasks = [
{ id: 'task-001', status: 'pending', priority: 'LOW', priorityValue: 1, description: 'Low', metadata: {} },
diff --git a/server/package-lock.json b/server/package-lock.json
index 49af8b6..2eaf726 100644
--- a/server/package-lock.json
+++ b/server/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "portos-server",
- "version": "0.7.6",
+ "version": "0.9.3",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "portos-server",
- "version": "0.7.6",
+ "version": "0.9.3",
"dependencies": {
"cors": "^2.8.5",
"express": "^4.21.2",
@@ -17,23 +17,9 @@
"zod": "^3.24.1"
},
"devDependencies": {
- "@vitest/coverage-v8": "^2.1.8",
+ "@vitest/coverage-v8": "^4.0.16",
"supertest": "^7.1.4",
- "vitest": "^2.1.8"
- }
- },
- "node_modules/@ampproject/remapping": {
- "version": "2.3.0",
- "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz",
- "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==",
- "dev": true,
- "license": "Apache-2.0",
- "dependencies": {
- "@jridgewell/gen-mapping": "^0.3.5",
- "@jridgewell/trace-mapping": "^0.3.24"
- },
- "engines": {
- "node": ">=6.0.0"
+ "vitest": "^4.0.16"
}
},
"node_modules/@babel/helper-string-parser": {
@@ -87,16 +73,19 @@
}
},
"node_modules/@bcoe/v8-coverage": {
- "version": "0.2.3",
- "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz",
- "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==",
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-1.0.2.tgz",
+ "integrity": "sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA==",
"dev": true,
- "license": "MIT"
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ }
},
"node_modules/@esbuild/aix-ppc64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz",
- "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.2.tgz",
+ "integrity": "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==",
"cpu": [
"ppc64"
],
@@ -107,13 +96,13 @@
"aix"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/android-arm": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz",
- "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.2.tgz",
+ "integrity": "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==",
"cpu": [
"arm"
],
@@ -124,13 +113,13 @@
"android"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/android-arm64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz",
- "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.2.tgz",
+ "integrity": "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==",
"cpu": [
"arm64"
],
@@ -141,13 +130,13 @@
"android"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/android-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz",
- "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.2.tgz",
+ "integrity": "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==",
"cpu": [
"x64"
],
@@ -158,13 +147,13 @@
"android"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/darwin-arm64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz",
- "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.2.tgz",
+ "integrity": "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==",
"cpu": [
"arm64"
],
@@ -175,13 +164,13 @@
"darwin"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/darwin-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz",
- "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.2.tgz",
+ "integrity": "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==",
"cpu": [
"x64"
],
@@ -192,13 +181,13 @@
"darwin"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/freebsd-arm64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz",
- "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.2.tgz",
+ "integrity": "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==",
"cpu": [
"arm64"
],
@@ -209,13 +198,13 @@
"freebsd"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/freebsd-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz",
- "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.2.tgz",
+ "integrity": "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==",
"cpu": [
"x64"
],
@@ -226,13 +215,13 @@
"freebsd"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/linux-arm": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz",
- "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.2.tgz",
+ "integrity": "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==",
"cpu": [
"arm"
],
@@ -243,13 +232,13 @@
"linux"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/linux-arm64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz",
- "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.2.tgz",
+ "integrity": "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==",
"cpu": [
"arm64"
],
@@ -260,13 +249,13 @@
"linux"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/linux-ia32": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz",
- "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.2.tgz",
+ "integrity": "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==",
"cpu": [
"ia32"
],
@@ -277,13 +266,13 @@
"linux"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/linux-loong64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz",
- "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.2.tgz",
+ "integrity": "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==",
"cpu": [
"loong64"
],
@@ -294,13 +283,13 @@
"linux"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/linux-mips64el": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz",
- "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.2.tgz",
+ "integrity": "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==",
"cpu": [
"mips64el"
],
@@ -311,13 +300,13 @@
"linux"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/linux-ppc64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz",
- "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.2.tgz",
+ "integrity": "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==",
"cpu": [
"ppc64"
],
@@ -328,13 +317,13 @@
"linux"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/linux-riscv64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz",
- "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.2.tgz",
+ "integrity": "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==",
"cpu": [
"riscv64"
],
@@ -345,13 +334,13 @@
"linux"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/linux-s390x": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz",
- "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.2.tgz",
+ "integrity": "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==",
"cpu": [
"s390x"
],
@@ -362,13 +351,13 @@
"linux"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/linux-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz",
- "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.2.tgz",
+ "integrity": "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==",
"cpu": [
"x64"
],
@@ -379,13 +368,30 @@
"linux"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/netbsd-arm64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.2.tgz",
+ "integrity": "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=18"
}
},
"node_modules/@esbuild/netbsd-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz",
- "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.2.tgz",
+ "integrity": "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==",
"cpu": [
"x64"
],
@@ -396,13 +402,30 @@
"netbsd"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-arm64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.2.tgz",
+ "integrity": "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
}
},
"node_modules/@esbuild/openbsd-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz",
- "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.2.tgz",
+ "integrity": "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==",
"cpu": [
"x64"
],
@@ -413,13 +436,30 @@
"openbsd"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openharmony-arm64": {
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.2.tgz",
+ "integrity": "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openharmony"
+ ],
+ "engines": {
+ "node": ">=18"
}
},
"node_modules/@esbuild/sunos-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz",
- "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.2.tgz",
+ "integrity": "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==",
"cpu": [
"x64"
],
@@ -430,13 +470,13 @@
"sunos"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/win32-arm64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz",
- "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.2.tgz",
+ "integrity": "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==",
"cpu": [
"arm64"
],
@@ -447,13 +487,13 @@
"win32"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/win32-ia32": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz",
- "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.2.tgz",
+ "integrity": "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==",
"cpu": [
"ia32"
],
@@ -464,13 +504,13 @@
"win32"
],
"engines": {
- "node": ">=12"
+ "node": ">=18"
}
},
"node_modules/@esbuild/win32-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz",
- "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.2.tgz",
+ "integrity": "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==",
"cpu": [
"x64"
],
@@ -481,46 +521,7 @@
"win32"
],
"engines": {
- "node": ">=12"
- }
- },
- "node_modules/@isaacs/cliui": {
- "version": "8.0.2",
- "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
- "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "string-width": "^5.1.2",
- "string-width-cjs": "npm:string-width@^4.2.0",
- "strip-ansi": "^7.0.1",
- "strip-ansi-cjs": "npm:strip-ansi@^6.0.1",
- "wrap-ansi": "^8.1.0",
- "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0"
- },
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@istanbuljs/schema": {
- "version": "0.1.3",
- "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz",
- "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/@jridgewell/gen-mapping": {
- "version": "0.3.13",
- "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
- "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@jridgewell/sourcemap-codec": "^1.5.0",
- "@jridgewell/trace-mapping": "^0.3.24"
+ "node": ">=18"
}
},
"node_modules/@jridgewell/resolve-uri": {
@@ -574,17 +575,6 @@
"@noble/hashes": "^1.1.5"
}
},
- "node_modules/@pkgjs/parseargs": {
- "version": "0.11.0",
- "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
- "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
- "dev": true,
- "license": "MIT",
- "optional": true,
- "engines": {
- "node": ">=14"
- }
- },
"node_modules/@pm2/agent": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/@pm2/agent/-/agent-2.0.4.tgz",
@@ -1188,12 +1178,30 @@
"integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==",
"license": "MIT"
},
+ "node_modules/@standard-schema/spec": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz",
+ "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/@tootallnate/quickjs-emscripten": {
"version": "0.23.0",
"resolved": "https://registry.npmjs.org/@tootallnate/quickjs-emscripten/-/quickjs-emscripten-0.23.0.tgz",
"integrity": "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==",
"license": "MIT"
},
+ "node_modules/@types/chai": {
+ "version": "5.2.3",
+ "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz",
+ "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/deep-eql": "*",
+ "assertion-error": "^2.0.1"
+ }
+ },
"node_modules/@types/cors": {
"version": "2.8.19",
"resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.19.tgz",
@@ -1203,6 +1211,13 @@
"@types/node": "*"
}
},
+ "node_modules/@types/deep-eql": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz",
+ "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/@types/estree": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
@@ -1220,31 +1235,30 @@
}
},
"node_modules/@vitest/coverage-v8": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-2.1.8.tgz",
- "integrity": "sha512-2Y7BPlKH18mAZYAW1tYByudlCYrQyl5RGvnnDYJKW5tCiO5qg3KSAy3XAxcxKz900a0ZXxWtKrMuZLe3lKBpJw==",
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-4.0.16.tgz",
+ "integrity": "sha512-2rNdjEIsPRzsdu6/9Eq0AYAzYdpP6Bx9cje9tL3FE5XzXRQF1fNU9pe/1yE8fCrS0HD+fBtt6gLPh6LI57tX7A==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@ampproject/remapping": "^2.3.0",
- "@bcoe/v8-coverage": "^0.2.3",
- "debug": "^4.3.7",
+ "@bcoe/v8-coverage": "^1.0.2",
+ "@vitest/utils": "4.0.16",
+ "ast-v8-to-istanbul": "^0.3.8",
"istanbul-lib-coverage": "^3.2.2",
"istanbul-lib-report": "^3.0.1",
"istanbul-lib-source-maps": "^5.0.6",
- "istanbul-reports": "^3.1.7",
- "magic-string": "^0.30.12",
- "magicast": "^0.3.5",
- "std-env": "^3.8.0",
- "test-exclude": "^7.0.1",
- "tinyrainbow": "^1.2.0"
+ "istanbul-reports": "^3.2.0",
+ "magicast": "^0.5.1",
+ "obug": "^2.1.1",
+ "std-env": "^3.10.0",
+ "tinyrainbow": "^3.0.3"
},
"funding": {
"url": "https://opencollective.com/vitest"
},
"peerDependencies": {
- "@vitest/browser": "2.1.8",
- "vitest": "2.1.8"
+ "@vitest/browser": "4.0.16",
+ "vitest": "4.0.16"
},
"peerDependenciesMeta": {
"@vitest/browser": {
@@ -1252,64 +1266,41 @@
}
}
},
- "node_modules/@vitest/coverage-v8/node_modules/debug": {
- "version": "4.4.3",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
- "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ms": "^2.1.3"
- },
- "engines": {
- "node": ">=6.0"
- },
- "peerDependenciesMeta": {
- "supports-color": {
- "optional": true
- }
- }
- },
- "node_modules/@vitest/coverage-v8/node_modules/ms": {
- "version": "2.1.3",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
- "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/@vitest/expect": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.8.tgz",
- "integrity": "sha512-8ytZ/fFHq2g4PJVAtDX57mayemKgDR6X3Oa2Foro+EygiOJHUXhCqBAAKQYYajZpFoIfvBCF1j6R6IYRSIUFuw==",
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.16.tgz",
+ "integrity": "sha512-eshqULT2It7McaJkQGLkPjPjNph+uevROGuIMJdG3V+0BSR2w9u6J9Lwu+E8cK5TETlfou8GRijhafIMhXsimA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@vitest/spy": "2.1.8",
- "@vitest/utils": "2.1.8",
- "chai": "^5.1.2",
- "tinyrainbow": "^1.2.0"
+ "@standard-schema/spec": "^1.0.0",
+ "@types/chai": "^5.2.2",
+ "@vitest/spy": "4.0.16",
+ "@vitest/utils": "4.0.16",
+ "chai": "^6.2.1",
+ "tinyrainbow": "^3.0.3"
},
"funding": {
"url": "https://opencollective.com/vitest"
}
},
"node_modules/@vitest/mocker": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.8.tgz",
- "integrity": "sha512-7guJ/47I6uqfttp33mgo6ga5Gr1VnL58rcqYKyShoRK9ebu8T5Rs6HN3s1NABiBeVTdWNrwUMcHH54uXZBN4zA==",
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.16.tgz",
+ "integrity": "sha512-yb6k4AZxJTB+q9ycAvsoxGn+j/po0UaPgajllBgt1PzoMAAmJGYFdDk0uCcRcxb3BrME34I6u8gHZTQlkqSZpg==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@vitest/spy": "2.1.8",
+ "@vitest/spy": "4.0.16",
"estree-walker": "^3.0.3",
- "magic-string": "^0.30.12"
+ "magic-string": "^0.30.21"
},
"funding": {
"url": "https://opencollective.com/vitest"
},
"peerDependencies": {
"msw": "^2.4.9",
- "vite": "^5.0.0"
+ "vite": "^6.0.0 || ^7.0.0-0"
},
"peerDependenciesMeta": {
"msw": {
@@ -1321,96 +1312,66 @@
}
},
"node_modules/@vitest/pretty-format": {
- "version": "2.1.9",
- "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.9.tgz",
- "integrity": "sha512-KhRIdGV2U9HOUzxfiHmY8IFHTdqtOhIzCpd8WRdJiE7D/HUcZVD0EgQCVjm+Q9gkUXWgBvMmTtZgIG48wq7sOQ==",
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.16.tgz",
+ "integrity": "sha512-eNCYNsSty9xJKi/UdVD8Ou16alu7AYiS2fCPRs0b1OdhJiV89buAXQLpTbe+X8V9L6qrs9CqyvU7OaAopJYPsA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "tinyrainbow": "^1.2.0"
+ "tinyrainbow": "^3.0.3"
},
"funding": {
"url": "https://opencollective.com/vitest"
}
},
"node_modules/@vitest/runner": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.1.8.tgz",
- "integrity": "sha512-17ub8vQstRnRlIU5k50bG+QOMLHRhYPAna5tw8tYbj+jzjcspnwnwtPtiOlkuKC4+ixDPTuLZiqiWWQ2PSXHVg==",
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.16.tgz",
+ "integrity": "sha512-VWEDm5Wv9xEo80ctjORcTQRJ539EGPB3Pb9ApvVRAY1U/WkHXmmYISqU5E79uCwcW7xYUV38gwZD+RV755fu3Q==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@vitest/utils": "2.1.8",
- "pathe": "^1.1.2"
+ "@vitest/utils": "4.0.16",
+ "pathe": "^2.0.3"
},
"funding": {
"url": "https://opencollective.com/vitest"
}
},
"node_modules/@vitest/snapshot": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.1.8.tgz",
- "integrity": "sha512-20T7xRFbmnkfcmgVEz+z3AU/3b0cEzZOt/zmnvZEctg64/QZbSDJEVm9fLnnlSi74KibmRsO9/Qabi+t0vCRPg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@vitest/pretty-format": "2.1.8",
- "magic-string": "^0.30.12",
- "pathe": "^1.1.2"
- },
- "funding": {
- "url": "https://opencollective.com/vitest"
- }
- },
- "node_modules/@vitest/snapshot/node_modules/@vitest/pretty-format": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.8.tgz",
- "integrity": "sha512-9HiSZ9zpqNLKlbIDRWOnAWqgcA7xu+8YxXSekhr0Ykab7PAYFkhkwoqVArPOtJhPmYeE2YHgKZlj3CP36z2AJQ==",
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.16.tgz",
+ "integrity": "sha512-sf6NcrYhYBsSYefxnry+DR8n3UV4xWZwWxYbCJUt2YdvtqzSPR7VfGrY0zsv090DAbjFZsi7ZaMi1KnSRyK1XA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "tinyrainbow": "^1.2.0"
+ "@vitest/pretty-format": "4.0.16",
+ "magic-string": "^0.30.21",
+ "pathe": "^2.0.3"
},
"funding": {
"url": "https://opencollective.com/vitest"
}
},
"node_modules/@vitest/spy": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.1.8.tgz",
- "integrity": "sha512-5swjf2q95gXeYPevtW0BLk6H8+bPlMb4Vw/9Em4hFxDcaOxS+e0LOX4yqNxoHzMR2akEB2xfpnWUzkZokmgWDg==",
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.16.tgz",
+ "integrity": "sha512-4jIOWjKP0ZUaEmJm00E0cOBLU+5WE0BpeNr3XN6TEF05ltro6NJqHWxXD0kA8/Zc8Nh23AT8WQxwNG+WeROupw==",
"dev": true,
"license": "MIT",
- "dependencies": {
- "tinyspy": "^3.0.2"
- },
"funding": {
"url": "https://opencollective.com/vitest"
}
},
"node_modules/@vitest/utils": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.1.8.tgz",
- "integrity": "sha512-dwSoui6djdwbfFmIgbIjX2ZhIoG7Ex/+xpxyiEgIGzjliY8xGkcpITKTlp6B4MgtGkF2ilvm97cPM96XZaAgcA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@vitest/pretty-format": "2.1.8",
- "loupe": "^3.1.2",
- "tinyrainbow": "^1.2.0"
- },
- "funding": {
- "url": "https://opencollective.com/vitest"
- }
- },
- "node_modules/@vitest/utils/node_modules/@vitest/pretty-format": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.8.tgz",
- "integrity": "sha512-9HiSZ9zpqNLKlbIDRWOnAWqgcA7xu+8YxXSekhr0Ykab7PAYFkhkwoqVArPOtJhPmYeE2YHgKZlj3CP36z2AJQ==",
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.16.tgz",
+ "integrity": "sha512-h8z9yYhV3e1LEfaQ3zdypIrnAg/9hguReGZoS7Gl0aBG5xgA410zBqECqmaF/+RkTggRsfnzc1XaAHA6bmUufA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "tinyrainbow": "^1.2.0"
+ "@vitest/pretty-format": "4.0.16",
+ "tinyrainbow": "^3.0.3"
},
"funding": {
"url": "https://opencollective.com/vitest"
@@ -1462,19 +1423,6 @@
"node": ">=6"
}
},
- "node_modules/ansi-regex": {
- "version": "6.2.2",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
- "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/ansi-regex?sponsor=1"
- }
- },
"node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
@@ -1550,6 +1498,18 @@
"integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
"license": "0BSD"
},
+ "node_modules/ast-v8-to-istanbul": {
+ "version": "0.3.10",
+ "resolved": "https://registry.npmjs.org/ast-v8-to-istanbul/-/ast-v8-to-istanbul-0.3.10.tgz",
+ "integrity": "sha512-p4K7vMz2ZSk3wN8l5o3y2bJAoZXT3VuJI5OLTATY/01CYWumWvwkUw0SqDBnNq6IiTO3qDa1eSQDibAV8g7XOQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/trace-mapping": "^0.3.31",
+ "estree-walker": "^3.0.3",
+ "js-tokens": "^9.0.1"
+ }
+ },
"node_modules/async": {
"version": "3.2.6",
"resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz",
@@ -1563,13 +1523,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/balanced-match": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
- "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/base64id": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/base64id/-/base64id-2.0.0.tgz",
@@ -1642,16 +1595,6 @@
"npm": "1.2.8000 || >= 1.4.16"
}
},
- "node_modules/brace-expansion": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
- "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "balanced-match": "^1.0.0"
- }
- },
"node_modules/braces": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
@@ -1679,16 +1622,6 @@
"node": ">= 0.8"
}
},
- "node_modules/cac": {
- "version": "6.7.14",
- "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz",
- "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/call-bind-apply-helpers": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
@@ -1719,18 +1652,11 @@
}
},
"node_modules/chai": {
- "version": "5.3.3",
- "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz",
- "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==",
+ "version": "6.2.2",
+ "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz",
+ "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==",
"dev": true,
"license": "MIT",
- "dependencies": {
- "assertion-error": "^2.0.1",
- "check-error": "^2.1.1",
- "deep-eql": "^5.0.1",
- "loupe": "^3.1.0",
- "pathval": "^2.0.0"
- },
"engines": {
"node": ">=18"
}
@@ -1754,16 +1680,6 @@
"integrity": "sha512-syedaZ9cPe7r3hoQA9twWYKu5AIyCswN5+szkmPBe9ccdLrj4bYaCnLVPTLd2kgVRc7+zoX4tyPgRnFKCj5YjQ==",
"license": "MIT/X11"
},
- "node_modules/check-error": {
- "version": "2.1.3",
- "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.3.tgz",
- "integrity": "sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 16"
- }
- },
"node_modules/chokidar": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz",
@@ -1908,21 +1824,6 @@
"integrity": "sha512-/f6gpQuxDaqXu+1kwQYSckUglPaOrHdbIlBAu0YuW8/Cdb45XwXYNUBXg3r/9Mo6n540Kn/smKcZWko5x99KrQ==",
"license": "MIT"
},
- "node_modules/cross-spawn": {
- "version": "7.0.6",
- "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
- "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "path-key": "^3.1.0",
- "shebang-command": "^2.0.0",
- "which": "^2.0.1"
- },
- "engines": {
- "node": ">= 8"
- }
- },
"node_modules/culvert": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/culvert/-/culvert-0.1.2.tgz",
@@ -1953,16 +1854,6 @@
"ms": "2.0.0"
}
},
- "node_modules/deep-eql": {
- "version": "5.0.2",
- "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz",
- "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=6"
- }
- },
"node_modules/degenerator": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/degenerator/-/degenerator-5.0.1.tgz",
@@ -2031,26 +1922,12 @@
"node": ">= 0.4"
}
},
- "node_modules/eastasianwidth": {
- "version": "0.2.0",
- "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
- "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/ee-first": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
"integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
"license": "MIT"
},
- "node_modules/emoji-regex": {
- "version": "9.2.2",
- "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
- "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/encodeurl": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
@@ -2256,9 +2133,9 @@
}
},
"node_modules/esbuild": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz",
- "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==",
+ "version": "0.27.2",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.2.tgz",
+ "integrity": "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==",
"dev": true,
"hasInstallScript": true,
"license": "MIT",
@@ -2266,32 +2143,35 @@
"esbuild": "bin/esbuild"
},
"engines": {
- "node": ">=12"
+ "node": ">=18"
},
"optionalDependencies": {
- "@esbuild/aix-ppc64": "0.21.5",
- "@esbuild/android-arm": "0.21.5",
- "@esbuild/android-arm64": "0.21.5",
- "@esbuild/android-x64": "0.21.5",
- "@esbuild/darwin-arm64": "0.21.5",
- "@esbuild/darwin-x64": "0.21.5",
- "@esbuild/freebsd-arm64": "0.21.5",
- "@esbuild/freebsd-x64": "0.21.5",
- "@esbuild/linux-arm": "0.21.5",
- "@esbuild/linux-arm64": "0.21.5",
- "@esbuild/linux-ia32": "0.21.5",
- "@esbuild/linux-loong64": "0.21.5",
- "@esbuild/linux-mips64el": "0.21.5",
- "@esbuild/linux-ppc64": "0.21.5",
- "@esbuild/linux-riscv64": "0.21.5",
- "@esbuild/linux-s390x": "0.21.5",
- "@esbuild/linux-x64": "0.21.5",
- "@esbuild/netbsd-x64": "0.21.5",
- "@esbuild/openbsd-x64": "0.21.5",
- "@esbuild/sunos-x64": "0.21.5",
- "@esbuild/win32-arm64": "0.21.5",
- "@esbuild/win32-ia32": "0.21.5",
- "@esbuild/win32-x64": "0.21.5"
+ "@esbuild/aix-ppc64": "0.27.2",
+ "@esbuild/android-arm": "0.27.2",
+ "@esbuild/android-arm64": "0.27.2",
+ "@esbuild/android-x64": "0.27.2",
+ "@esbuild/darwin-arm64": "0.27.2",
+ "@esbuild/darwin-x64": "0.27.2",
+ "@esbuild/freebsd-arm64": "0.27.2",
+ "@esbuild/freebsd-x64": "0.27.2",
+ "@esbuild/linux-arm": "0.27.2",
+ "@esbuild/linux-arm64": "0.27.2",
+ "@esbuild/linux-ia32": "0.27.2",
+ "@esbuild/linux-loong64": "0.27.2",
+ "@esbuild/linux-mips64el": "0.27.2",
+ "@esbuild/linux-ppc64": "0.27.2",
+ "@esbuild/linux-riscv64": "0.27.2",
+ "@esbuild/linux-s390x": "0.27.2",
+ "@esbuild/linux-x64": "0.27.2",
+ "@esbuild/netbsd-arm64": "0.27.2",
+ "@esbuild/netbsd-x64": "0.27.2",
+ "@esbuild/openbsd-arm64": "0.27.2",
+ "@esbuild/openbsd-x64": "0.27.2",
+ "@esbuild/openharmony-arm64": "0.27.2",
+ "@esbuild/sunos-x64": "0.27.2",
+ "@esbuild/win32-arm64": "0.27.2",
+ "@esbuild/win32-ia32": "0.27.2",
+ "@esbuild/win32-x64": "0.27.2"
}
},
"node_modules/escape-html": {
@@ -2523,48 +2403,18 @@
}
}
},
- "node_modules/foreground-child": {
- "version": "3.3.1",
- "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz",
- "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==",
+ "node_modules/form-data": {
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
+ "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
"dev": true,
- "license": "ISC",
+ "license": "MIT",
"dependencies": {
- "cross-spawn": "^7.0.6",
- "signal-exit": "^4.0.1"
- },
- "engines": {
- "node": ">=14"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/foreground-child/node_modules/signal-exit": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
- "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
- "dev": true,
- "license": "ISC",
- "engines": {
- "node": ">=14"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/form-data": {
- "version": "4.0.5",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
- "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "asynckit": "^0.4.0",
- "combined-stream": "^1.0.8",
- "es-set-tostringtag": "^2.1.0",
- "hasown": "^2.0.2",
- "mime-types": "^2.1.12"
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
+ "mime-types": "^2.1.12"
},
"engines": {
"node": ">= 6"
@@ -2715,27 +2565,6 @@
"integrity": "sha512-2e/nZezdVlyCopOCYHeW0onkbZg7xP1Ad6pndPy1rCygeRykefUS6r7oA5cJRGEFvseiaz5a/qUHFVX1dd6Isg==",
"license": "MIT"
},
- "node_modules/glob": {
- "version": "10.5.0",
- "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz",
- "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "foreground-child": "^3.1.0",
- "jackspeak": "^3.1.2",
- "minimatch": "^9.0.4",
- "minipass": "^7.1.2",
- "package-json-from-dist": "^1.0.0",
- "path-scurry": "^1.11.1"
- },
- "bin": {
- "glob": "dist/esm/bin.mjs"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
@@ -2986,16 +2815,6 @@
"node": ">=0.10.0"
}
},
- "node_modules/is-fullwidth-code-point": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
- "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/is-glob": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
@@ -3017,13 +2836,6 @@
"node": ">=0.12.0"
}
},
- "node_modules/isexe": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
- "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
- "dev": true,
- "license": "ISC"
- },
"node_modules/istanbul-lib-coverage": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz",
@@ -3103,22 +2915,6 @@
"node": ">=8"
}
},
- "node_modules/jackspeak": {
- "version": "3.4.3",
- "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz",
- "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==",
- "dev": true,
- "license": "BlueOak-1.0.0",
- "dependencies": {
- "@isaacs/cliui": "^8.0.2"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- },
- "optionalDependencies": {
- "@pkgjs/parseargs": "^0.11.0"
- }
- },
"node_modules/js-git": {
"version": "0.7.8",
"resolved": "https://registry.npmjs.org/js-git/-/js-git-0.7.8.tgz",
@@ -3131,6 +2927,13 @@
"pako": "^0.2.5"
}
},
+ "node_modules/js-tokens": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz",
+ "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/js-yaml": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
@@ -3165,13 +2968,6 @@
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
"license": "MIT"
},
- "node_modules/loupe": {
- "version": "3.2.1",
- "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz",
- "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/lru-cache": {
"version": "7.18.3",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
@@ -3192,15 +2988,15 @@
}
},
"node_modules/magicast": {
- "version": "0.3.5",
- "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.3.5.tgz",
- "integrity": "sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==",
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.5.1.tgz",
+ "integrity": "sha512-xrHS24IxaLrvuo613F719wvOIv9xPHFWQHuvGUBmPnCA/3MQxKI3b+r7n1jAoDHmsbC5bRhTZYR77invLAxVnw==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@babel/parser": "^7.25.4",
- "@babel/types": "^7.25.4",
- "source-map-js": "^1.2.0"
+ "@babel/parser": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "source-map-js": "^1.2.1"
}
},
"node_modules/make-dir": {
@@ -3288,32 +3084,6 @@
"node": ">= 0.6"
}
},
- "node_modules/minimatch": {
- "version": "9.0.5",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
- "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "brace-expansion": "^2.0.1"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/minipass": {
- "version": "7.1.2",
- "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
- "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
- "dev": true,
- "license": "ISC",
- "engines": {
- "node": ">=16 || 14 >=14.17"
- }
- },
"node_modules/mkdirp": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz",
@@ -3462,6 +3232,17 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/obug": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz",
+ "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==",
+ "dev": true,
+ "funding": [
+ "https://github.com/sponsors/sxzz",
+ "https://opencollective.com/debug"
+ ],
+ "license": "MIT"
+ },
"node_modules/on-finished": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
@@ -3539,13 +3320,6 @@
"node": ">= 14"
}
},
- "node_modules/package-json-from-dist": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz",
- "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==",
- "dev": true,
- "license": "BlueOak-1.0.0"
- },
"node_modules/pako": {
"version": "0.2.9",
"resolved": "https://registry.npmjs.org/pako/-/pako-0.2.9.tgz",
@@ -3561,46 +3335,12 @@
"node": ">= 0.8"
}
},
- "node_modules/path-key": {
- "version": "3.1.1",
- "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
- "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/path-parse": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
"license": "MIT"
},
- "node_modules/path-scurry": {
- "version": "1.11.1",
- "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz",
- "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==",
- "dev": true,
- "license": "BlueOak-1.0.0",
- "dependencies": {
- "lru-cache": "^10.2.0",
- "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0"
- },
- "engines": {
- "node": ">=16 || 14 >=14.18"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/path-scurry/node_modules/lru-cache": {
- "version": "10.4.3",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz",
- "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==",
- "dev": true,
- "license": "ISC"
- },
"node_modules/path-to-regexp": {
"version": "0.1.12",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
@@ -3608,22 +3348,12 @@
"license": "MIT"
},
"node_modules/pathe": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz",
- "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==",
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz",
+ "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==",
"dev": true,
"license": "MIT"
},
- "node_modules/pathval": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz",
- "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 14.16"
- }
- },
"node_modules/picocolors": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
@@ -4253,29 +3983,6 @@
"integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
"license": "ISC"
},
- "node_modules/shebang-command": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
- "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "shebang-regex": "^3.0.0"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/shebang-regex": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
- "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/shimmer": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/shimmer/-/shimmer-1.2.1.tgz",
@@ -4655,114 +4362,10 @@
"dev": true,
"license": "MIT"
},
- "node_modules/string-width": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
- "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "eastasianwidth": "^0.2.0",
- "emoji-regex": "^9.2.2",
- "strip-ansi": "^7.0.1"
- },
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
- "node_modules/string-width-cjs": {
- "name": "string-width",
- "version": "4.2.3",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
- "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "emoji-regex": "^8.0.0",
- "is-fullwidth-code-point": "^3.0.0",
- "strip-ansi": "^6.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/string-width-cjs/node_modules/ansi-regex": {
- "version": "5.0.1",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
- "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/string-width-cjs/node_modules/emoji-regex": {
- "version": "8.0.0",
- "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
- "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/string-width-cjs/node_modules/strip-ansi": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-regex": "^5.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/strip-ansi": {
- "version": "7.1.2",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
- "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-regex": "^6.0.1"
- },
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/strip-ansi?sponsor=1"
- }
- },
- "node_modules/strip-ansi-cjs": {
- "name": "strip-ansi",
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-regex": "^5.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/strip-ansi-cjs/node_modules/ansi-regex": {
- "version": "5.0.1",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
- "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/superagent": {
- "version": "10.2.3",
- "resolved": "https://registry.npmjs.org/superagent/-/superagent-10.2.3.tgz",
- "integrity": "sha512-y/hkYGeXAj7wUMjxRbB21g/l6aAEituGXM9Rwl4o20+SX3e8YOSV6BxFXl+dL3Uk0mjSL3kCbNkwURm8/gEDig==",
+ "version": "10.3.0",
+ "resolved": "https://registry.npmjs.org/superagent/-/superagent-10.3.0.tgz",
+ "integrity": "sha512-B+4Ik7ROgVKrQsXTV0Jwp2u+PXYLSlqtDAhYnkkD+zn3yg8s/zjA2MeGayPoY/KICrbitwneDHrjSotxKL+0XQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -4770,11 +4373,11 @@
"cookiejar": "^2.1.4",
"debug": "^4.3.7",
"fast-safe-stringify": "^2.1.1",
- "form-data": "^4.0.4",
+ "form-data": "^4.0.5",
"formidable": "^3.5.4",
"methods": "^1.1.2",
"mime": "2.6.0",
- "qs": "^6.11.2"
+ "qs": "^6.14.1"
},
"engines": {
"node": ">=14.18.0"
@@ -4819,19 +4422,30 @@
"license": "MIT"
},
"node_modules/supertest": {
- "version": "7.1.4",
- "resolved": "https://registry.npmjs.org/supertest/-/supertest-7.1.4.tgz",
- "integrity": "sha512-tjLPs7dVyqgItVFirHYqe2T+MfWc2VOBQ8QFKKbWTA3PU7liZR8zoSpAi/C1k1ilm9RsXIKYf197oap9wXGVYg==",
+ "version": "7.2.2",
+ "resolved": "https://registry.npmjs.org/supertest/-/supertest-7.2.2.tgz",
+ "integrity": "sha512-oK8WG9diS3DlhdUkcFn4tkNIiIbBx9lI2ClF8K+b2/m8Eyv47LSawxUzZQSNKUrVb2KsqeTDCcjAAVPYaSLVTA==",
"dev": true,
"license": "MIT",
"dependencies": {
+ "cookie-signature": "^1.2.2",
"methods": "^1.1.2",
- "superagent": "^10.2.3"
+ "superagent": "^10.3.0"
},
"engines": {
"node": ">=14.18.0"
}
},
+ "node_modules/supertest/node_modules/cookie-signature": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.2.2.tgz",
+ "integrity": "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.6.0"
+ }
+ },
"node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
@@ -4883,21 +4497,6 @@
"url": "https://www.buymeacoffee.com/systeminfo"
}
},
- "node_modules/test-exclude": {
- "version": "7.0.1",
- "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-7.0.1.tgz",
- "integrity": "sha512-pFYqmTw68LXVjeWJMST4+borgQP2AyMNbg1BpZh9LbyhUeNkeaPF9gzfPGUAnSMV3qPYdWUwDIjjCLiSDOl7vg==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "@istanbuljs/schema": "^0.1.2",
- "glob": "^10.4.1",
- "minimatch": "^9.0.4"
- },
- "engines": {
- "node": ">=18"
- }
- },
"node_modules/tinybench": {
"version": "2.9.0",
"resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz",
@@ -4906,36 +4505,68 @@
"license": "MIT"
},
"node_modules/tinyexec": {
- "version": "0.3.2",
- "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz",
- "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==",
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz",
+ "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==",
"dev": true,
- "license": "MIT"
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ }
},
- "node_modules/tinypool": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz",
- "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==",
+ "node_modules/tinyglobby": {
+ "version": "0.2.15",
+ "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz",
+ "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==",
"dev": true,
"license": "MIT",
+ "dependencies": {
+ "fdir": "^6.5.0",
+ "picomatch": "^4.0.3"
+ },
"engines": {
- "node": "^18.0.0 || >=20.0.0"
+ "node": ">=12.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/SuperchupuDev"
}
},
- "node_modules/tinyrainbow": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-1.2.0.tgz",
- "integrity": "sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==",
+ "node_modules/tinyglobby/node_modules/fdir": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
+ "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
"dev": true,
"license": "MIT",
"engines": {
- "node": ">=14.0.0"
+ "node": ">=12.0.0"
+ },
+ "peerDependencies": {
+ "picomatch": "^3 || ^4"
+ },
+ "peerDependenciesMeta": {
+ "picomatch": {
+ "optional": true
+ }
}
},
- "node_modules/tinyspy": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.2.tgz",
- "integrity": "sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==",
+ "node_modules/tinyglobby/node_modules/picomatch": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
+ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
+ "dev": true,
+ "license": "MIT",
+ "peer": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/tinyrainbow": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz",
+ "integrity": "sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==",
"dev": true,
"license": "MIT",
"engines": {
@@ -5057,21 +4688,25 @@
}
},
"node_modules/vite": {
- "version": "5.4.21",
- "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz",
- "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==",
+ "version": "7.3.1",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz",
+ "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==",
"dev": true,
"license": "MIT",
+ "peer": true,
"dependencies": {
- "esbuild": "^0.21.3",
- "postcss": "^8.4.43",
- "rollup": "^4.20.0"
+ "esbuild": "^0.27.0",
+ "fdir": "^6.5.0",
+ "picomatch": "^4.0.3",
+ "postcss": "^8.5.6",
+ "rollup": "^4.43.0",
+ "tinyglobby": "^0.2.15"
},
"bin": {
"vite": "bin/vite.js"
},
"engines": {
- "node": "^18.0.0 || >=20.0.0"
+ "node": "^20.19.0 || >=22.12.0"
},
"funding": {
"url": "https://github.com/vitejs/vite?sponsor=1"
@@ -5080,19 +4715,25 @@
"fsevents": "~2.3.3"
},
"peerDependencies": {
- "@types/node": "^18.0.0 || >=20.0.0",
- "less": "*",
+ "@types/node": "^20.19.0 || >=22.12.0",
+ "jiti": ">=1.21.0",
+ "less": "^4.0.0",
"lightningcss": "^1.21.0",
- "sass": "*",
- "sass-embedded": "*",
- "stylus": "*",
- "sugarss": "*",
- "terser": "^5.4.0"
+ "sass": "^1.70.0",
+ "sass-embedded": "^1.70.0",
+ "stylus": ">=0.54.8",
+ "sugarss": "^5.0.0",
+ "terser": "^5.16.0",
+ "tsx": "^4.8.1",
+ "yaml": "^2.4.2"
},
"peerDependenciesMeta": {
"@types/node": {
"optional": true
},
+ "jiti": {
+ "optional": true
+ },
"less": {
"optional": true
},
@@ -5113,99 +4754,93 @@
},
"terser": {
"optional": true
+ },
+ "tsx": {
+ "optional": true
+ },
+ "yaml": {
+ "optional": true
}
}
},
- "node_modules/vite-node": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.8.tgz",
- "integrity": "sha512-uPAwSr57kYjAUux+8E2j0q0Fxpn8M9VoyfGiRI8Kfktz9NcYMCenwY5RnZxnF1WTu3TGiYipirIzacLL3VVGFg==",
+ "node_modules/vite/node_modules/fdir": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
+ "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
"dev": true,
"license": "MIT",
- "dependencies": {
- "cac": "^6.7.14",
- "debug": "^4.3.7",
- "es-module-lexer": "^1.5.4",
- "pathe": "^1.1.2",
- "vite": "^5.0.0"
- },
- "bin": {
- "vite-node": "vite-node.mjs"
- },
"engines": {
- "node": "^18.0.0 || >=20.0.0"
- },
- "funding": {
- "url": "https://opencollective.com/vitest"
- }
- },
- "node_modules/vite-node/node_modules/debug": {
- "version": "4.4.3",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
- "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ms": "^2.1.3"
+ "node": ">=12.0.0"
},
- "engines": {
- "node": ">=6.0"
+ "peerDependencies": {
+ "picomatch": "^3 || ^4"
},
"peerDependenciesMeta": {
- "supports-color": {
+ "picomatch": {
"optional": true
}
}
},
- "node_modules/vite-node/node_modules/ms": {
- "version": "2.1.3",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
- "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "node_modules/vite/node_modules/picomatch": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
+ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
- "license": "MIT"
+ "license": "MIT",
+ "peer": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
},
"node_modules/vitest": {
- "version": "2.1.8",
- "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.1.8.tgz",
- "integrity": "sha512-1vBKTZskHw/aosXqQUlVWWlGUxSJR8YtiyZDJAFeW2kPAeX6S3Sool0mjspO+kXLuxVWlEDDowBAeqeAQefqLQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@vitest/expect": "2.1.8",
- "@vitest/mocker": "2.1.8",
- "@vitest/pretty-format": "^2.1.8",
- "@vitest/runner": "2.1.8",
- "@vitest/snapshot": "2.1.8",
- "@vitest/spy": "2.1.8",
- "@vitest/utils": "2.1.8",
- "chai": "^5.1.2",
- "debug": "^4.3.7",
- "expect-type": "^1.1.0",
- "magic-string": "^0.30.12",
- "pathe": "^1.1.2",
- "std-env": "^3.8.0",
+ "version": "4.0.16",
+ "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.16.tgz",
+ "integrity": "sha512-E4t7DJ9pESL6E3I8nFjPa4xGUd3PmiWDLsDztS2qXSJWfHtbQnwAWylaBvSNY48I3vr8PTqIZlyK8TE3V3CA4Q==",
+ "dev": true,
+ "license": "MIT",
+ "peer": true,
+ "dependencies": {
+ "@vitest/expect": "4.0.16",
+ "@vitest/mocker": "4.0.16",
+ "@vitest/pretty-format": "4.0.16",
+ "@vitest/runner": "4.0.16",
+ "@vitest/snapshot": "4.0.16",
+ "@vitest/spy": "4.0.16",
+ "@vitest/utils": "4.0.16",
+ "es-module-lexer": "^1.7.0",
+ "expect-type": "^1.2.2",
+ "magic-string": "^0.30.21",
+ "obug": "^2.1.1",
+ "pathe": "^2.0.3",
+ "picomatch": "^4.0.3",
+ "std-env": "^3.10.0",
"tinybench": "^2.9.0",
- "tinyexec": "^0.3.1",
- "tinypool": "^1.0.1",
- "tinyrainbow": "^1.2.0",
- "vite": "^5.0.0",
- "vite-node": "2.1.8",
+ "tinyexec": "^1.0.2",
+ "tinyglobby": "^0.2.15",
+ "tinyrainbow": "^3.0.3",
+ "vite": "^6.0.0 || ^7.0.0",
"why-is-node-running": "^2.3.0"
},
"bin": {
"vitest": "vitest.mjs"
},
"engines": {
- "node": "^18.0.0 || >=20.0.0"
+ "node": "^20.0.0 || ^22.0.0 || >=24.0.0"
},
"funding": {
"url": "https://opencollective.com/vitest"
},
"peerDependencies": {
"@edge-runtime/vm": "*",
- "@types/node": "^18.0.0 || >=20.0.0",
- "@vitest/browser": "2.1.8",
- "@vitest/ui": "2.1.8",
+ "@opentelemetry/api": "^1.9.0",
+ "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0",
+ "@vitest/browser-playwright": "4.0.16",
+ "@vitest/browser-preview": "4.0.16",
+ "@vitest/browser-webdriverio": "4.0.16",
+ "@vitest/ui": "4.0.16",
"happy-dom": "*",
"jsdom": "*"
},
@@ -5213,10 +4848,19 @@
"@edge-runtime/vm": {
"optional": true
},
+ "@opentelemetry/api": {
+ "optional": true
+ },
"@types/node": {
"optional": true
},
- "@vitest/browser": {
+ "@vitest/browser-playwright": {
+ "optional": true
+ },
+ "@vitest/browser-preview": {
+ "optional": true
+ },
+ "@vitest/browser-webdriverio": {
"optional": true
},
"@vitest/ui": {
@@ -5230,31 +4874,19 @@
}
}
},
- "node_modules/vitest/node_modules/debug": {
- "version": "4.4.3",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
- "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+ "node_modules/vitest/node_modules/picomatch": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
+ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"license": "MIT",
- "dependencies": {
- "ms": "^2.1.3"
- },
"engines": {
- "node": ">=6.0"
+ "node": ">=12"
},
- "peerDependenciesMeta": {
- "supports-color": {
- "optional": true
- }
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
}
},
- "node_modules/vitest/node_modules/ms": {
- "version": "2.1.3",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
- "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/vizion": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/vizion/-/vizion-2.2.1.tgz",
@@ -5279,22 +4911,6 @@
"lodash": "^4.17.14"
}
},
- "node_modules/which": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
- "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "isexe": "^2.0.0"
- },
- "bin": {
- "node-which": "bin/node-which"
- },
- "engines": {
- "node": ">= 8"
- }
- },
"node_modules/why-is-node-running": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz",
@@ -5312,101 +4928,6 @@
"node": ">=8"
}
},
- "node_modules/wrap-ansi": {
- "version": "8.1.0",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
- "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-styles": "^6.1.0",
- "string-width": "^5.0.1",
- "strip-ansi": "^7.0.1"
- },
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
- }
- },
- "node_modules/wrap-ansi-cjs": {
- "name": "wrap-ansi",
- "version": "7.0.0",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
- "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-styles": "^4.0.0",
- "string-width": "^4.1.0",
- "strip-ansi": "^6.0.0"
- },
- "engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
- }
- },
- "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": {
- "version": "5.0.1",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
- "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": {
- "version": "8.0.0",
- "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
- "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/wrap-ansi-cjs/node_modules/string-width": {
- "version": "4.2.3",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
- "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "emoji-regex": "^8.0.0",
- "is-fullwidth-code-point": "^3.0.0",
- "strip-ansi": "^6.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-regex": "^5.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/wrap-ansi/node_modules/ansi-styles": {
- "version": "6.2.3",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
- "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/ansi-styles?sponsor=1"
- }
- },
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
diff --git a/server/package.json b/server/package.json
index ae0e56d..ca7069b 100644
--- a/server/package.json
+++ b/server/package.json
@@ -1,6 +1,6 @@
{
"name": "portos-server",
- "version": "0.8.10",
+ "version": "0.9.19",
"private": true,
"type": "module",
"scripts": {
@@ -12,6 +12,7 @@
},
"dependencies": {
"cors": "^2.8.5",
+ "portos-ai-toolkit": "github:atomantic/portos-ai-toolkit#v0.2.0",
"express": "^4.21.2",
"pm2": "^5.4.3",
"socket.io": "^4.8.3",
@@ -20,8 +21,8 @@
"zod": "^3.24.1"
},
"devDependencies": {
- "@vitest/coverage-v8": "^2.1.8",
+ "@vitest/coverage-v8": "^4.0.16",
"supertest": "^7.1.4",
- "vitest": "^2.1.8"
+ "vitest": "^4.0.16"
}
}
diff --git a/server/routes/apps.js b/server/routes/apps.js
index 0ea4902..900ea1c 100644
--- a/server/routes/apps.js
+++ b/server/routes/apps.js
@@ -129,17 +129,32 @@ router.post('/:id/start', asyncHandler(async (req, res, next) => {
throw new ServerError('App not found', { status: 404, code: 'NOT_FOUND' });
}
- const results = {};
- const commands = app.startCommands || ['npm run dev'];
const processNames = app.pm2ProcessNames || [app.name.toLowerCase().replace(/\s+/g, '-')];
- for (let i = 0; i < processNames.length; i++) {
- const name = processNames[i];
- const command = commands[i] || commands[0];
+ // Check if ecosystem config exists - prefer using it for proper env var handling
+ const hasEcosystem = ['ecosystem.config.cjs', 'ecosystem.config.js']
+ .some(f => existsSync(`${app.repoPath}/${f}`));
+
+ let results = {};
- const result = await pm2Service.startWithCommand(name, app.repoPath, command)
+ if (hasEcosystem) {
+ // Use ecosystem config for proper env/port configuration
+ const result = await pm2Service.startFromEcosystem(app.repoPath, processNames)
.catch(err => ({ success: false, error: err.message }));
- results[name] = result;
+ // Map result to each process name for consistent response format
+ for (const name of processNames) {
+ results[name] = result;
+ }
+ } else {
+ // Fallback to command-based start for apps without ecosystem config
+ const commands = app.startCommands || ['npm run dev'];
+ for (let i = 0; i < processNames.length; i++) {
+ const name = processNames[i];
+ const command = commands[i] || commands[0];
+ const result = await pm2Service.startWithCommand(name, app.repoPath, command)
+ .catch(err => ({ success: false, error: err.message }));
+ results[name] = result;
+ }
}
const allSuccess = Object.values(results).every(r => r.success !== false);
diff --git a/server/routes/brain.js b/server/routes/brain.js
new file mode 100644
index 0000000..36d6799
--- /dev/null
+++ b/server/routes/brain.js
@@ -0,0 +1,524 @@
+/**
+ * Brain API Routes
+ *
+ * Handles all HTTP endpoints for the Brain feature:
+ * - Capture and classify thoughts
+ * - CRUD for People, Projects, Ideas, Admin
+ * - Daily digest and weekly review
+ * - Settings management
+ */
+
+import { Router } from 'express';
+import * as brainService from '../services/brain.js';
+import { getProviderById } from '../services/providers.js';
+import { asyncHandler, ServerError } from '../lib/errorHandler.js';
+import { validate } from '../lib/validation.js';
+import {
+ captureInputSchema,
+ resolveReviewInputSchema,
+ fixInputSchema,
+ updateInboxInputSchema,
+ inboxQuerySchema,
+ peopleInputSchema,
+ projectInputSchema,
+ ideaInputSchema,
+ adminInputSchema,
+ settingsUpdateInputSchema
+} from '../lib/brainValidation.js';
+
+const router = Router();
+
+// =============================================================================
+// CAPTURE & INBOX
+// =============================================================================
+
+/**
+ * POST /api/brain/capture
+ * Capture a thought, classify it, and store it
+ */
+router.post('/capture', asyncHandler(async (req, res) => {
+ const validation = validate(captureInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { text, providerOverride, modelOverride } = validation.data;
+ const result = await brainService.captureThought(text, providerOverride, modelOverride);
+ res.json(result);
+}));
+
+/**
+ * GET /api/brain/inbox
+ * Get inbox log entries with optional filters
+ */
+router.get('/inbox', asyncHandler(async (req, res) => {
+ const validation = validate(inboxQuerySchema, req.query);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const entries = await brainService.getInboxLog(validation.data);
+ const counts = await brainService.getInboxLogCounts();
+ res.json({ entries, counts });
+}));
+
+/**
+ * GET /api/brain/inbox/:id
+ * Get a single inbox log entry
+ */
+router.get('/inbox/:id', asyncHandler(async (req, res) => {
+ const entry = await brainService.getInboxLogById(req.params.id);
+ if (!entry) {
+ throw new ServerError('Inbox entry not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(entry);
+}));
+
+/**
+ * POST /api/brain/review/resolve
+ * Resolve a needs_review inbox item
+ */
+router.post('/review/resolve', asyncHandler(async (req, res) => {
+ const validation = validate(resolveReviewInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { inboxLogId, destination, editedExtracted } = validation.data;
+ const result = await brainService.resolveReview(inboxLogId, destination, editedExtracted);
+ res.json(result);
+}));
+
+/**
+ * POST /api/brain/fix
+ * Fix/correct a filed inbox item
+ */
+router.post('/fix', asyncHandler(async (req, res) => {
+ const validation = validate(fixInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { inboxLogId, newDestination, updatedFields, note } = validation.data;
+ const result = await brainService.fixClassification(inboxLogId, newDestination, updatedFields, note);
+ res.json(result);
+}));
+
+/**
+ * POST /api/brain/inbox/:id/retry
+ * Retry AI classification for a needs_review item
+ */
+router.post('/inbox/:id/retry', asyncHandler(async (req, res) => {
+ const { providerOverride, modelOverride } = req.body;
+ const result = await brainService.retryClassification(req.params.id, providerOverride, modelOverride);
+ res.json(result);
+}));
+
+/**
+ * PUT /api/brain/inbox/:id
+ * Update an inbox entry (edit captured text)
+ */
+router.put('/inbox/:id', asyncHandler(async (req, res) => {
+ const validation = validate(updateInboxInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const result = await brainService.updateInboxEntry(req.params.id, validation.data);
+ if (!result) {
+ throw new ServerError('Inbox entry not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(result);
+}));
+
+/**
+ * DELETE /api/brain/inbox/:id
+ * Delete an inbox entry
+ */
+router.delete('/inbox/:id', asyncHandler(async (req, res) => {
+ const deleted = await brainService.deleteInboxEntry(req.params.id);
+ if (!deleted) {
+ throw new ServerError('Inbox entry not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.status(204).send();
+}));
+
+// =============================================================================
+// PEOPLE CRUD
+// =============================================================================
+
+router.get('/people', asyncHandler(async (req, res) => {
+ const people = await brainService.getPeople();
+ res.json(people);
+}));
+
+router.get('/people/:id', asyncHandler(async (req, res) => {
+ const person = await brainService.getPersonById(req.params.id);
+ if (!person) {
+ throw new ServerError('Person not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(person);
+}));
+
+router.post('/people', asyncHandler(async (req, res) => {
+ const validation = validate(peopleInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+ const person = await brainService.createPerson(validation.data);
+ res.status(201).json(person);
+}));
+
+router.put('/people/:id', asyncHandler(async (req, res) => {
+ const validation = validate(peopleInputSchema.partial(), req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+ const person = await brainService.updatePerson(req.params.id, validation.data);
+ if (!person) {
+ throw new ServerError('Person not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(person);
+}));
+
+router.delete('/people/:id', asyncHandler(async (req, res) => {
+ const deleted = await brainService.deletePerson(req.params.id);
+ if (!deleted) {
+ throw new ServerError('Person not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.status(204).send();
+}));
+
+// =============================================================================
+// PROJECTS CRUD
+// =============================================================================
+
+router.get('/projects', asyncHandler(async (req, res) => {
+ const { status } = req.query;
+ const filters = status ? { status } : undefined;
+ const projects = await brainService.getProjects(filters);
+ res.json(projects);
+}));
+
+router.get('/projects/:id', asyncHandler(async (req, res) => {
+ const project = await brainService.getProjectById(req.params.id);
+ if (!project) {
+ throw new ServerError('Project not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(project);
+}));
+
+router.post('/projects', asyncHandler(async (req, res) => {
+ const validation = validate(projectInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+ const project = await brainService.createProject(validation.data);
+ res.status(201).json(project);
+}));
+
+router.put('/projects/:id', asyncHandler(async (req, res) => {
+ const validation = validate(projectInputSchema.partial(), req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+ const project = await brainService.updateProject(req.params.id, validation.data);
+ if (!project) {
+ throw new ServerError('Project not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(project);
+}));
+
+router.delete('/projects/:id', asyncHandler(async (req, res) => {
+ const deleted = await brainService.deleteProject(req.params.id);
+ if (!deleted) {
+ throw new ServerError('Project not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.status(204).send();
+}));
+
+// =============================================================================
+// IDEAS CRUD
+// =============================================================================
+
+router.get('/ideas', asyncHandler(async (req, res) => {
+ const ideas = await brainService.getIdeas();
+ res.json(ideas);
+}));
+
+router.get('/ideas/:id', asyncHandler(async (req, res) => {
+ const idea = await brainService.getIdeaById(req.params.id);
+ if (!idea) {
+ throw new ServerError('Idea not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(idea);
+}));
+
+router.post('/ideas', asyncHandler(async (req, res) => {
+ const validation = validate(ideaInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+ const idea = await brainService.createIdea(validation.data);
+ res.status(201).json(idea);
+}));
+
+router.put('/ideas/:id', asyncHandler(async (req, res) => {
+ const validation = validate(ideaInputSchema.partial(), req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+ const idea = await brainService.updateIdea(req.params.id, validation.data);
+ if (!idea) {
+ throw new ServerError('Idea not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(idea);
+}));
+
+router.delete('/ideas/:id', asyncHandler(async (req, res) => {
+ const deleted = await brainService.deleteIdea(req.params.id);
+ if (!deleted) {
+ throw new ServerError('Idea not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.status(204).send();
+}));
+
+// =============================================================================
+// ADMIN CRUD
+// =============================================================================
+
+router.get('/admin', asyncHandler(async (req, res) => {
+ const { status } = req.query;
+ const filters = status ? { status } : undefined;
+ const adminItems = await brainService.getAdminItems(filters);
+ res.json(adminItems);
+}));
+
+router.get('/admin/:id', asyncHandler(async (req, res) => {
+ const item = await brainService.getAdminById(req.params.id);
+ if (!item) {
+ throw new ServerError('Admin item not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(item);
+}));
+
+router.post('/admin', asyncHandler(async (req, res) => {
+ const validation = validate(adminInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+ const item = await brainService.createAdminItem(validation.data);
+ res.status(201).json(item);
+}));
+
+router.put('/admin/:id', asyncHandler(async (req, res) => {
+ const validation = validate(adminInputSchema.partial(), req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+ const item = await brainService.updateAdminItem(req.params.id, validation.data);
+ if (!item) {
+ throw new ServerError('Admin item not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(item);
+}));
+
+router.delete('/admin/:id', asyncHandler(async (req, res) => {
+ const deleted = await brainService.deleteAdminItem(req.params.id);
+ if (!deleted) {
+ throw new ServerError('Admin item not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.status(204).send();
+}));
+
+// =============================================================================
+// DIGEST & REVIEW
+// =============================================================================
+
+/**
+ * GET /api/brain/digest/latest
+ * Get the most recent daily digest
+ */
+router.get('/digest/latest', asyncHandler(async (req, res) => {
+ const digest = await brainService.getLatestDigest();
+ res.json(digest);
+}));
+
+/**
+ * GET /api/brain/digests
+ * Get digest history
+ */
+router.get('/digests', asyncHandler(async (req, res) => {
+ const limit = parseInt(req.query.limit) || 10;
+ const digests = await brainService.getDigests(limit);
+ res.json(digests);
+}));
+
+/**
+ * POST /api/brain/digest/run
+ * Manually trigger daily digest generation
+ */
+router.post('/digest/run', asyncHandler(async (req, res) => {
+ const { providerOverride, modelOverride } = req.body;
+ const digest = await brainService.runDailyDigest(providerOverride, modelOverride);
+ res.json(digest);
+}));
+
+/**
+ * GET /api/brain/review/latest
+ * Get the most recent weekly review
+ */
+router.get('/review/latest', asyncHandler(async (req, res) => {
+ const review = await brainService.getLatestReview();
+ res.json(review);
+}));
+
+/**
+ * GET /api/brain/reviews
+ * Get review history
+ */
+router.get('/reviews', asyncHandler(async (req, res) => {
+ const limit = parseInt(req.query.limit) || 10;
+ const reviews = await brainService.getReviews(limit);
+ res.json(reviews);
+}));
+
+/**
+ * POST /api/brain/review/run
+ * Manually trigger weekly review generation
+ */
+router.post('/review/run', asyncHandler(async (req, res) => {
+ const { providerOverride, modelOverride } = req.body;
+ const review = await brainService.runWeeklyReview(providerOverride, modelOverride);
+ res.json(review);
+}));
+
+// =============================================================================
+// SETTINGS & SUMMARY
+// =============================================================================
+
+/**
+ * GET /api/brain/settings
+ * Get brain settings
+ */
+router.get('/settings', asyncHandler(async (req, res) => {
+ const settings = await brainService.loadMeta();
+ res.json(settings);
+}));
+
+/**
+ * PUT /api/brain/settings
+ * Update brain settings
+ */
+router.put('/settings', asyncHandler(async (req, res) => {
+ const validation = validate(settingsUpdateInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ // Validate provider and model if provided
+ if (validation.data.defaultProvider || validation.data.defaultModel) {
+ const providerId = validation.data.defaultProvider;
+ const modelId = validation.data.defaultModel;
+
+ // Get current settings to use existing provider if only model is being updated
+ const currentSettings = await brainService.loadMeta();
+ const effectiveProviderId = providerId || currentSettings.defaultProvider;
+
+ // Validate provider exists
+ const provider = await getProviderById(effectiveProviderId);
+ if (!provider) {
+ throw new ServerError(`Provider "${effectiveProviderId}" not found`, {
+ status: 400,
+ code: 'INVALID_PROVIDER'
+ });
+ }
+
+ // Validate model exists in provider's models
+ if (modelId) {
+ if (!provider.models || provider.models.length === 0) {
+ throw new ServerError(`Provider "${effectiveProviderId}" has no models configured`, {
+ status: 400,
+ code: 'NO_MODELS'
+ });
+ }
+ if (!provider.models.includes(modelId)) {
+ throw new ServerError(`Model "${modelId}" not found in provider "${effectiveProviderId}"`, {
+ status: 400,
+ code: 'INVALID_MODEL',
+ context: { availableModels: provider.models }
+ });
+ }
+ }
+ }
+
+ const settings = await brainService.updateMeta(validation.data);
+ res.json(settings);
+}));
+
+/**
+ * GET /api/brain/summary
+ * Get brain data summary for dashboard
+ */
+router.get('/summary', asyncHandler(async (req, res) => {
+ const summary = await brainService.getSummary();
+ res.json(summary);
+}));
+
+export default router;
diff --git a/server/routes/brain.test.js b/server/routes/brain.test.js
new file mode 100644
index 0000000..c8dfd51
--- /dev/null
+++ b/server/routes/brain.test.js
@@ -0,0 +1,750 @@
+import { describe, it, expect, vi, beforeEach } from 'vitest';
+import express from 'express';
+import request from 'supertest';
+import brainRoutes from './brain.js';
+
+// Mock the brain service
+vi.mock('../services/brain.js', () => ({
+ // Capture & Inbox
+ captureThought: vi.fn(),
+ getInboxLog: vi.fn(),
+ getInboxLogById: vi.fn(),
+ getInboxLogCounts: vi.fn(),
+ resolveReview: vi.fn(),
+ fixClassification: vi.fn(),
+ retryClassification: vi.fn(),
+ // People
+ getPeople: vi.fn(),
+ getPersonById: vi.fn(),
+ createPerson: vi.fn(),
+ updatePerson: vi.fn(),
+ deletePerson: vi.fn(),
+ // Projects
+ getProjects: vi.fn(),
+ getProjectById: vi.fn(),
+ createProject: vi.fn(),
+ updateProject: vi.fn(),
+ deleteProject: vi.fn(),
+ // Ideas
+ getIdeas: vi.fn(),
+ getIdeaById: vi.fn(),
+ createIdea: vi.fn(),
+ updateIdea: vi.fn(),
+ deleteIdea: vi.fn(),
+ // Admin
+ getAdminItems: vi.fn(),
+ getAdminById: vi.fn(),
+ createAdminItem: vi.fn(),
+ updateAdminItem: vi.fn(),
+ deleteAdminItem: vi.fn(),
+ // Digest & Review
+ getLatestDigest: vi.fn(),
+ getDigests: vi.fn(),
+ runDailyDigest: vi.fn(),
+ getLatestReview: vi.fn(),
+ getReviews: vi.fn(),
+ runWeeklyReview: vi.fn(),
+ // Settings & Summary
+ loadMeta: vi.fn(),
+ updateMeta: vi.fn(),
+ getSummary: vi.fn()
+}));
+
+// Import mocked module
+import * as brainService from '../services/brain.js';
+
+describe('Brain Routes', () => {
+ let app;
+
+ beforeEach(() => {
+ app = express();
+ app.use(express.json());
+ app.use('/api/brain', brainRoutes);
+ vi.clearAllMocks();
+ });
+
+ // ===========================================================================
+ // CAPTURE & INBOX
+ // ===========================================================================
+
+ describe('POST /api/brain/capture', () => {
+ it('should capture a thought and return result', async () => {
+ const mockResult = {
+ inboxLog: {
+ id: 'inbox-001',
+ capturedText: 'Test thought',
+ status: 'filed',
+ classification: {
+ destination: 'ideas',
+ confidence: 0.9,
+ title: 'Test Idea'
+ }
+ },
+ filedRecord: { id: 'idea-001', title: 'Test Idea' },
+ message: 'Filed to ideas: Test Idea'
+ };
+ brainService.captureThought.mockResolvedValue(mockResult);
+
+ const response = await request(app)
+ .post('/api/brain/capture')
+ .send({ text: 'Test thought' });
+
+ expect(response.status).toBe(200);
+ expect(response.body.inboxLog.id).toBe('inbox-001');
+ expect(brainService.captureThought).toHaveBeenCalledWith('Test thought', undefined, undefined);
+ });
+
+ it('should return 400 if text is missing', async () => {
+ const response = await request(app)
+ .post('/api/brain/capture')
+ .send({});
+
+ expect(response.status).toBe(400);
+ });
+
+ it('should pass provider and model overrides', async () => {
+ brainService.captureThought.mockResolvedValue({ inboxLog: { id: 'inbox-002' } });
+
+ await request(app)
+ .post('/api/brain/capture')
+ .send({ text: 'Test', providerOverride: 'openai', modelOverride: 'gpt-4' });
+
+ expect(brainService.captureThought).toHaveBeenCalledWith('Test', 'openai', 'gpt-4');
+ });
+ });
+
+ describe('GET /api/brain/inbox', () => {
+ it('should return inbox entries with counts', async () => {
+ const mockEntries = [
+ { id: 'inbox-001', status: 'filed' },
+ { id: 'inbox-002', status: 'needs_review' }
+ ];
+ const mockCounts = { total: 2, filed: 1, needs_review: 1 };
+ brainService.getInboxLog.mockResolvedValue(mockEntries);
+ brainService.getInboxLogCounts.mockResolvedValue(mockCounts);
+
+ const response = await request(app).get('/api/brain/inbox');
+
+ expect(response.status).toBe(200);
+ expect(response.body.entries).toHaveLength(2);
+ expect(response.body.counts.total).toBe(2);
+ });
+
+ it('should pass filters to service', async () => {
+ brainService.getInboxLog.mockResolvedValue([]);
+ brainService.getInboxLogCounts.mockResolvedValue({});
+
+ await request(app).get('/api/brain/inbox?status=needs_review&limit=50');
+
+ expect(brainService.getInboxLog).toHaveBeenCalledWith(
+ expect.objectContaining({ status: 'needs_review', limit: 50 })
+ );
+ });
+ });
+
+ describe('GET /api/brain/inbox/:id', () => {
+ it('should return inbox entry by ID', async () => {
+ brainService.getInboxLogById.mockResolvedValue({ id: 'inbox-001', capturedText: 'Test' });
+
+ const response = await request(app).get('/api/brain/inbox/inbox-001');
+
+ expect(response.status).toBe(200);
+ expect(response.body.id).toBe('inbox-001');
+ });
+
+ it('should return 404 if not found', async () => {
+ brainService.getInboxLogById.mockResolvedValue(null);
+
+ const response = await request(app).get('/api/brain/inbox/inbox-999');
+
+ expect(response.status).toBe(404);
+ });
+ });
+
+ describe('POST /api/brain/review/resolve', () => {
+ it('should resolve a needs_review item', async () => {
+ const testUuid = '550e8400-e29b-41d4-a716-446655440000';
+ brainService.resolveReview.mockResolvedValue({
+ inboxLog: { id: testUuid, status: 'filed' },
+ filedRecord: { id: 'project-001' }
+ });
+
+ const response = await request(app)
+ .post('/api/brain/review/resolve')
+ .send({
+ inboxLogId: testUuid,
+ destination: 'projects',
+ editedExtracted: { name: 'Test Project' }
+ });
+
+ expect(response.status).toBe(200);
+ expect(brainService.resolveReview).toHaveBeenCalledWith(
+ testUuid,
+ 'projects',
+ { name: 'Test Project' }
+ );
+ });
+
+ it('should return 400 if required fields are missing', async () => {
+ const response = await request(app)
+ .post('/api/brain/review/resolve')
+ .send({ inboxLogId: '550e8400-e29b-41d4-a716-446655440000' });
+
+ expect(response.status).toBe(400);
+ });
+ });
+
+ describe('POST /api/brain/fix', () => {
+ it('should fix a filed classification', async () => {
+ const testUuid = '550e8400-e29b-41d4-a716-446655440001';
+ brainService.fixClassification.mockResolvedValue({
+ inboxLog: { id: testUuid, status: 'corrected' },
+ newRecord: { id: 'people-001' }
+ });
+
+ const response = await request(app)
+ .post('/api/brain/fix')
+ .send({
+ inboxLogId: testUuid,
+ newDestination: 'people',
+ updatedFields: { name: 'John Doe' },
+ note: 'Wrong category'
+ });
+
+ expect(response.status).toBe(200);
+ expect(brainService.fixClassification).toHaveBeenCalledWith(
+ testUuid,
+ 'people',
+ { name: 'John Doe' },
+ 'Wrong category'
+ );
+ });
+
+ it('should return 400 if required fields are missing', async () => {
+ const response = await request(app)
+ .post('/api/brain/fix')
+ .send({ inboxLogId: '550e8400-e29b-41d4-a716-446655440001' });
+
+ expect(response.status).toBe(400);
+ });
+ });
+
+ describe('POST /api/brain/inbox/:id/retry', () => {
+ it('should retry classification', async () => {
+ brainService.retryClassification.mockResolvedValue({
+ inboxLog: { id: 'inbox-001', status: 'filed' }
+ });
+
+ const response = await request(app)
+ .post('/api/brain/inbox/inbox-001/retry')
+ .send({});
+
+ expect(response.status).toBe(200);
+ expect(brainService.retryClassification).toHaveBeenCalledWith('inbox-001', undefined, undefined);
+ });
+ });
+
+ // ===========================================================================
+ // PEOPLE CRUD
+ // ===========================================================================
+
+ describe('GET /api/brain/people', () => {
+ it('should return all people', async () => {
+ brainService.getPeople.mockResolvedValue([
+ { id: 'people-001', name: 'John' },
+ { id: 'people-002', name: 'Jane' }
+ ]);
+
+ const response = await request(app).get('/api/brain/people');
+
+ expect(response.status).toBe(200);
+ expect(response.body).toHaveLength(2);
+ });
+ });
+
+ describe('GET /api/brain/people/:id', () => {
+ it('should return person by ID', async () => {
+ brainService.getPersonById.mockResolvedValue({ id: 'people-001', name: 'John' });
+
+ const response = await request(app).get('/api/brain/people/people-001');
+
+ expect(response.status).toBe(200);
+ expect(response.body.name).toBe('John');
+ });
+
+ it('should return 404 if not found', async () => {
+ brainService.getPersonById.mockResolvedValue(null);
+
+ const response = await request(app).get('/api/brain/people/people-999');
+
+ expect(response.status).toBe(404);
+ });
+ });
+
+ describe('POST /api/brain/people', () => {
+ it('should create a person', async () => {
+ brainService.createPerson.mockResolvedValue({
+ id: 'people-001',
+ name: 'John Doe',
+ context: 'Work colleague'
+ });
+
+ const response = await request(app)
+ .post('/api/brain/people')
+ .send({ name: 'John Doe', context: 'Work colleague' });
+
+ expect(response.status).toBe(201);
+ expect(response.body.id).toBe('people-001');
+ });
+
+ it('should return 400 if name is missing', async () => {
+ const response = await request(app)
+ .post('/api/brain/people')
+ .send({ context: 'Test' });
+
+ expect(response.status).toBe(400);
+ });
+ });
+
+ describe('PUT /api/brain/people/:id', () => {
+ it('should update a person', async () => {
+ brainService.updatePerson.mockResolvedValue({ id: 'people-001', name: 'John Updated' });
+
+ const response = await request(app)
+ .put('/api/brain/people/people-001')
+ .send({ name: 'John Updated' });
+
+ expect(response.status).toBe(200);
+ expect(response.body.name).toBe('John Updated');
+ });
+
+ it('should return 404 if not found', async () => {
+ brainService.updatePerson.mockResolvedValue(null);
+
+ const response = await request(app)
+ .put('/api/brain/people/people-999')
+ .send({ name: 'Test' });
+
+ expect(response.status).toBe(404);
+ });
+ });
+
+ describe('DELETE /api/brain/people/:id', () => {
+ it('should delete a person', async () => {
+ brainService.deletePerson.mockResolvedValue(true);
+
+ const response = await request(app).delete('/api/brain/people/people-001');
+
+ expect(response.status).toBe(204);
+ });
+
+ it('should return 404 if not found', async () => {
+ brainService.deletePerson.mockResolvedValue(false);
+
+ const response = await request(app).delete('/api/brain/people/people-999');
+
+ expect(response.status).toBe(404);
+ });
+ });
+
+ // ===========================================================================
+ // PROJECTS CRUD
+ // ===========================================================================
+
+ describe('GET /api/brain/projects', () => {
+ it('should return all projects', async () => {
+ brainService.getProjects.mockResolvedValue([
+ { id: 'proj-001', name: 'Project A' }
+ ]);
+
+ const response = await request(app).get('/api/brain/projects');
+
+ expect(response.status).toBe(200);
+ expect(response.body).toHaveLength(1);
+ });
+
+ it('should filter by status', async () => {
+ brainService.getProjects.mockResolvedValue([]);
+
+ await request(app).get('/api/brain/projects?status=active');
+
+ expect(brainService.getProjects).toHaveBeenCalledWith({ status: 'active' });
+ });
+ });
+
+ describe('POST /api/brain/projects', () => {
+ it('should create a project', async () => {
+ brainService.createProject.mockResolvedValue({
+ id: 'proj-001',
+ name: 'New Project',
+ status: 'active'
+ });
+
+ const response = await request(app)
+ .post('/api/brain/projects')
+ .send({ name: 'New Project', status: 'active', nextAction: 'Define scope' });
+
+ expect(response.status).toBe(201);
+ expect(response.body.id).toBe('proj-001');
+ });
+
+ it('should return 400 if name is missing', async () => {
+ const response = await request(app)
+ .post('/api/brain/projects')
+ .send({ status: 'active' });
+
+ expect(response.status).toBe(400);
+ });
+ });
+
+ // ===========================================================================
+ // IDEAS CRUD
+ // ===========================================================================
+
+ describe('GET /api/brain/ideas', () => {
+ it('should return all ideas', async () => {
+ brainService.getIdeas.mockResolvedValue([
+ { id: 'idea-001', title: 'Great Idea' }
+ ]);
+
+ const response = await request(app).get('/api/brain/ideas');
+
+ expect(response.status).toBe(200);
+ expect(response.body).toHaveLength(1);
+ });
+ });
+
+ describe('POST /api/brain/ideas', () => {
+ it('should create an idea', async () => {
+ brainService.createIdea.mockResolvedValue({
+ id: 'idea-001',
+ title: 'New Idea',
+ oneLiner: 'A brief description'
+ });
+
+ const response = await request(app)
+ .post('/api/brain/ideas')
+ .send({ title: 'New Idea', oneLiner: 'A brief description' });
+
+ expect(response.status).toBe(201);
+ expect(response.body.id).toBe('idea-001');
+ });
+
+ it('should return 400 if title is missing', async () => {
+ const response = await request(app)
+ .post('/api/brain/ideas')
+ .send({ oneLiner: 'Test' });
+
+ expect(response.status).toBe(400);
+ });
+ });
+
+ // ===========================================================================
+ // ADMIN CRUD
+ // ===========================================================================
+
+ describe('GET /api/brain/admin', () => {
+ it('should return all admin items', async () => {
+ brainService.getAdminItems.mockResolvedValue([
+ { id: 'admin-001', title: 'Renew license' }
+ ]);
+
+ const response = await request(app).get('/api/brain/admin');
+
+ expect(response.status).toBe(200);
+ expect(response.body).toHaveLength(1);
+ });
+
+ it('should filter by status', async () => {
+ brainService.getAdminItems.mockResolvedValue([]);
+
+ await request(app).get('/api/brain/admin?status=open');
+
+ expect(brainService.getAdminItems).toHaveBeenCalledWith({ status: 'open' });
+ });
+ });
+
+ describe('POST /api/brain/admin', () => {
+ it('should create an admin item', async () => {
+ brainService.createAdminItem.mockResolvedValue({
+ id: 'admin-001',
+ title: 'Renew license',
+ status: 'open'
+ });
+
+ const response = await request(app)
+ .post('/api/brain/admin')
+ .send({ title: 'Renew license', status: 'open' });
+
+ expect(response.status).toBe(201);
+ expect(response.body.id).toBe('admin-001');
+ });
+
+ it('should return 400 if title is missing', async () => {
+ const response = await request(app)
+ .post('/api/brain/admin')
+ .send({ status: 'open' });
+
+ expect(response.status).toBe(400);
+ });
+ });
+
+ // ===========================================================================
+ // DIGEST & REVIEW
+ // ===========================================================================
+
+ describe('GET /api/brain/digest/latest', () => {
+ it('should return latest digest', async () => {
+ brainService.getLatestDigest.mockResolvedValue({
+ id: 'digest-001',
+ digestText: 'Today summary...'
+ });
+
+ const response = await request(app).get('/api/brain/digest/latest');
+
+ expect(response.status).toBe(200);
+ expect(response.body.id).toBe('digest-001');
+ });
+ });
+
+ describe('GET /api/brain/digests', () => {
+ it('should return digest history', async () => {
+ brainService.getDigests.mockResolvedValue([
+ { id: 'digest-001' },
+ { id: 'digest-002' }
+ ]);
+
+ const response = await request(app).get('/api/brain/digests');
+
+ expect(response.status).toBe(200);
+ expect(response.body).toHaveLength(2);
+ });
+
+ it('should pass limit parameter', async () => {
+ brainService.getDigests.mockResolvedValue([]);
+
+ await request(app).get('/api/brain/digests?limit=5');
+
+ expect(brainService.getDigests).toHaveBeenCalledWith(5);
+ });
+ });
+
+ describe('POST /api/brain/digest/run', () => {
+ it('should run daily digest manually', async () => {
+ brainService.runDailyDigest.mockResolvedValue({
+ id: 'digest-001',
+ digestText: 'New digest...'
+ });
+
+ const response = await request(app)
+ .post('/api/brain/digest/run')
+ .send({});
+
+ expect(response.status).toBe(200);
+ expect(brainService.runDailyDigest).toHaveBeenCalled();
+ });
+ });
+
+ describe('GET /api/brain/review/latest', () => {
+ it('should return latest weekly review', async () => {
+ brainService.getLatestReview.mockResolvedValue({
+ id: 'review-001',
+ reviewText: 'Weekly summary...'
+ });
+
+ const response = await request(app).get('/api/brain/review/latest');
+
+ expect(response.status).toBe(200);
+ expect(response.body.id).toBe('review-001');
+ });
+ });
+
+ describe('GET /api/brain/reviews', () => {
+ it('should return review history', async () => {
+ brainService.getReviews.mockResolvedValue([{ id: 'review-001' }]);
+
+ const response = await request(app).get('/api/brain/reviews');
+
+ expect(response.status).toBe(200);
+ expect(response.body).toHaveLength(1);
+ });
+ });
+
+ describe('POST /api/brain/review/run', () => {
+ it('should run weekly review manually', async () => {
+ brainService.runWeeklyReview.mockResolvedValue({
+ id: 'review-001',
+ reviewText: 'New review...'
+ });
+
+ const response = await request(app)
+ .post('/api/brain/review/run')
+ .send({});
+
+ expect(response.status).toBe(200);
+ expect(brainService.runWeeklyReview).toHaveBeenCalled();
+ });
+ });
+
+ // ===========================================================================
+ // SETTINGS & SUMMARY
+ // ===========================================================================
+
+ describe('GET /api/brain/settings', () => {
+ it('should return brain settings', async () => {
+ brainService.loadMeta.mockResolvedValue({
+ confidenceThreshold: 0.6,
+ dailyDigestTime: '09:00',
+ defaultProvider: 'lmstudio'
+ });
+
+ const response = await request(app).get('/api/brain/settings');
+
+ expect(response.status).toBe(200);
+ expect(response.body.confidenceThreshold).toBe(0.6);
+ });
+ });
+
+ describe('PUT /api/brain/settings', () => {
+ it('should update brain settings', async () => {
+ brainService.updateMeta.mockResolvedValue({
+ confidenceThreshold: 0.8,
+ dailyDigestTime: '10:00'
+ });
+
+ const response = await request(app)
+ .put('/api/brain/settings')
+ .send({ confidenceThreshold: 0.8, dailyDigestTime: '10:00' });
+
+ expect(response.status).toBe(200);
+ expect(brainService.updateMeta).toHaveBeenCalledWith(
+ expect.objectContaining({ confidenceThreshold: 0.8 })
+ );
+ });
+ });
+
+ describe('GET /api/brain/summary', () => {
+ it('should return brain summary', async () => {
+ brainService.getSummary.mockResolvedValue({
+ peopleCount: 5,
+ projectsCount: 3,
+ ideasCount: 10,
+ adminCount: 2,
+ needsReviewCount: 1
+ });
+
+ const response = await request(app).get('/api/brain/summary');
+
+ expect(response.status).toBe(200);
+ expect(response.body.peopleCount).toBe(5);
+ });
+ });
+
+ // ===========================================================================
+ // CAPTURE FLOW - ALWAYS CREATES INBOX LOG
+ // ===========================================================================
+
+ describe('Capture Flow - Always Creates Inbox Log', () => {
+ it('should create inbox log even when AI classification fails', async () => {
+ // Simulate AI failure that still creates inbox log in needs_review state
+ brainService.captureThought.mockResolvedValue({
+ inboxLog: {
+ id: 'inbox-001',
+ capturedText: 'Test thought',
+ status: 'needs_review',
+ classification: {
+ destination: 'unknown',
+ confidence: 0
+ }
+ },
+ message: 'Thought captured but AI unavailable. Queued for manual review.'
+ });
+
+ const response = await request(app)
+ .post('/api/brain/capture')
+ .send({ text: 'Test thought' });
+
+ expect(response.status).toBe(200);
+ expect(response.body.inboxLog.id).toBeDefined();
+ expect(response.body.inboxLog.status).toBe('needs_review');
+ });
+ });
+
+ // ===========================================================================
+ // CONFIDENCE THRESHOLD GATING
+ // ===========================================================================
+
+ describe('Confidence Threshold Gating', () => {
+ it('should file directly when confidence is above threshold', async () => {
+ brainService.captureThought.mockResolvedValue({
+ inboxLog: {
+ id: 'inbox-001',
+ status: 'filed',
+ classification: { confidence: 0.9, destination: 'ideas' }
+ },
+ filedRecord: { id: 'idea-001' }
+ });
+
+ const response = await request(app)
+ .post('/api/brain/capture')
+ .send({ text: 'High confidence thought' });
+
+ expect(response.status).toBe(200);
+ expect(response.body.inboxLog.status).toBe('filed');
+ });
+
+ it('should send to needs_review when confidence is below threshold', async () => {
+ brainService.captureThought.mockResolvedValue({
+ inboxLog: {
+ id: 'inbox-002',
+ status: 'needs_review',
+ classification: { confidence: 0.4, destination: 'ideas' }
+ },
+ message: 'Thought captured but needs review. Confidence: 40%'
+ });
+
+ const response = await request(app)
+ .post('/api/brain/capture')
+ .send({ text: 'Low confidence thought' });
+
+ expect(response.status).toBe(200);
+ expect(response.body.inboxLog.status).toBe('needs_review');
+ });
+ });
+
+ // ===========================================================================
+ // FIX/MOVE BEHAVIOR
+ // ===========================================================================
+
+ describe('Fix/Move Behavior Updates Records', () => {
+ it('should update inbox log status to corrected after fix', async () => {
+ const testUuid = '550e8400-e29b-41d4-a716-446655440002';
+ brainService.fixClassification.mockResolvedValue({
+ inboxLog: {
+ id: testUuid,
+ status: 'corrected',
+ correction: {
+ previousDestination: 'ideas',
+ newDestination: 'projects',
+ note: 'Actually a project'
+ }
+ },
+ newRecord: { id: 'proj-001' }
+ });
+
+ const response = await request(app)
+ .post('/api/brain/fix')
+ .send({
+ inboxLogId: testUuid,
+ newDestination: 'projects',
+ updatedFields: { name: 'Test Project' },
+ note: 'Actually a project'
+ });
+
+ expect(response.status).toBe(200);
+ expect(response.body.inboxLog.status).toBe('corrected');
+ expect(response.body.inboxLog.correction.previousDestination).toBe('ideas');
+ expect(response.body.inboxLog.correction.newDestination).toBe('projects');
+ });
+ });
+});
diff --git a/server/routes/cos.js b/server/routes/cos.js
index 28f8efd..5a14c2b 100644
--- a/server/routes/cos.js
+++ b/server/routes/cos.js
@@ -9,6 +9,7 @@ import * as appActivity from '../services/appActivity.js';
import * as taskLearning from '../services/taskLearning.js';
import * as weeklyDigest from '../services/weeklyDigest.js';
import * as taskSchedule from '../services/taskSchedule.js';
+import { enhanceTaskPrompt } from '../services/taskEnhancer.js';
import { asyncHandler, ServerError } from '../lib/errorHandler.js';
const router = Router();
@@ -94,15 +95,27 @@ router.post('/tasks/reorder', asyncHandler(async (req, res) => {
res.json(result);
}));
+// POST /api/cos/tasks/enhance - Enhance a task prompt with AI
+router.post('/tasks/enhance', asyncHandler(async (req, res) => {
+ const { description, context } = req.body;
+
+ if (!description) {
+ throw new ServerError('Description is required', { status: 400, code: 'VALIDATION_ERROR' });
+ }
+
+ const result = await enhanceTaskPrompt(description, context);
+ res.json(result);
+}));
+
// POST /api/cos/tasks - Add a new task
router.post('/tasks', asyncHandler(async (req, res) => {
- const { description, priority, context, model, provider, app, type = 'user', approvalRequired, screenshots } = req.body;
+ const { description, priority, context, model, provider, app, type = 'user', approvalRequired, screenshots, position = 'bottom' } = req.body;
if (!description) {
throw new ServerError('Description is required', { status: 400, code: 'VALIDATION_ERROR' });
}
- const taskData = { description, priority, context, model, provider, app, approvalRequired, screenshots };
+ const taskData = { description, priority, context, model, provider, app, approvalRequired, screenshots, position };
const result = await cos.addTask(taskData, type);
res.json(result);
}));
@@ -329,6 +342,19 @@ router.get('/learning/skipped', asyncHandler(async (req, res) => {
});
}));
+// POST /api/cos/learning/reset/:taskType - Reset learning data for a specific task type
+router.post('/learning/reset/:taskType', asyncHandler(async (req, res) => {
+ const { taskType } = req.params;
+ if (!taskType) {
+ throw new ServerError('Task type is required', { status: 400, code: 'VALIDATION_ERROR' });
+ }
+ const result = await taskLearning.resetTaskTypeLearning(taskType);
+ if (!result.reset) {
+ throw new ServerError(`Task type "${taskType}" not found in learning data`, { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(result);
+}));
+
// GET /api/cos/learning/cooldown/:taskType - Get adaptive cooldown for specific task type
router.get('/learning/cooldown/:taskType', asyncHandler(async (req, res) => {
const { taskType } = req.params;
@@ -457,12 +483,15 @@ router.get('/schedule/self-improvement/:taskType', asyncHandler(async (req, res)
// PUT /api/cos/schedule/self-improvement/:taskType - Update interval for self-improvement task
router.put('/schedule/self-improvement/:taskType', asyncHandler(async (req, res) => {
const { taskType } = req.params;
- const { type, enabled, intervalMs } = req.body;
+ const { type, enabled, intervalMs, providerId, model, prompt } = req.body;
const settings = {};
if (type !== undefined) settings.type = type;
if (enabled !== undefined) settings.enabled = enabled;
if (intervalMs !== undefined) settings.intervalMs = intervalMs;
+ if (providerId !== undefined) settings.providerId = providerId;
+ if (model !== undefined) settings.model = model;
+ if (prompt !== undefined) settings.prompt = prompt;
const result = await taskSchedule.updateSelfImprovementInterval(taskType, settings);
res.json({ success: true, taskType, interval: result });
@@ -478,12 +507,15 @@ router.get('/schedule/app-improvement/:taskType', asyncHandler(async (req, res)
// PUT /api/cos/schedule/app-improvement/:taskType - Update interval for app improvement task
router.put('/schedule/app-improvement/:taskType', asyncHandler(async (req, res) => {
const { taskType } = req.params;
- const { type, enabled, intervalMs } = req.body;
+ const { type, enabled, intervalMs, providerId, model, prompt } = req.body;
const settings = {};
if (type !== undefined) settings.type = type;
if (enabled !== undefined) settings.enabled = enabled;
if (intervalMs !== undefined) settings.intervalMs = intervalMs;
+ if (providerId !== undefined) settings.providerId = providerId;
+ if (model !== undefined) settings.model = model;
+ if (prompt !== undefined) settings.prompt = prompt;
const result = await taskSchedule.updateAppImprovementInterval(taskType, settings);
res.json({ success: true, taskType, interval: result });
diff --git a/server/routes/detect.js b/server/routes/detect.js
index 090e8b7..391401b 100644
--- a/server/routes/detect.js
+++ b/server/routes/detect.js
@@ -6,6 +6,7 @@ import { exec } from 'child_process';
import { promisify } from 'util';
import { detectAppWithAi } from '../services/aiDetect.js';
import { asyncHandler, ServerError } from '../lib/errorHandler.js';
+import { safeJSONParse } from '../lib/fileUtils.js';
const execAsync = promisify(exec);
const router = Router();
@@ -53,29 +54,33 @@ router.post('/repo', asyncHandler(async (req, res) => {
result.hasPackageJson = true;
const content = await readFile(packageJsonPath, 'utf-8').catch(() => null);
if (content) {
- const pkg = JSON.parse(content);
- result.packageJson = {
- name: pkg.name,
- scripts: pkg.scripts || {}
- };
-
- // Detect type from dependencies/scripts
- const deps = { ...pkg.dependencies, ...pkg.devDependencies };
- if (deps.vite && deps.express) {
- result.type = 'vite+express';
- } else if (deps.vite || deps.react || deps.vue) {
- result.type = 'vite';
- } else if (deps.express || deps.fastify || deps.koa) {
- result.type = 'single-node-server';
- } else if (deps.next) {
- result.type = 'nextjs';
- }
+ const pkg = safeJSONParse(content, null);
+ if (!pkg) {
+ result.packageJson = null;
+ } else {
+ result.packageJson = {
+ name: pkg.name,
+ scripts: pkg.scripts || {}
+ };
- // Suggest start commands from scripts
- const scripts = pkg.scripts || {};
- if (scripts.dev) result.startCommands.push('npm run dev');
- if (scripts.start) result.startCommands.push('npm start');
- if (scripts.serve) result.startCommands.push('npm run serve');
+ // Detect type from dependencies/scripts
+ const deps = { ...pkg.dependencies, ...pkg.devDependencies };
+ if (deps.vite && deps.express) {
+ result.type = 'vite+express';
+ } else if (deps.vite || deps.react || deps.vue) {
+ result.type = 'vite';
+ } else if (deps.express || deps.fastify || deps.koa) {
+ result.type = 'single-node-server';
+ } else if (deps.next) {
+ result.type = 'nextjs';
+ }
+
+ // Suggest start commands from scripts
+ const scripts = pkg.scripts || {};
+ if (scripts.dev) result.startCommands.push('npm run dev');
+ if (scripts.start) result.startCommands.push('npm start');
+ if (scripts.serve) result.startCommands.push('npm run serve');
+ }
}
}
@@ -162,7 +167,7 @@ router.post('/pm2', asyncHandler(async (req, res) => {
}
const { stdout } = await execAsync('pm2 jlist').catch(() => ({ stdout: '[]' }));
- const processes = JSON.parse(stdout);
+ const processes = safeJSONParse(stdout, []);
const found = processes.find(p => p.name === name);
res.json({
diff --git a/server/routes/digital-twin.js b/server/routes/digital-twin.js
new file mode 100644
index 0000000..6ddac66
--- /dev/null
+++ b/server/routes/digital-twin.js
@@ -0,0 +1,610 @@
+/**
+ * Digital Twin API Routes
+ *
+ * Handles all HTTP endpoints for the Digital Twin feature:
+ * - Document CRUD
+ * - Behavioral testing
+ * - Enrichment questionnaire
+ * - Export
+ * - Settings
+ */
+
+import { Router } from 'express';
+import * as digitalTwinService from '../services/digital-twin.js';
+import { asyncHandler, ServerError } from '../lib/errorHandler.js';
+import { validate } from '../lib/validation.js';
+import {
+ createDocumentInputSchema,
+ updateDocumentInputSchema,
+ runTestsInputSchema,
+ runMultiTestsInputSchema,
+ enrichmentQuestionInputSchema,
+ enrichmentAnswerInputSchema,
+ exportInputSchema,
+ settingsUpdateInputSchema,
+ testHistoryQuerySchema,
+ contradictionInputSchema,
+ generateTestsInputSchema,
+ writingAnalysisInputSchema,
+ analyzeListInputSchema,
+ saveListDocumentInputSchema,
+ getListItemsInputSchema,
+ analyzeTraitsInputSchema,
+ updateTraitsInputSchema,
+ calculateConfidenceInputSchema,
+ importDataInputSchema
+} from '../lib/digitalTwinValidation.js';
+
+const router = Router();
+
+// =============================================================================
+// STATUS & SUMMARY
+// =============================================================================
+
+/**
+ * GET /api/digital-twin
+ * Get digital twin status summary
+ */
+router.get('/', asyncHandler(async (req, res) => {
+ const status = await digitalTwinService.getDigitalTwinStatus();
+ res.json(status);
+}));
+
+// =============================================================================
+// DOCUMENTS
+// =============================================================================
+
+/**
+ * GET /api/digital-twin/documents
+ * List all digital twin documents
+ */
+router.get('/documents', asyncHandler(async (req, res) => {
+ const documents = await digitalTwinService.getDocuments();
+ res.json(documents);
+}));
+
+/**
+ * GET /api/digital-twin/documents/:id
+ * Get a single document with content
+ */
+router.get('/documents/:id', asyncHandler(async (req, res) => {
+ const document = await digitalTwinService.getDocumentById(req.params.id);
+ if (!document) {
+ throw new ServerError('Document not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(document);
+}));
+
+/**
+ * POST /api/digital-twin/documents
+ * Create a new document
+ */
+router.post('/documents', asyncHandler(async (req, res) => {
+ const validation = validate(createDocumentInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const document = await digitalTwinService.createDocument(validation.data);
+ res.status(201).json(document);
+}));
+
+/**
+ * PUT /api/digital-twin/documents/:id
+ * Update a document
+ */
+router.put('/documents/:id', asyncHandler(async (req, res) => {
+ const validation = validate(updateDocumentInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const document = await digitalTwinService.updateDocument(req.params.id, validation.data);
+ if (!document) {
+ throw new ServerError('Document not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json(document);
+}));
+
+/**
+ * DELETE /api/digital-twin/documents/:id
+ * Delete a document
+ */
+router.delete('/documents/:id', asyncHandler(async (req, res) => {
+ const deleted = await digitalTwinService.deleteDocument(req.params.id);
+ if (!deleted) {
+ throw new ServerError('Document not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.status(204).send();
+}));
+
+// =============================================================================
+// TESTING
+// =============================================================================
+
+/**
+ * GET /api/digital-twin/tests
+ * Get the behavioral test suite (parsed from BEHAVIORAL_TEST_SUITE.md)
+ */
+router.get('/tests', asyncHandler(async (req, res) => {
+ const tests = await digitalTwinService.parseTestSuite();
+ res.json(tests);
+}));
+
+/**
+ * POST /api/digital-twin/tests/run
+ * Run behavioral tests against a single provider/model
+ */
+router.post('/tests/run', asyncHandler(async (req, res) => {
+ const validation = validate(runTestsInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { providerId, model, testIds } = validation.data;
+ const result = await digitalTwinService.runTests(providerId, model, testIds);
+ res.json(result);
+}));
+
+/**
+ * POST /api/digital-twin/tests/run-multi
+ * Run behavioral tests against multiple providers/models
+ */
+router.post('/tests/run-multi', asyncHandler(async (req, res) => {
+ const validation = validate(runMultiTestsInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { providers, testIds } = validation.data;
+ const io = req.app.get('io');
+
+ // Run tests for each provider in parallel
+ const results = await Promise.all(
+ providers.map(async ({ providerId, model }) => {
+ const result = await digitalTwinService.runTests(providerId, model, testIds).catch(err => ({
+ providerId,
+ model,
+ error: err.message
+ }));
+
+ // Emit progress via Socket.IO
+ if (io) {
+ io.emit('digital-twin:test-progress', { providerId, model, result });
+ }
+
+ return { providerId, model, ...result };
+ })
+ );
+
+ res.json(results);
+}));
+
+/**
+ * GET /api/digital-twin/tests/history
+ * Get test run history
+ */
+router.get('/tests/history', asyncHandler(async (req, res) => {
+ const validation = validate(testHistoryQuerySchema, req.query);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const history = await digitalTwinService.getTestHistory(validation.data.limit);
+ res.json(history);
+}));
+
+// =============================================================================
+// ENRICHMENT
+// =============================================================================
+
+/**
+ * GET /api/digital-twin/enrich/categories
+ * List all enrichment categories
+ */
+router.get('/enrich/categories', asyncHandler(async (req, res) => {
+ const categories = digitalTwinService.getEnrichmentCategories();
+ res.json(categories);
+}));
+
+/**
+ * GET /api/digital-twin/enrich/progress
+ * Get enrichment progress
+ */
+router.get('/enrich/progress', asyncHandler(async (req, res) => {
+ const progress = await digitalTwinService.getEnrichmentProgress();
+ res.json(progress);
+}));
+
+/**
+ * POST /api/digital-twin/enrich/question
+ * Get next question for a category
+ */
+router.post('/enrich/question', asyncHandler(async (req, res) => {
+ const validation = validate(enrichmentQuestionInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { category, providerOverride, modelOverride } = validation.data;
+ const question = await digitalTwinService.generateEnrichmentQuestion(category, providerOverride, modelOverride);
+ res.json(question);
+}));
+
+/**
+ * POST /api/digital-twin/enrich/answer
+ * Submit answer and update digital twin documents
+ */
+router.post('/enrich/answer', asyncHandler(async (req, res) => {
+ const validation = validate(enrichmentAnswerInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const result = await digitalTwinService.processEnrichmentAnswer(validation.data);
+ res.json(result);
+}));
+
+/**
+ * POST /api/digital-twin/enrich/analyze-list
+ * Analyze a list of items (books, movies, music) and generate document content
+ */
+router.post('/enrich/analyze-list', asyncHandler(async (req, res) => {
+ const validation = validate(analyzeListInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { category, items, providerId, model } = validation.data;
+ const result = await digitalTwinService.analyzeEnrichmentList(category, items, providerId, model);
+ res.json(result);
+}));
+
+/**
+ * POST /api/digital-twin/enrich/save-list
+ * Save analyzed list content to document
+ */
+router.post('/enrich/save-list', asyncHandler(async (req, res) => {
+ const validation = validate(saveListDocumentInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { category, content, items } = validation.data;
+ const result = await digitalTwinService.saveEnrichmentListDocument(category, content, items);
+ res.json(result);
+}));
+
+/**
+ * GET /api/digital-twin/enrich/list-items/:category
+ * Get previously saved list items for a category
+ */
+router.get('/enrich/list-items/:category', asyncHandler(async (req, res) => {
+ const validation = validate(getListItemsInputSchema, { category: req.params.category });
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const items = await digitalTwinService.getEnrichmentListItems(validation.data.category);
+ res.json(items);
+}));
+
+// =============================================================================
+// EXPORT
+// =============================================================================
+
+/**
+ * GET /api/digital-twin/export/formats
+ * List available export formats
+ */
+router.get('/export/formats', asyncHandler(async (req, res) => {
+ const formats = digitalTwinService.getExportFormats();
+ res.json(formats);
+}));
+
+/**
+ * POST /api/digital-twin/export
+ * Export soul in specified format
+ */
+router.post('/export', asyncHandler(async (req, res) => {
+ const validation = validate(exportInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { format, documentIds, includeDisabled } = validation.data;
+ const exported = await digitalTwinService.exportDigitalTwin(format, documentIds, includeDisabled);
+ res.json(exported);
+}));
+
+// =============================================================================
+// SETTINGS
+// =============================================================================
+
+/**
+ * GET /api/digital-twin/settings
+ * Get digital twin settings
+ */
+router.get('/settings', asyncHandler(async (req, res) => {
+ const meta = await digitalTwinService.loadMeta();
+ res.json(meta.settings);
+}));
+
+/**
+ * PUT /api/digital-twin/settings
+ * Update digital twin settings
+ */
+router.put('/settings', asyncHandler(async (req, res) => {
+ const validation = validate(settingsUpdateInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const settings = await digitalTwinService.updateSettings(validation.data);
+ res.json(settings);
+}));
+
+// =============================================================================
+// VALIDATION & ANALYSIS
+// =============================================================================
+
+/**
+ * GET /api/digital-twin/validate/completeness
+ * Check digital twin document completeness
+ */
+router.get('/validate/completeness', asyncHandler(async (req, res) => {
+ const result = await digitalTwinService.validateCompleteness();
+ res.json(result);
+}));
+
+/**
+ * POST /api/digital-twin/validate/contradictions
+ * Detect contradictions in digital twin documents using AI
+ */
+router.post('/validate/contradictions', asyncHandler(async (req, res) => {
+ const validation = validate(contradictionInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { providerId, model } = validation.data;
+ const result = await digitalTwinService.detectContradictions(providerId, model);
+ res.json(result);
+}));
+
+/**
+ * POST /api/digital-twin/tests/generate
+ * Generate behavioral tests from soul content
+ */
+router.post('/tests/generate', asyncHandler(async (req, res) => {
+ const validation = validate(generateTestsInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { providerId, model } = validation.data;
+ const result = await digitalTwinService.generateDynamicTests(providerId, model);
+ res.json(result);
+}));
+
+/**
+ * POST /api/digital-twin/analyze-writing
+ * Analyze writing samples to extract communication patterns
+ */
+router.post('/analyze-writing', asyncHandler(async (req, res) => {
+ const validation = validate(writingAnalysisInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { samples, providerId, model } = validation.data;
+ const result = await digitalTwinService.analyzeWritingSamples(samples, providerId, model);
+ res.json(result);
+}));
+
+// =============================================================================
+// TRAITS & CONFIDENCE (Phase 1 & 2)
+// =============================================================================
+
+/**
+ * GET /api/digital-twin/traits
+ * Get current personality traits
+ */
+router.get('/traits', asyncHandler(async (req, res) => {
+ const traits = await digitalTwinService.getTraits();
+ res.json({ traits });
+}));
+
+/**
+ * POST /api/digital-twin/traits/analyze
+ * Analyze documents to extract personality traits using AI
+ */
+router.post('/traits/analyze', asyncHandler(async (req, res) => {
+ const validation = validate(analyzeTraitsInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { providerId, model, forceReanalyze } = validation.data;
+ const result = await digitalTwinService.analyzeTraits(providerId, model, forceReanalyze);
+ res.json(result);
+}));
+
+/**
+ * PUT /api/digital-twin/traits
+ * Manually update personality traits
+ */
+router.put('/traits', asyncHandler(async (req, res) => {
+ const validation = validate(updateTraitsInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const traits = await digitalTwinService.updateTraits(validation.data);
+ res.json({ traits });
+}));
+
+/**
+ * GET /api/digital-twin/confidence
+ * Get current confidence scores
+ */
+router.get('/confidence', asyncHandler(async (req, res) => {
+ const confidence = await digitalTwinService.getConfidence();
+ res.json({ confidence });
+}));
+
+/**
+ * POST /api/digital-twin/confidence/calculate
+ * Calculate confidence scores (optionally with AI analysis)
+ */
+router.post('/confidence/calculate', asyncHandler(async (req, res) => {
+ const validation = validate(calculateConfidenceInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { providerId, model } = validation.data;
+ const result = await digitalTwinService.calculateConfidence(providerId, model);
+ res.json(result);
+}));
+
+/**
+ * GET /api/digital-twin/gaps
+ * Get gap recommendations for personality enrichment
+ */
+router.get('/gaps', asyncHandler(async (req, res) => {
+ const gaps = await digitalTwinService.getGapRecommendations();
+ res.json({ gaps });
+}));
+
+// =============================================================================
+// EXTERNAL DATA IMPORT (Phase 4)
+// =============================================================================
+
+/**
+ * GET /api/digital-twin/import/sources
+ * Get list of supported import sources
+ */
+router.get('/import/sources', asyncHandler(async (req, res) => {
+ const sources = digitalTwinService.getImportSources();
+ res.json({ sources });
+}));
+
+/**
+ * POST /api/digital-twin/import/analyze
+ * Analyze imported external data
+ */
+router.post('/import/analyze', asyncHandler(async (req, res) => {
+ const validation = validate(importDataInputSchema, req.body);
+ if (!validation.success) {
+ throw new ServerError('Validation failed', {
+ status: 400,
+ code: 'VALIDATION_ERROR',
+ context: { details: validation.errors }
+ });
+ }
+
+ const { source, data, providerId, model } = validation.data;
+ const result = await digitalTwinService.analyzeImportedData(source, data, providerId, model);
+
+ if (result.error) {
+ throw new ServerError(result.error, {
+ status: 400,
+ code: 'IMPORT_ANALYSIS_ERROR'
+ });
+ }
+
+ res.json(result);
+}));
+
+/**
+ * POST /api/digital-twin/import/save
+ * Save import analysis as a document
+ */
+router.post('/import/save', asyncHandler(async (req, res) => {
+ const { source, suggestedDoc } = req.body;
+
+ if (!source || !suggestedDoc || !suggestedDoc.filename || !suggestedDoc.content) {
+ throw new ServerError('Missing required fields: source and suggestedDoc', {
+ status: 400,
+ code: 'VALIDATION_ERROR'
+ });
+ }
+
+ const document = await digitalTwinService.saveImportAsDocument(source, suggestedDoc);
+ res.json({ document, message: 'Document saved successfully' });
+}));
+
+export default router;
diff --git a/server/routes/lmstudio.js b/server/routes/lmstudio.js
new file mode 100644
index 0000000..f8ecaff
--- /dev/null
+++ b/server/routes/lmstudio.js
@@ -0,0 +1,202 @@
+/**
+ * LM Studio Routes
+ *
+ * REST endpoints for LM Studio model management.
+ */
+
+import { Router } from 'express'
+import * as lmStudioManager from '../services/lmStudioManager.js'
+import * as localThinking from '../services/localThinking.js'
+
+const router = Router()
+
+/**
+ * GET /api/lmstudio/status
+ * Check LM Studio availability and loaded models
+ */
+router.get('/status', async (req, res) => {
+ const status = await lmStudioManager.getStatus()
+ const thinkingStats = localThinking.getStats()
+
+ res.json({
+ ...status,
+ thinkingStats
+ })
+})
+
+/**
+ * GET /api/lmstudio/models
+ * List available/loaded models
+ */
+router.get('/models', async (req, res) => {
+ const available = await lmStudioManager.checkLMStudioAvailable()
+
+ if (!available) {
+ return res.status(503).json({
+ error: 'LM Studio not available',
+ available: false
+ })
+ }
+
+ const models = await lmStudioManager.getLoadedModels(true)
+ const recommended = await lmStudioManager.getRecommendedThinkingModel()
+
+ res.json({
+ available: true,
+ count: models.length,
+ models,
+ recommendedThinkingModel: recommended
+ })
+})
+
+/**
+ * POST /api/lmstudio/download
+ * Download a model by ID
+ */
+router.post('/download', async (req, res) => {
+ const { modelId } = req.body
+
+ if (!modelId) {
+ return res.status(400).json({ error: 'modelId is required' })
+ }
+
+ const result = await lmStudioManager.downloadModel(modelId)
+ res.json(result)
+})
+
+/**
+ * POST /api/lmstudio/load
+ * Load a model into memory
+ */
+router.post('/load', async (req, res) => {
+ const { modelId } = req.body
+
+ if (!modelId) {
+ return res.status(400).json({ error: 'modelId is required' })
+ }
+
+ const result = await lmStudioManager.loadModel(modelId)
+ res.json(result)
+})
+
+/**
+ * POST /api/lmstudio/unload
+ * Unload a model from memory
+ */
+router.post('/unload', async (req, res) => {
+ const { modelId } = req.body
+
+ if (!modelId) {
+ return res.status(400).json({ error: 'modelId is required' })
+ }
+
+ const result = await lmStudioManager.unloadModel(modelId)
+ res.json(result)
+})
+
+/**
+ * POST /api/lmstudio/completion
+ * Quick completion using local model
+ */
+router.post('/completion', async (req, res) => {
+ const { prompt, model, maxTokens, temperature, systemPrompt } = req.body
+
+ if (!prompt) {
+ return res.status(400).json({ error: 'prompt is required' })
+ }
+
+ const result = await lmStudioManager.quickCompletion(prompt, {
+ model,
+ maxTokens,
+ temperature,
+ systemPrompt
+ })
+
+ res.json(result)
+})
+
+/**
+ * POST /api/lmstudio/analyze-task
+ * Analyze a task for complexity and escalation needs
+ */
+router.post('/analyze-task', async (req, res) => {
+ const { description, id, metadata } = req.body
+
+ if (!description) {
+ return res.status(400).json({ error: 'description is required' })
+ }
+
+ const analysis = await localThinking.analyzeTask({
+ id,
+ description,
+ metadata
+ })
+
+ res.json(analysis)
+})
+
+/**
+ * POST /api/lmstudio/classify-memory
+ * Classify memory content
+ */
+router.post('/classify-memory', async (req, res) => {
+ const { content } = req.body
+
+ if (!content) {
+ return res.status(400).json({ error: 'content is required' })
+ }
+
+ const classification = await localThinking.classifyMemory(content)
+ res.json(classification)
+})
+
+/**
+ * POST /api/lmstudio/embeddings
+ * Get embeddings for text
+ */
+router.post('/embeddings', async (req, res) => {
+ const { text, model } = req.body
+
+ if (!text) {
+ return res.status(400).json({ error: 'text is required' })
+ }
+
+ const result = await lmStudioManager.getEmbeddings(text, { model })
+ res.json(result)
+})
+
+/**
+ * PUT /api/lmstudio/config
+ * Update LM Studio configuration
+ */
+router.put('/config', async (req, res) => {
+ const { baseUrl, timeout, defaultThinkingModel } = req.body
+
+ const config = lmStudioManager.updateConfig({
+ baseUrl,
+ timeout,
+ defaultThinkingModel
+ })
+
+ res.json({ success: true, config })
+})
+
+/**
+ * GET /api/lmstudio/thinking-stats
+ * Get local thinking statistics
+ */
+router.get('/thinking-stats', (req, res) => {
+ const stats = localThinking.getStats()
+ res.json(stats)
+})
+
+/**
+ * POST /api/lmstudio/reset-cache
+ * Reset LM Studio cached state
+ */
+router.post('/reset-cache', (req, res) => {
+ lmStudioManager.resetCache()
+ res.json({ success: true, message: 'Cache reset' })
+})
+
+export default router
diff --git a/server/routes/media.js b/server/routes/media.js
new file mode 100644
index 0000000..8e542ef
--- /dev/null
+++ b/server/routes/media.js
@@ -0,0 +1,119 @@
+import express from 'express';
+import { z } from 'zod';
+import mediaService from '../services/mediaService.js';
+
+const router = express.Router();
+
+// Validation schemas
+const startMediaSchema = z.object({
+ videoDeviceId: z.string().optional(),
+ audioDeviceId: z.string().optional(),
+ video: z.boolean().default(true),
+ audio: z.boolean().default(true)
+});
+
+// List available media devices
+router.get('/devices', async (req, res) => {
+ const devices = await mediaService.listDevices();
+ res.json(devices);
+});
+
+// Get current streaming status
+router.get('/status', (req, res) => {
+ res.json({
+ video: mediaService.isVideoStreaming(),
+ audio: mediaService.isAudioStreaming()
+ });
+});
+
+// Start media streaming
+router.post('/start', async (req, res) => {
+ const { videoDeviceId = '0', audioDeviceId = '0', video = true, audio = true } = startMediaSchema.parse(req.body);
+
+ if (video) {
+ mediaService.startVideoStream(videoDeviceId);
+ }
+
+ if (audio) {
+ mediaService.startAudioStream(audioDeviceId);
+ }
+
+ res.json({
+ success: true,
+ video: mediaService.isVideoStreaming(),
+ audio: mediaService.isAudioStreaming()
+ });
+});
+
+// Stop media streaming
+router.post('/stop', (req, res) => {
+ mediaService.stopAll();
+ res.json({ success: true });
+});
+
+// Stream video
+router.get('/video', (req, res) => {
+ const videoStream = mediaService.getVideoStream();
+
+ if (!videoStream) {
+ return res.status(404).json({ error: 'Video stream not active' });
+ }
+
+ res.setHeader('Content-Type', 'multipart/x-mixed-replace; boundary=frame');
+ res.setHeader('Cache-Control', 'no-cache, no-store, must-revalidate');
+ res.setHeader('Pragma', 'no-cache');
+ res.setHeader('Expires', '0');
+ res.setHeader('Access-Control-Allow-Origin', '*');
+
+ let frameBuffer = Buffer.alloc(0);
+
+ videoStream.on('data', (chunk) => {
+ frameBuffer = Buffer.concat([frameBuffer, chunk]);
+
+ // Look for JPEG markers
+ let start = frameBuffer.indexOf(Buffer.from([0xFF, 0xD8])); // JPEG start
+ let end = frameBuffer.indexOf(Buffer.from([0xFF, 0xD9]), start + 2); // JPEG end
+
+ while (start !== -1 && end !== -1) {
+ const frame = frameBuffer.slice(start, end + 2);
+ res.write(`--frame\r\nContent-Type: image/jpeg\r\nContent-Length: ${frame.length}\r\n\r\n`);
+ res.write(frame);
+ res.write('\r\n');
+
+ frameBuffer = frameBuffer.slice(end + 2);
+ start = frameBuffer.indexOf(Buffer.from([0xFF, 0xD8]));
+ end = frameBuffer.indexOf(Buffer.from([0xFF, 0xD9]), start + 2);
+ }
+ });
+
+ videoStream.on('end', () => {
+ res.end();
+ });
+
+ req.on('close', () => {
+ console.log('๐น Video client disconnected');
+ });
+});
+
+// Stream audio
+router.get('/audio', (req, res) => {
+ const audioStream = mediaService.getAudioStream();
+
+ if (!audioStream) {
+ return res.status(404).json({ error: 'Audio stream not active' });
+ }
+
+ res.setHeader('Content-Type', 'audio/webm');
+ res.setHeader('Cache-Control', 'no-cache, no-store, must-revalidate');
+ res.setHeader('Pragma', 'no-cache');
+ res.setHeader('Expires', '0');
+ res.setHeader('Access-Control-Allow-Origin', '*');
+
+ audioStream.pipe(res);
+
+ req.on('close', () => {
+ console.log('๐ค Audio client disconnected');
+ });
+});
+
+export default router;
diff --git a/server/routes/prompts.js b/server/routes/prompts.js
index 9c96ebb..2ae62e5 100644
--- a/server/routes/prompts.js
+++ b/server/routes/prompts.js
@@ -1,87 +1,155 @@
import { Router } from 'express';
-import * as promptService from '../services/promptService.js';
import { asyncHandler, ServerError } from '../lib/errorHandler.js';
-const router = Router();
-
-// GET /api/prompts - List all stages
-router.get('/', asyncHandler(async (req, res) => {
- const stages = promptService.getStages();
- res.json({ stages });
-}));
-
-// GET /api/prompts/variables - List all variables
-router.get('/variables', asyncHandler(async (req, res) => {
- const variables = promptService.getVariables();
- res.json({ variables });
-}));
-
-// GET /api/prompts/variables/:key - Get a variable
-router.get('/variables/:key', asyncHandler(async (req, res) => {
- const variable = promptService.getVariable(req.params.key);
- if (!variable) {
- throw new ServerError('Variable not found', { status: 404, code: 'NOT_FOUND' });
- }
- res.json({ key: req.params.key, ...variable });
-}));
-
-// POST /api/prompts/variables - Create a variable
-router.post('/variables', asyncHandler(async (req, res) => {
- const { key, name, category, content } = req.body;
- if (!key || !content) {
- throw new ServerError('key and content are required', { status: 400, code: 'VALIDATION_ERROR' });
- }
- await promptService.createVariable(key, { name, category, content });
- res.json({ success: true, key });
-}));
-
-// PUT /api/prompts/variables/:key - Update a variable
-router.put('/variables/:key', asyncHandler(async (req, res) => {
- const { name, category, content } = req.body;
- await promptService.updateVariable(req.params.key, { name, category, content });
- res.json({ success: true });
-}));
-
-// DELETE /api/prompts/variables/:key - Delete a variable
-router.delete('/variables/:key', asyncHandler(async (req, res) => {
- await promptService.deleteVariable(req.params.key);
- res.json({ success: true });
-}));
-
-// GET /api/prompts/:stage - Get stage with template
-router.get('/:stage', asyncHandler(async (req, res) => {
- const stage = promptService.getStage(req.params.stage);
- if (!stage) {
- throw new ServerError('Stage not found', { status: 404, code: 'NOT_FOUND' });
- }
- const template = await promptService.getStageTemplate(req.params.stage);
- res.json({ ...stage, template });
-}));
-
-// PUT /api/prompts/:stage - Update stage config and/or template
-router.put('/:stage', asyncHandler(async (req, res) => {
- const { template, ...config } = req.body;
-
- if (Object.keys(config).length > 0) {
- await promptService.updateStageConfig(req.params.stage, config);
- }
- if (template !== undefined) {
- await promptService.updateStageTemplate(req.params.stage, template);
- }
- res.json({ success: true });
-}));
-
-// POST /api/prompts/:stage/preview - Preview compiled prompt
-router.post('/:stage/preview', asyncHandler(async (req, res) => {
- const { testData = {} } = req.body;
- const preview = await promptService.previewPrompt(req.params.stage, testData);
- res.json({ preview });
-}));
-
-// POST /api/prompts/reload - Reload prompts from disk
-router.post('/reload', asyncHandler(async (req, res) => {
- await promptService.loadPrompts();
- res.json({ success: true });
-}));
-
-export default router;
+/**
+ * Create PortOS-specific prompts routes
+ * Wraps toolkit routes to match PortOS API contract
+ */
+export function createPortOSPromptsRoutes(aiToolkit) {
+ const router = Router();
+ const promptsService = aiToolkit.services.prompts;
+
+ // GET /api/prompts - List all stages (wrapped in {stages: ...})
+ router.get('/', asyncHandler(async (req, res) => {
+ const stages = promptsService.getStages();
+ res.json({ stages });
+ }));
+
+ // GET /api/prompts/variables - List all variables (wrapped in {variables: ...})
+ router.get('/variables', asyncHandler(async (req, res) => {
+ const variables = promptsService.getVariables();
+ res.json({ variables });
+ }));
+
+ // GET /api/prompts/variables/:key - Get a variable
+ router.get('/variables/:key', asyncHandler(async (req, res) => {
+ const variable = promptsService.getVariable(req.params.key);
+ if (!variable) {
+ throw new ServerError('Variable not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json({ key: req.params.key, ...variable });
+ }));
+
+ // POST /api/prompts/variables - Create a variable
+ router.post('/variables', asyncHandler(async (req, res) => {
+ const { key, name, category, content } = req.body;
+ if (!key || !content) {
+ throw new ServerError('key and content are required', { status: 400, code: 'VALIDATION_ERROR' });
+ }
+ await promptsService.createVariable(key, { name, category, content });
+ res.json({ success: true, key });
+ }));
+
+ // PUT /api/prompts/variables/:key - Update a variable
+ router.put('/variables/:key', asyncHandler(async (req, res) => {
+ const { name, category, content } = req.body;
+ await promptsService.updateVariable(req.params.key, { name, category, content });
+ res.json({ success: true });
+ }));
+
+ // DELETE /api/prompts/variables/:key - Delete a variable
+ router.delete('/variables/:key', asyncHandler(async (req, res) => {
+ await promptsService.deleteVariable(req.params.key);
+ res.json({ success: true });
+ }));
+
+ // GET /api/prompts/:stage - Get stage with template
+ router.get('/:stage', asyncHandler(async (req, res) => {
+ const stage = promptsService.getStage(req.params.stage);
+ if (!stage) {
+ throw new ServerError('Stage not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ const template = await promptsService.getStageTemplate(req.params.stage);
+ res.json({ ...stage, template });
+ }));
+
+ // POST /api/prompts - Create a new stage
+ router.post('/', asyncHandler(async (req, res) => {
+ const { stageName, name, description, model = 'default', returnsJson = false, variables = [], template = '' } = req.body;
+ if (!stageName || !name) {
+ throw new ServerError('stageName and name are required', { status: 400, code: 'VALIDATION_ERROR' });
+ }
+ const config = { name, description, model, returnsJson, variables };
+ await promptsService.createStage(stageName, config, template);
+ res.json({ success: true, stageName });
+ }));
+
+ // PUT /api/prompts/:stage - Update stage config and/or template
+ router.put('/:stage', asyncHandler(async (req, res) => {
+ const { template, ...config } = req.body;
+
+ if (Object.keys(config).length > 0) {
+ await promptsService.updateStageConfig(req.params.stage, config);
+ }
+ if (template !== undefined) {
+ await promptsService.updateStageTemplate(req.params.stage, template);
+ }
+ res.json({ success: true });
+ }));
+
+ // GET /api/prompts/:stage/usage - Check if stage is in use
+ router.get('/:stage/usage', asyncHandler(async (req, res) => {
+ const stageName = req.params.stage;
+
+ // Known system stages that are referenced in code
+ const systemStages = {
+ 'cos-agent-briefing': ['CoS sub-agent task briefing'],
+ 'cos-evaluate': ['CoS task evaluation'],
+ 'cos-report-summary': ['CoS daily reports'],
+ 'cos-self-improvement': ['CoS self-improvement tasks'],
+ 'cos-task-enhance': ['CoS task prompt enhancement'],
+ 'brain-classifier': ['Brain thought classification'],
+ 'brain-daily-digest': ['Brain daily digest generation'],
+ 'brain-weekly-review': ['Brain weekly review generation'],
+ 'memory-evaluate': ['Memory extraction from agent output'],
+ 'app-detection': ['Project directory analysis']
+ };
+
+ const isSystemStage = stageName in systemStages;
+ const usedBy = systemStages[stageName] || [];
+
+ res.json({
+ isSystemStage,
+ usedBy,
+ canDelete: !isSystemStage,
+ warning: isSystemStage ? 'This is a system stage used by PortOS features. Deleting it may break functionality.' : null
+ });
+ }));
+
+ // DELETE /api/prompts/:stage - Delete a stage
+ router.delete('/:stage', asyncHandler(async (req, res) => {
+ const stageName = req.params.stage;
+
+ // Check if it's a system stage
+ const systemStages = [
+ 'cos-agent-briefing', 'cos-evaluate', 'cos-report-summary', 'cos-self-improvement',
+ 'cos-task-enhance', 'brain-classifier', 'brain-daily-digest', 'brain-weekly-review',
+ 'memory-evaluate', 'app-detection'
+ ];
+
+ if (systemStages.includes(stageName) && req.query.force !== 'true') {
+ throw new ServerError(
+ 'Cannot delete system stage. This stage is used by PortOS features. Add ?force=true to delete anyway.',
+ { status: 400, code: 'SYSTEM_STAGE_PROTECTED' }
+ );
+ }
+
+ await promptsService.deleteStage(stageName);
+ res.json({ success: true });
+ }));
+
+ // POST /api/prompts/:stage/preview - Preview compiled prompt
+ router.post('/:stage/preview', asyncHandler(async (req, res) => {
+ const { testData = {} } = req.body;
+ const preview = await promptsService.previewPrompt(req.params.stage, testData);
+ res.json({ preview });
+ }));
+
+ // POST /api/prompts/reload - Reload prompts from disk
+ router.post('/reload', asyncHandler(async (req, res) => {
+ await promptsService.init();
+ res.json({ success: true });
+ }));
+
+ return router;
+}
diff --git a/server/routes/prompts.old.js b/server/routes/prompts.old.js
new file mode 100644
index 0000000..9c96ebb
--- /dev/null
+++ b/server/routes/prompts.old.js
@@ -0,0 +1,87 @@
+import { Router } from 'express';
+import * as promptService from '../services/promptService.js';
+import { asyncHandler, ServerError } from '../lib/errorHandler.js';
+
+const router = Router();
+
+// GET /api/prompts - List all stages
+router.get('/', asyncHandler(async (req, res) => {
+ const stages = promptService.getStages();
+ res.json({ stages });
+}));
+
+// GET /api/prompts/variables - List all variables
+router.get('/variables', asyncHandler(async (req, res) => {
+ const variables = promptService.getVariables();
+ res.json({ variables });
+}));
+
+// GET /api/prompts/variables/:key - Get a variable
+router.get('/variables/:key', asyncHandler(async (req, res) => {
+ const variable = promptService.getVariable(req.params.key);
+ if (!variable) {
+ throw new ServerError('Variable not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ res.json({ key: req.params.key, ...variable });
+}));
+
+// POST /api/prompts/variables - Create a variable
+router.post('/variables', asyncHandler(async (req, res) => {
+ const { key, name, category, content } = req.body;
+ if (!key || !content) {
+ throw new ServerError('key and content are required', { status: 400, code: 'VALIDATION_ERROR' });
+ }
+ await promptService.createVariable(key, { name, category, content });
+ res.json({ success: true, key });
+}));
+
+// PUT /api/prompts/variables/:key - Update a variable
+router.put('/variables/:key', asyncHandler(async (req, res) => {
+ const { name, category, content } = req.body;
+ await promptService.updateVariable(req.params.key, { name, category, content });
+ res.json({ success: true });
+}));
+
+// DELETE /api/prompts/variables/:key - Delete a variable
+router.delete('/variables/:key', asyncHandler(async (req, res) => {
+ await promptService.deleteVariable(req.params.key);
+ res.json({ success: true });
+}));
+
+// GET /api/prompts/:stage - Get stage with template
+router.get('/:stage', asyncHandler(async (req, res) => {
+ const stage = promptService.getStage(req.params.stage);
+ if (!stage) {
+ throw new ServerError('Stage not found', { status: 404, code: 'NOT_FOUND' });
+ }
+ const template = await promptService.getStageTemplate(req.params.stage);
+ res.json({ ...stage, template });
+}));
+
+// PUT /api/prompts/:stage - Update stage config and/or template
+router.put('/:stage', asyncHandler(async (req, res) => {
+ const { template, ...config } = req.body;
+
+ if (Object.keys(config).length > 0) {
+ await promptService.updateStageConfig(req.params.stage, config);
+ }
+ if (template !== undefined) {
+ await promptService.updateStageTemplate(req.params.stage, template);
+ }
+ res.json({ success: true });
+}));
+
+// POST /api/prompts/:stage/preview - Preview compiled prompt
+router.post('/:stage/preview', asyncHandler(async (req, res) => {
+ const { testData = {} } = req.body;
+ const preview = await promptService.previewPrompt(req.params.stage, testData);
+ res.json({ preview });
+}));
+
+// POST /api/prompts/reload - Reload prompts from disk
+router.post('/reload', asyncHandler(async (req, res) => {
+ await promptService.loadPrompts();
+ res.json({ success: true });
+}));
+
+export default router;
diff --git a/server/routes/providers.js b/server/routes/providers.js
index 68e4755..a9a4d59 100644
--- a/server/routes/providers.js
+++ b/server/routes/providers.js
@@ -1,101 +1,78 @@
import { Router } from 'express';
import { asyncHandler, ServerError } from '../lib/errorHandler.js';
-import * as providers from '../services/providers.js';
-
-const router = Router();
-
-// GET /api/providers - List all providers
-router.get('/', asyncHandler(async (req, res) => {
- const data = await providers.getAllProviders();
- res.json(data);
-}));
-
-// GET /api/providers/active - Get active provider
-router.get('/active', asyncHandler(async (req, res) => {
- const provider = await providers.getActiveProvider();
- res.json(provider);
-}));
-
-// PUT /api/providers/active - Set active provider
-router.put('/active', asyncHandler(async (req, res) => {
- const { id } = req.body;
- if (!id) {
- throw new ServerError('Provider ID required', { status: 400, code: 'MISSING_ID' });
- }
-
- const provider = await providers.setActiveProvider(id);
-
- if (!provider) {
- throw new ServerError('Provider not found', { status: 404, code: 'NOT_FOUND' });
- }
-
- res.json(provider);
-}));
-
-// GET /api/providers/:id - Get provider by ID
-router.get('/:id', asyncHandler(async (req, res) => {
- const provider = await providers.getProviderById(req.params.id);
-
- if (!provider) {
- throw new ServerError('Provider not found', { status: 404, code: 'NOT_FOUND' });
- }
-
- res.json(provider);
-}));
-
-// POST /api/providers - Create new provider
-router.post('/', asyncHandler(async (req, res) => {
- const { name, type } = req.body;
-
- if (!name) {
- throw new ServerError('Name is required', { status: 400, code: 'VALIDATION_ERROR' });
- }
-
- if (!type || !['cli', 'api'].includes(type)) {
- throw new ServerError('Type must be "cli" or "api"', { status: 400, code: 'VALIDATION_ERROR' });
- }
-
- const provider = await providers.createProvider(req.body);
- res.status(201).json(provider);
-}));
-
-// PUT /api/providers/:id - Update provider
-router.put('/:id', asyncHandler(async (req, res) => {
- const provider = await providers.updateProvider(req.params.id, req.body);
-
- if (!provider) {
- throw new ServerError('Provider not found', { status: 404, code: 'NOT_FOUND' });
- }
-
- res.json(provider);
-}));
-
-// DELETE /api/providers/:id - Delete provider
-router.delete('/:id', asyncHandler(async (req, res) => {
- const deleted = await providers.deleteProvider(req.params.id);
-
- if (!deleted) {
- throw new ServerError('Provider not found', { status: 404, code: 'NOT_FOUND' });
- }
-
- res.status(204).send();
-}));
-
-// POST /api/providers/:id/test - Test provider connectivity
-router.post('/:id/test', asyncHandler(async (req, res) => {
- const result = await providers.testProvider(req.params.id);
- res.json(result);
-}));
-
-// POST /api/providers/:id/refresh-models - Refresh models for API provider
-router.post('/:id/refresh-models', asyncHandler(async (req, res) => {
- const provider = await providers.refreshProviderModels(req.params.id);
-
- if (!provider) {
- throw new ServerError('Provider not found or not an API type', { status: 404, code: 'NOT_FOUND' });
- }
-
- res.json(provider);
-}));
-
-export default router;
+import { testVision, runVisionTestSuite, checkVisionHealth } from '../services/visionTest.js';
+import { getAllProviderStatuses, getProviderStatus, markProviderAvailable, getTimeUntilRecovery } from '../services/providerStatus.js';
+
+/**
+ * Create PortOS-specific provider routes
+ * Extends AI Toolkit routes with vision testing endpoints
+ */
+export function createPortOSProviderRoutes(aiToolkit) {
+ const router = Router();
+
+ // Mount all base toolkit routes
+ router.use('/', aiToolkit.routes.providers);
+
+ // PortOS-specific extension: Vision health check
+ router.get('/:id/vision-health', asyncHandler(async (req, res) => {
+ const result = await checkVisionHealth(req.params.id);
+ res.json(result);
+ }));
+
+ // PortOS-specific extension: Test vision with specific image
+ router.post('/:id/test-vision', asyncHandler(async (req, res) => {
+ const { imagePath, prompt, expectedContent, model } = req.body;
+
+ if (!imagePath) {
+ throw new ServerError('imagePath is required', { status: 400, code: 'VALIDATION_ERROR' });
+ }
+
+ const result = await testVision({
+ imagePath,
+ prompt: prompt || 'Describe what you see in this image.',
+ expectedContent: expectedContent || [],
+ providerId: req.params.id,
+ model
+ });
+
+ res.json(result);
+ }));
+
+ // PortOS-specific extension: Run full vision test suite
+ router.post('/:id/vision-suite', asyncHandler(async (req, res) => {
+ const { model } = req.body;
+ const result = await runVisionTestSuite(req.params.id, model);
+ res.json(result);
+ }));
+
+ // Provider status: Get all provider statuses (usage limits, availability)
+ router.get('/status', asyncHandler(async (req, res) => {
+ const statuses = getAllProviderStatuses();
+ // Enrich with time until recovery
+ const enriched = { ...statuses };
+ for (const [providerId, status] of Object.entries(enriched.providers)) {
+ enriched.providers[providerId] = {
+ ...status,
+ timeUntilRecovery: getTimeUntilRecovery(providerId)
+ };
+ }
+ res.json(enriched);
+ }));
+
+ // Provider status: Get single provider status
+ router.get('/:id/status', asyncHandler(async (req, res) => {
+ const status = getProviderStatus(req.params.id);
+ res.json({
+ ...status,
+ timeUntilRecovery: getTimeUntilRecovery(req.params.id)
+ });
+ }));
+
+ // Provider status: Manually mark provider as available (recovery)
+ router.post('/:id/status/recover', asyncHandler(async (req, res) => {
+ const status = await markProviderAvailable(req.params.id);
+ res.json({ success: true, status });
+ }));
+
+ return router;
+}
diff --git a/server/routes/providers.old.js b/server/routes/providers.old.js
new file mode 100644
index 0000000..eb10ae4
--- /dev/null
+++ b/server/routes/providers.old.js
@@ -0,0 +1,134 @@
+import { Router } from 'express';
+import { asyncHandler, ServerError } from '../lib/errorHandler.js';
+import * as providers from '../services/providers.js';
+import { testVision, runVisionTestSuite, checkVisionHealth } from '../services/visionTest.js';
+
+const router = Router();
+
+// GET /api/providers - List all providers
+router.get('/', asyncHandler(async (req, res) => {
+ const data = await providers.getAllProviders();
+ res.json(data);
+}));
+
+// GET /api/providers/active - Get active provider
+router.get('/active', asyncHandler(async (req, res) => {
+ const provider = await providers.getActiveProvider();
+ res.json(provider);
+}));
+
+// PUT /api/providers/active - Set active provider
+router.put('/active', asyncHandler(async (req, res) => {
+ const { id } = req.body;
+ if (!id) {
+ throw new ServerError('Provider ID required', { status: 400, code: 'MISSING_ID' });
+ }
+
+ const provider = await providers.setActiveProvider(id);
+
+ if (!provider) {
+ throw new ServerError('Provider not found', { status: 404, code: 'NOT_FOUND' });
+ }
+
+ res.json(provider);
+}));
+
+// GET /api/providers/:id - Get provider by ID
+router.get('/:id', asyncHandler(async (req, res) => {
+ const provider = await providers.getProviderById(req.params.id);
+
+ if (!provider) {
+ throw new ServerError('Provider not found', { status: 404, code: 'NOT_FOUND' });
+ }
+
+ res.json(provider);
+}));
+
+// POST /api/providers - Create new provider
+router.post('/', asyncHandler(async (req, res) => {
+ const { name, type } = req.body;
+
+ if (!name) {
+ throw new ServerError('Name is required', { status: 400, code: 'VALIDATION_ERROR' });
+ }
+
+ if (!type || !['cli', 'api'].includes(type)) {
+ throw new ServerError('Type must be "cli" or "api"', { status: 400, code: 'VALIDATION_ERROR' });
+ }
+
+ const provider = await providers.createProvider(req.body);
+ res.status(201).json(provider);
+}));
+
+// PUT /api/providers/:id - Update provider
+router.put('/:id', asyncHandler(async (req, res) => {
+ const provider = await providers.updateProvider(req.params.id, req.body);
+
+ if (!provider) {
+ throw new ServerError('Provider not found', { status: 404, code: 'NOT_FOUND' });
+ }
+
+ res.json(provider);
+}));
+
+// DELETE /api/providers/:id - Delete provider
+router.delete('/:id', asyncHandler(async (req, res) => {
+ const deleted = await providers.deleteProvider(req.params.id);
+
+ if (!deleted) {
+ throw new ServerError('Provider not found', { status: 404, code: 'NOT_FOUND' });
+ }
+
+ res.status(204).send();
+}));
+
+// POST /api/providers/:id/test - Test provider connectivity
+router.post('/:id/test', asyncHandler(async (req, res) => {
+ const result = await providers.testProvider(req.params.id);
+ res.json(result);
+}));
+
+// POST /api/providers/:id/refresh-models - Refresh models for API provider
+router.post('/:id/refresh-models', asyncHandler(async (req, res) => {
+ const provider = await providers.refreshProviderModels(req.params.id);
+
+ if (!provider) {
+ throw new ServerError('Provider not found or not an API type', { status: 404, code: 'NOT_FOUND' });
+ }
+
+ res.json(provider);
+}));
+
+// GET /api/providers/:id/vision-health - Check vision capability health
+router.get('/:id/vision-health', asyncHandler(async (req, res) => {
+ const result = await checkVisionHealth(req.params.id);
+ res.json(result);
+}));
+
+// POST /api/providers/:id/test-vision - Test vision with a specific image
+router.post('/:id/test-vision', asyncHandler(async (req, res) => {
+ const { imagePath, prompt, expectedContent, model } = req.body;
+
+ if (!imagePath) {
+ throw new ServerError('imagePath is required', { status: 400, code: 'VALIDATION_ERROR' });
+ }
+
+ const result = await testVision({
+ imagePath,
+ prompt: prompt || 'Describe what you see in this image.',
+ expectedContent: expectedContent || [],
+ providerId: req.params.id,
+ model
+ });
+
+ res.json(result);
+}));
+
+// POST /api/providers/:id/vision-suite - Run full vision test suite
+router.post('/:id/vision-suite', asyncHandler(async (req, res) => {
+ const { model } = req.body;
+ const result = await runVisionTestSuite(req.params.id, model);
+ res.json(result);
+}));
+
+export default router;
diff --git a/server/routes/runs.js b/server/routes/runs.js
index 9447513..7c649db 100644
--- a/server/routes/runs.js
+++ b/server/routes/runs.js
@@ -1,189 +1,9 @@
-import { Router } from 'express';
-import * as runner from '../services/runner.js';
-import { asyncHandler, ServerError } from '../lib/errorHandler.js';
-
-const router = Router();
-
-// GET /api/runs - List runs
-// Query params: limit, offset, source (all|devtools|cos-agent)
-router.get('/', asyncHandler(async (req, res, next) => {
- const limit = parseInt(req.query.limit) || 50;
- const offset = parseInt(req.query.offset) || 0;
- const source = req.query.source || 'all'; // all, devtools, cos-agent
-
- const result = await runner.listRuns(limit, offset, source);
- res.json(result);
-}));
-
-// POST /api/runs - Create and execute a new run
-router.post('/', asyncHandler(async (req, res, next) => {
- const { providerId, model, prompt, workspacePath, workspaceName, timeout } = req.body;
- console.log(`๐ POST /api/runs - provider: ${providerId}, model: ${model}, workspace: ${workspaceName}, timeout: ${timeout}ms`);
-
- if (!providerId) {
- throw new ServerError('providerId is required', {
- status: 400,
- code: 'VALIDATION_ERROR'
- });
- }
-
- if (!prompt) {
- throw new ServerError('prompt is required', {
- status: 400,
- code: 'VALIDATION_ERROR'
- });
- }
-
- const runData = await runner.createRun({
- providerId,
- model,
- prompt,
- workspacePath,
- workspaceName,
- timeout
- });
-
- const { runId, provider, metadata, timeout: effectiveTimeout } = runData;
- const io = req.app.get('io');
- console.log(`๐ Run created: ${runId}, provider type: ${provider.type}, command: ${provider.command}, timeout: ${effectiveTimeout}ms`);
-
- // Execute based on provider type
- if (provider.type === 'cli') {
- console.log(`๐ Executing CLI run: ${provider.command} with args: ${JSON.stringify(provider.args)}`);
- runner.executeCliRun(
- runId,
- provider,
- prompt,
- workspacePath,
- (data) => {
- // Stream output via Socket.IO
- console.log(`๐ค Emitting run:${runId}:data (${data.length} chars)`);
- io?.emit(`run:${runId}:data`, data);
- },
- (finalMetadata) => {
- console.log(`โ
Run complete: ${runId}, success: ${finalMetadata.success}`);
- io?.emit(`run:${runId}:complete`, finalMetadata);
- },
- effectiveTimeout
- );
- } else if (provider.type === 'api') {
- runner.executeApiRun(
- runId,
- provider,
- model,
- prompt,
- workspacePath,
- (data) => {
- io?.emit(`run:${runId}:data`, data);
- },
- (finalMetadata) => {
- io?.emit(`run:${runId}:complete`, finalMetadata);
- }
- );
- }
-
- // Return immediately with run ID
- res.status(202).json({
- runId,
- status: 'started',
- metadata
- });
-}));
-
-// GET /api/runs/:id - Get run metadata
-router.get('/:id', asyncHandler(async (req, res, next) => {
- const metadata = await runner.getRun(req.params.id);
-
- if (!metadata) {
- throw new ServerError('Run not found', {
- status: 404,
- code: 'NOT_FOUND'
- });
- }
-
- const isActive = await runner.isRunActive(req.params.id);
- res.json({
- ...metadata,
- isActive
- });
-}));
-
-// GET /api/runs/:id/output - Get run output
-router.get('/:id/output', asyncHandler(async (req, res, next) => {
- const output = await runner.getRunOutput(req.params.id);
-
- if (output === null) {
- throw new ServerError('Run not found', {
- status: 404,
- code: 'NOT_FOUND'
- });
- }
-
- res.type('text/plain').send(output);
-}));
-
-// GET /api/runs/:id/prompt - Get run prompt
-router.get('/:id/prompt', asyncHandler(async (req, res, next) => {
- const prompt = await runner.getRunPrompt(req.params.id);
-
- if (prompt === null) {
- throw new ServerError('Run not found', {
- status: 404,
- code: 'NOT_FOUND'
- });
- }
-
- res.type('text/plain').send(prompt);
-}));
-
-// POST /api/runs/:id/stop - Stop a running execution
-router.post('/:id/stop', asyncHandler(async (req, res, next) => {
- const stopped = await runner.stopRun(req.params.id);
-
- if (!stopped) {
- throw new ServerError('Run not found or not active', {
- status: 404,
- code: 'NOT_ACTIVE'
- });
- }
-
- res.json({ stopped: true });
-}));
-
-// DELETE /api/runs/:id - Delete run and artifacts
-router.delete('/:id', asyncHandler(async (req, res, next) => {
- // Don't allow deleting active runs
- const isActive = await runner.isRunActive(req.params.id);
- if (isActive) {
- throw new ServerError('Cannot delete active run', {
- status: 409,
- code: 'RUN_ACTIVE'
- });
- }
-
- const deleted = await runner.deleteRun(req.params.id);
-
- if (!deleted) {
- throw new ServerError('Run not found', {
- status: 404,
- code: 'NOT_FOUND'
- });
- }
-
- res.status(204).send();
-}));
-
-// DELETE /api/runs - Delete all failed runs
-// Requires ?confirm=true query parameter to prevent accidental deletion
-router.delete('/', asyncHandler(async (req, res, next) => {
- if (req.query.confirm !== 'true') {
- throw new ServerError('Destructive operation requires ?confirm=true', {
- status: 400,
- code: 'CONFIRMATION_REQUIRED'
- });
- }
- const deletedCount = await runner.deleteFailedRuns();
- res.json({ deleted: deletedCount });
-}));
-
-export default router;
+/**
+ * Create PortOS-specific runs routes
+ * Currently just uses toolkit routes directly, but wrapper allows for future extensions
+ */
+export function createPortOSRunsRoutes(aiToolkit) {
+ // For now, just return the toolkit routes directly
+ // Future PortOS-specific extensions can be added here
+ return aiToolkit.routes.runs;
+}
diff --git a/server/routes/runs.old.js b/server/routes/runs.old.js
new file mode 100644
index 0000000..bd7a500
--- /dev/null
+++ b/server/routes/runs.old.js
@@ -0,0 +1,191 @@
+import { Router } from 'express';
+import * as runner from '../services/runner.js';
+import { asyncHandler, ServerError } from '../lib/errorHandler.js';
+
+const router = Router();
+
+// GET /api/runs - List runs
+// Query params: limit, offset, source (all|devtools|cos-agent)
+router.get('/', asyncHandler(async (req, res, next) => {
+ const limit = parseInt(req.query.limit) || 50;
+ const offset = parseInt(req.query.offset) || 0;
+ const source = req.query.source || 'all'; // all, devtools, cos-agent
+
+ const result = await runner.listRuns(limit, offset, source);
+ res.json(result);
+}));
+
+// POST /api/runs - Create and execute a new run
+router.post('/', asyncHandler(async (req, res, next) => {
+ const { providerId, model, prompt, workspacePath, workspaceName, timeout, screenshots } = req.body;
+ console.log(`๐ POST /api/runs - provider: ${providerId}, model: ${model}, workspace: ${workspaceName}, timeout: ${timeout}ms, screenshots: ${screenshots?.length || 0}`);
+
+ if (!providerId) {
+ throw new ServerError('providerId is required', {
+ status: 400,
+ code: 'VALIDATION_ERROR'
+ });
+ }
+
+ if (!prompt) {
+ throw new ServerError('prompt is required', {
+ status: 400,
+ code: 'VALIDATION_ERROR'
+ });
+ }
+
+ const runData = await runner.createRun({
+ providerId,
+ model,
+ prompt,
+ workspacePath,
+ workspaceName,
+ timeout,
+ screenshots
+ });
+
+ const { runId, provider, metadata, timeout: effectiveTimeout } = runData;
+ const io = req.app.get('io');
+ console.log(`๐ Run created: ${runId}, provider type: ${provider.type}, command: ${provider.command}, timeout: ${effectiveTimeout}ms`);
+
+ // Execute based on provider type
+ if (provider.type === 'cli') {
+ console.log(`๐ Executing CLI run: ${provider.command} with args: ${JSON.stringify(provider.args)}`);
+ runner.executeCliRun(
+ runId,
+ provider,
+ prompt,
+ workspacePath,
+ (data) => {
+ // Stream output via Socket.IO
+ console.log(`๐ค Emitting run:${runId}:data (${data.length} chars)`);
+ io?.emit(`run:${runId}:data`, data);
+ },
+ (finalMetadata) => {
+ console.log(`โ
Run complete: ${runId}, success: ${finalMetadata.success}`);
+ io?.emit(`run:${runId}:complete`, finalMetadata);
+ },
+ effectiveTimeout
+ );
+ } else if (provider.type === 'api') {
+ runner.executeApiRun(
+ runId,
+ provider,
+ model,
+ prompt,
+ workspacePath,
+ screenshots,
+ (data) => {
+ io?.emit(`run:${runId}:data`, data);
+ },
+ (finalMetadata) => {
+ io?.emit(`run:${runId}:complete`, finalMetadata);
+ }
+ );
+ }
+
+ // Return immediately with run ID
+ res.status(202).json({
+ runId,
+ status: 'started',
+ metadata
+ });
+}));
+
+// GET /api/runs/:id - Get run metadata
+router.get('/:id', asyncHandler(async (req, res, next) => {
+ const metadata = await runner.getRun(req.params.id);
+
+ if (!metadata) {
+ throw new ServerError('Run not found', {
+ status: 404,
+ code: 'NOT_FOUND'
+ });
+ }
+
+ const isActive = await runner.isRunActive(req.params.id);
+ res.json({
+ ...metadata,
+ isActive
+ });
+}));
+
+// GET /api/runs/:id/output - Get run output
+router.get('/:id/output', asyncHandler(async (req, res, next) => {
+ const output = await runner.getRunOutput(req.params.id);
+
+ if (output === null) {
+ throw new ServerError('Run not found', {
+ status: 404,
+ code: 'NOT_FOUND'
+ });
+ }
+
+ res.type('text/plain').send(output);
+}));
+
+// GET /api/runs/:id/prompt - Get run prompt
+router.get('/:id/prompt', asyncHandler(async (req, res, next) => {
+ const prompt = await runner.getRunPrompt(req.params.id);
+
+ if (prompt === null) {
+ throw new ServerError('Run not found', {
+ status: 404,
+ code: 'NOT_FOUND'
+ });
+ }
+
+ res.type('text/plain').send(prompt);
+}));
+
+// POST /api/runs/:id/stop - Stop a running execution
+router.post('/:id/stop', asyncHandler(async (req, res, next) => {
+ const stopped = await runner.stopRun(req.params.id);
+
+ if (!stopped) {
+ throw new ServerError('Run not found or not active', {
+ status: 404,
+ code: 'NOT_ACTIVE'
+ });
+ }
+
+ res.json({ stopped: true });
+}));
+
+// DELETE /api/runs/:id - Delete run and artifacts
+router.delete('/:id', asyncHandler(async (req, res, next) => {
+ // Don't allow deleting active runs
+ const isActive = await runner.isRunActive(req.params.id);
+ if (isActive) {
+ throw new ServerError('Cannot delete active run', {
+ status: 409,
+ code: 'RUN_ACTIVE'
+ });
+ }
+
+ const deleted = await runner.deleteRun(req.params.id);
+
+ if (!deleted) {
+ throw new ServerError('Run not found', {
+ status: 404,
+ code: 'NOT_FOUND'
+ });
+ }
+
+ res.status(204).send();
+}));
+
+// DELETE /api/runs - Delete all failed runs
+// Requires ?confirm=true query parameter to prevent accidental deletion
+router.delete('/', asyncHandler(async (req, res, next) => {
+ if (req.query.confirm !== 'true') {
+ throw new ServerError('Destructive operation requires ?confirm=true', {
+ status: 400,
+ code: 'CONFIRMATION_REQUIRED'
+ });
+ }
+ const deletedCount = await runner.deleteFailedRuns();
+ res.json({ deleted: deletedCount });
+}));
+
+export default router;
diff --git a/server/routes/scaffold.js b/server/routes/scaffold.js
index 7a0e673..ff2a05e 100644
--- a/server/routes/scaffold.js
+++ b/server/routes/scaffold.js
@@ -1,12 +1,14 @@
import { Router } from 'express';
-import { mkdir, writeFile, readdir, copyFile, readFile } from 'fs/promises';
+import { mkdir, writeFile, readdir, copyFile, readFile, stat } from 'fs/promises';
import { existsSync } from 'fs';
-import { join, dirname } from 'path';
+import { join, dirname, resolve } from 'path';
import { fileURLToPath } from 'url';
import { exec, spawn } from 'child_process';
import { promisify } from 'util';
+import { homedir } from 'os';
import { createApp, getReservedPorts } from '../services/apps.js';
import { asyncHandler, ServerError } from '../lib/errorHandler.js';
+import { safeJSONParse } from '../lib/fileUtils.js';
const execAsync = promisify(exec);
const __filename = fileURLToPath(import.meta.url);
@@ -15,17 +17,62 @@ const TEMPLATES_DIR = join(__dirname, '../../templates');
const router = Router();
+// GET /api/directories - Browse directories for directory picker
+router.get('/directories', asyncHandler(async (req, res) => {
+ const { path: dirPath } = req.query;
+
+ // Default to parent of PortOS project if no path provided
+ const defaultPath = resolve(join(__dirname, '../../..'));
+ const targetPath = dirPath ? resolve(dirPath) : defaultPath;
+
+ // Validate path exists and is a directory
+ if (!existsSync(targetPath)) {
+ throw new ServerError('Directory does not exist', {
+ status: 400,
+ code: 'INVALID_PATH'
+ });
+ }
+
+ const stats = await stat(targetPath);
+ if (!stats.isDirectory()) {
+ throw new ServerError('Path is not a directory', {
+ status: 400,
+ code: 'NOT_A_DIRECTORY'
+ });
+ }
+
+ // Read directory contents
+ const entries = await readdir(targetPath, { withFileTypes: true });
+ const directories = entries
+ .filter(entry => entry.isDirectory() && !entry.name.startsWith('.'))
+ .map(entry => ({
+ name: entry.name,
+ path: join(targetPath, entry.name)
+ }))
+ .sort((a, b) => a.name.localeCompare(b.name));
+
+ // Get parent directory info
+ const parentPath = dirname(targetPath);
+ const canGoUp = parentPath !== targetPath; // Can't go above root
+
+ res.json({
+ currentPath: targetPath,
+ parentPath: canGoUp ? parentPath : null,
+ directories
+ });
+}));
+
// GET /api/templates - List available templates
router.get('/templates', asyncHandler(async (req, res) => {
const templates = [
{
id: 'portos-stack',
name: 'PortOS Stack',
- description: 'Express + React + Vite with Tailwind, PM2, and GitHub Actions CI/CD',
+ description: 'Express + React + Vite with Tailwind, PM2, AI providers, and GitHub Actions CI/CD',
type: 'portos-stack',
icon: 'layers',
builtIn: true,
- features: ['Express.js API', 'React + Vite frontend', 'Tailwind CSS', 'PM2 ecosystem', 'GitHub Actions CI/CD', 'Collapsible nav layout'],
+ features: ['Express.js API', 'React + Vite frontend', 'Tailwind CSS', 'PM2 ecosystem', 'AI Provider Integration', 'GitHub Actions CI/CD', 'Collapsible nav layout'],
ports: { ui: true, api: true }
},
{
@@ -243,7 +290,8 @@ app.listen(PORT, '0.0.0.0', () => {
// Update package.json to add express and server script
const pkgPath = join(repoPath, 'package.json');
- const pkg = JSON.parse(await readFile(pkgPath, 'utf-8'));
+ const pkgContent = await readFile(pkgPath, 'utf-8');
+ const pkg = safeJSONParse(pkgContent, { dependencies: {}, devDependencies: {}, scripts: {} });
pkg.dependencies = pkg.dependencies || {};
pkg.dependencies.express = '^4.21.2';
pkg.dependencies.cors = '^2.8.5';
@@ -336,6 +384,7 @@ app.listen(PORT, '0.0.0.0', () => {
},
dependencies: {
'lucide-react': '^0.562.0',
+ 'portos-ai-toolkit': '^0.1.0',
'react': '^18.3.1',
'react-dom': '^18.3.1',
'react-hot-toast': '^2.6.0',
@@ -446,10 +495,12 @@ ReactDOM.createRoot(document.getElementById('root')).render(
`);
await writeFile(join(clientSrcDir, 'App.jsx'), `import { Routes, Route, Link } from 'react-router-dom';
-import { Menu, X } from 'lucide-react';
+import { Menu, X, Home, Brain, Info } from 'lucide-react';
import { useState } from 'react';
+import { useLocation } from 'react-router-dom';
+import AIProviders from './pages/AIProviders';
-function Home() {
+function HomePage() {
return (
Welcome to ${name}
@@ -462,13 +513,14 @@ function About() {
return (
About
-
Express + React + Vite + Tailwind
+
Express + React + Vite + Tailwind + AI Provider Integration
);
}
export default function App() {
const [navOpen, setNavOpen] = useState(true);
+ const location = useLocation();
return (
@@ -480,18 +532,27 @@ export default function App() {
>
{navOpen ?
:
}
- {navOpen && (
-
- Home
- About
-
- )}
+
+
+
+ {navOpen && Home }
+
+
+
+ {navOpen && AI Providers }
+
+
+
+ {navOpen && About }
+
+
{/* Main content */}
-
+
- } />
+ } />
+ } />
} />
@@ -508,6 +569,19 @@ body {
margin: 0;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}
+`);
+
+ // === Client pages ===
+ const pagesDir = join(clientSrcDir, 'pages');
+ await mkdir(pagesDir, { recursive: true });
+
+ // AIProviders page - uses shared component from ai-toolkit
+ await writeFile(join(pagesDir, 'AIProviders.jsx'), `import { AIProviders } from 'portos-ai-toolkit/client';
+import toast from 'react-hot-toast';
+
+export default function AIProvidersPage() {
+ return ;
+}
`);
addStep('Create client', 'done');
@@ -527,6 +601,7 @@ body {
dependencies: {
'cors': '^2.8.5',
'express': '^4.21.2',
+ 'portos-ai-toolkit': '^0.1.0',
'socket.io': '^4.8.3',
'zod': '^3.24.1'
},
@@ -541,6 +616,7 @@ body {
import cors from 'cors';
import { createServer } from 'http';
import { Server } from 'socket.io';
+import { createAIToolkit } from 'portos-ai-toolkit/server';
const app = express();
const httpServer = createServer(app);
@@ -553,6 +629,13 @@ const PORT = process.env.PORT || ${apiPort || 3001};
app.use(cors());
app.use(express.json());
+// Initialize AI Toolkit with routes for providers, runs, and prompts
+const aiToolkit = createAIToolkit({
+ dataDir: './data',
+ io
+});
+aiToolkit.mountRoutes(app);
+
// Health endpoint
app.get('/api/health', (req, res) => {
res.json({ status: 'ok', timestamp: new Date().toISOString() });
@@ -584,6 +667,73 @@ export default defineConfig({
addStep('Create server', 'done');
+ // === Default Data (providers, etc.) ===
+ // Data dir at project root (server runs with cwd at project root)
+ const dataDir = join(repoPath, 'data');
+ await mkdir(dataDir, { recursive: true });
+
+ const defaultProviders = {
+ activeProvider: 'claude-code',
+ providers: {
+ 'claude-code': {
+ id: 'claude-code',
+ name: 'Claude Code CLI',
+ type: 'cli',
+ command: 'claude',
+ args: ['--print'],
+ models: ['claude-haiku-4-5-20251001', 'claude-sonnet-4-5-20250929', 'claude-opus-4-5-20251101'],
+ defaultModel: 'claude-sonnet-4-5-20250929',
+ lightModel: 'claude-haiku-4-5-20251001',
+ mediumModel: 'claude-sonnet-4-5-20250929',
+ heavyModel: 'claude-opus-4-5-20251101',
+ timeout: 300000,
+ enabled: true,
+ envVars: {}
+ },
+ 'codex': {
+ id: 'codex',
+ name: 'Codex CLI',
+ type: 'cli',
+ command: 'codex',
+ args: [],
+ models: ['gpt-5', 'gpt-5-codex'],
+ defaultModel: 'gpt-5-codex',
+ lightModel: 'gpt-5',
+ mediumModel: 'gpt-5-codex',
+ heavyModel: 'gpt-5-codex',
+ timeout: 300000,
+ enabled: true,
+ envVars: {}
+ },
+ 'lm-studio': {
+ id: 'lm-studio',
+ name: 'LM Studio (Local)',
+ type: 'api',
+ endpoint: 'http://localhost:1234/v1',
+ apiKey: 'lm-studio',
+ models: [],
+ defaultModel: null,
+ timeout: 300000,
+ enabled: false,
+ envVars: {}
+ },
+ 'ollama': {
+ id: 'ollama',
+ name: 'Ollama (Local)',
+ type: 'api',
+ endpoint: 'http://localhost:11434/v1',
+ apiKey: '',
+ models: [],
+ defaultModel: null,
+ timeout: 300000,
+ enabled: false,
+ envVars: {}
+ }
+ }
+ };
+ await writeFile(join(dataDir, 'providers.json'), JSON.stringify(defaultProviders, null, 2));
+ addStep('Create default data', 'done');
+
// === GitHub Actions CI ===
await writeFile(join(workflowsDir, 'ci.yml'), `name: CI
@@ -779,12 +929,21 @@ pm2 start ecosystem.config.cjs
${name} is a monorepo with Express.js server (port ${apiPort || 3001}) and React/Vite client (port ${uiPort || 3000}). PM2 manages app lifecycles.
### Server (\`server/\`)
-- **index.js**: Express server with Socket.IO
+- **index.js**: Express server with Socket.IO and AI toolkit integration
### Client (\`client/src/\`)
- **App.jsx**: Main component with routing and collapsible nav
- **main.jsx**: React entry point
+### AI Provider Integration
+
+This project includes \`portos-ai-toolkit\` for AI provider management. The server exposes:
+- \`GET/POST /api/providers\` - Manage AI providers (CLI or API-based)
+- \`GET/POST /api/runs\` - Execute and track AI runs
+- \`GET/POST /api/prompts\` - Manage prompt templates
+
+Provider data is stored in \`./data/providers.json\`.
+
## Code Conventions
- **No try/catch** - errors bubble to centralized middleware
@@ -813,9 +972,17 @@ npm run dev
- **Client**: React + Vite + Tailwind (port ${uiPort || 3000})
- **Server**: Express + Socket.IO (port ${apiPort || 3001})
+- **AI**: portos-ai-toolkit for provider management
- **PM2**: Process management
- **CI/CD**: GitHub Actions
+## API Endpoints
+
+- \`GET /api/health\` - Health check
+- \`GET/POST /api/providers\` - AI provider management
+- \`GET/POST /api/runs\` - AI execution runs
+- \`GET/POST /api/prompts\` - Prompt templates
+
## Scripts
| Command | Description |
diff --git a/server/services/agentGateway.js b/server/services/agentGateway.js
new file mode 100644
index 0000000..8fb251e
--- /dev/null
+++ b/server/services/agentGateway.js
@@ -0,0 +1,327 @@
+/**
+ * Agent Gateway Service
+ *
+ * Centralized communication hub for agent operations.
+ * Provides request deduplication, caching, and coordination.
+ */
+
+import { cosEvents } from './cosEvents.js'
+
+// Request deduplication cache
+const pendingRequests = new Map()
+
+// Response cache with TTL
+const responseCache = new Map()
+const CACHE_TTL_MS = 10 * 60 * 1000 // 10 minutes
+
+// Request history for analytics
+const requestHistory = []
+const MAX_HISTORY = 500
+
+// Gateway statistics
+const stats = {
+ totalRequests: 0,
+ cacheHits: 0,
+ cacheMisses: 0,
+ deduplicatedRequests: 0,
+ errors: 0
+}
+
+/**
+ * Generate cache key for a request
+ * @param {string} type - Request type
+ * @param {Object} params - Request parameters
+ * @returns {string} - Cache key
+ */
+function generateCacheKey(type, params) {
+ const sortedParams = JSON.stringify(params, Object.keys(params).sort())
+ return `${type}:${sortedParams}`
+}
+
+/**
+ * Clean expired cache entries
+ */
+function cleanExpiredCache() {
+ const now = Date.now()
+ for (const [key, entry] of responseCache.entries()) {
+ if (now > entry.expiresAt) {
+ responseCache.delete(key)
+ }
+ }
+}
+
+/**
+ * Route a request through the gateway
+ * Handles deduplication and caching
+ *
+ * @param {string} type - Request type (e.g., 'embedding', 'completion', 'tool')
+ * @param {Object} params - Request parameters
+ * @param {Function} handler - Async function to execute if not cached
+ * @param {Object} options - Gateway options
+ * @returns {Promise<*>} - Request result
+ */
+async function routeRequest(type, params, handler, options = {}) {
+ const {
+ cacheable = true,
+ ttlMs = CACHE_TTL_MS,
+ deduplicateMs = 5000
+ } = options
+
+ stats.totalRequests++
+ const cacheKey = generateCacheKey(type, params)
+ const now = Date.now()
+
+ // Check response cache first
+ if (cacheable) {
+ const cached = responseCache.get(cacheKey)
+ if (cached && now < cached.expiresAt) {
+ stats.cacheHits++
+ return { ...cached.response, fromCache: true }
+ }
+ stats.cacheMisses++
+ }
+
+ // Check for pending duplicate request
+ const pending = pendingRequests.get(cacheKey)
+ if (pending && now - pending.startedAt < deduplicateMs) {
+ stats.deduplicatedRequests++
+ return pending.promise
+ }
+
+ // Execute the request
+ const requestPromise = (async () => {
+ const startTime = Date.now()
+ let result
+ let error = null
+
+ try {
+ result = await handler(params)
+
+ // Cache successful response
+ if (cacheable && result) {
+ responseCache.set(cacheKey, {
+ response: result,
+ expiresAt: now + ttlMs,
+ cachedAt: now
+ })
+ }
+ } catch (err) {
+ error = err
+ stats.errors++
+ throw err
+ } finally {
+ // Record in history
+ requestHistory.unshift({
+ type,
+ cacheKey: cacheKey.substring(0, 50),
+ startedAt: startTime,
+ duration: Date.now() - startTime,
+ success: !error,
+ cached: false
+ })
+
+ // Trim history
+ while (requestHistory.length > MAX_HISTORY) {
+ requestHistory.pop()
+ }
+
+ // Clean up pending request
+ pendingRequests.delete(cacheKey)
+ }
+
+ return result
+ })()
+
+ // Track pending request
+ pendingRequests.set(cacheKey, {
+ promise: requestPromise,
+ startedAt: now
+ })
+
+ return requestPromise
+}
+
+/**
+ * Invalidate cache entries matching a pattern
+ * @param {string} typePrefix - Type prefix to match
+ * @returns {number} - Number of entries invalidated
+ */
+function invalidateCache(typePrefix) {
+ let invalidated = 0
+
+ for (const key of responseCache.keys()) {
+ if (key.startsWith(typePrefix)) {
+ responseCache.delete(key)
+ invalidated++
+ }
+ }
+
+ if (invalidated > 0) {
+ console.log(`๐๏ธ Invalidated ${invalidated} cache entries matching "${typePrefix}"`)
+ }
+
+ return invalidated
+}
+
+/**
+ * Clear entire cache
+ * @returns {number} - Number of entries cleared
+ */
+function clearCache() {
+ const size = responseCache.size
+ responseCache.clear()
+ console.log(`๐๏ธ Cleared ${size} cache entries`)
+ return size
+}
+
+/**
+ * Get gateway statistics
+ * @returns {Object} - Gateway stats
+ */
+function getStats() {
+ cleanExpiredCache()
+
+ return {
+ ...stats,
+ cacheSize: responseCache.size,
+ pendingRequests: pendingRequests.size,
+ cacheHitRate: stats.totalRequests > 0
+ ? ((stats.cacheHits / stats.totalRequests) * 100).toFixed(1) + '%'
+ : '0%',
+ deduplicationRate: stats.totalRequests > 0
+ ? ((stats.deduplicatedRequests / stats.totalRequests) * 100).toFixed(1) + '%'
+ : '0%'
+ }
+}
+
+/**
+ * Get recent request history
+ * @param {Object} options - Filter options
+ * @returns {Array} - Request history
+ */
+function getRequestHistory(options = {}) {
+ let history = [...requestHistory]
+
+ if (options.type) {
+ history = history.filter(r => r.type === options.type)
+ }
+
+ if (options.success !== undefined) {
+ history = history.filter(r => r.success === options.success)
+ }
+
+ const limit = options.limit || 50
+ return history.slice(0, limit)
+}
+
+/**
+ * Pre-warm cache with common requests
+ * @param {Array} requests - Array of {type, params, handler} to pre-warm
+ * @returns {Promise} - Number of requests cached
+ */
+async function prewarmCache(requests) {
+ let cached = 0
+
+ for (const { type, params, handler } of requests) {
+ const cacheKey = generateCacheKey(type, params)
+
+ // Skip if already cached
+ if (responseCache.has(cacheKey)) continue
+
+ try {
+ await routeRequest(type, params, handler, { cacheable: true })
+ cached++
+ } catch (err) {
+ console.error(`โ ๏ธ Cache prewarm failed for ${type}: ${err.message}`)
+ }
+ }
+
+ if (cached > 0) {
+ console.log(`๐ฅ Pre-warmed cache with ${cached} entries`)
+ }
+
+ return cached
+}
+
+/**
+ * Create a gateway-aware request function for a specific type
+ * @param {string} type - Request type
+ * @param {Function} handler - Request handler
+ * @param {Object} defaultOptions - Default gateway options
+ * @returns {Function} - Gateway-wrapped function
+ */
+function createGatewayRequest(type, handler, defaultOptions = {}) {
+ return async function gatewayRequest(params, options = {}) {
+ return routeRequest(type, params, handler, { ...defaultOptions, ...options })
+ }
+}
+
+/**
+ * Batch multiple requests through gateway
+ * @param {Array<{type, params, handler}>} requests - Requests to batch
+ * @param {Object} options - Batch options
+ * @returns {Promise} - Results in same order
+ */
+async function batchRequests(requests, options = {}) {
+ const { parallel = true, stopOnError = false } = options
+
+ if (parallel) {
+ const promises = requests.map(({ type, params, handler, options: reqOptions }) =>
+ routeRequest(type, params, handler, reqOptions).catch(err => {
+ if (stopOnError) throw err
+ return { error: err.message }
+ })
+ )
+ return Promise.all(promises)
+ }
+
+ const results = []
+ for (const { type, params, handler, options: reqOptions } of requests) {
+ try {
+ const result = await routeRequest(type, params, handler, reqOptions)
+ results.push(result)
+ } catch (err) {
+ if (stopOnError) throw err
+ results.push({ error: err.message })
+ }
+ }
+
+ return results
+}
+
+/**
+ * Subscribe to gateway events
+ * @param {string} event - Event name
+ * @param {Function} callback - Event handler
+ */
+function subscribe(event, callback) {
+ cosEvents.on(`gateway:${event}`, callback)
+}
+
+/**
+ * Reset gateway statistics
+ */
+function resetStats() {
+ stats.totalRequests = 0
+ stats.cacheHits = 0
+ stats.cacheMisses = 0
+ stats.deduplicatedRequests = 0
+ stats.errors = 0
+}
+
+// Periodic cache cleanup
+setInterval(cleanExpiredCache, 60000).unref()
+
+export {
+ routeRequest,
+ invalidateCache,
+ clearCache,
+ getStats,
+ getRequestHistory,
+ prewarmCache,
+ createGatewayRequest,
+ batchRequests,
+ subscribe,
+ resetStats,
+ generateCacheKey
+}
diff --git a/server/services/agentRunCache.js b/server/services/agentRunCache.js
new file mode 100644
index 0000000..62cd58d
--- /dev/null
+++ b/server/services/agentRunCache.js
@@ -0,0 +1,390 @@
+/**
+ * Agent Run Cache Service
+ *
+ * Caches agent outputs and tool results with 10-minute TTL.
+ * Provides fast lookups for repeated operations.
+ */
+
+// Cache TTL (10 minutes)
+const DEFAULT_TTL_MS = 10 * 60 * 1000
+
+// Cache storage
+const outputCache = new Map()
+const toolResultCache = new Map()
+const contextCache = new Map()
+
+// Cache statistics
+const stats = {
+ outputHits: 0,
+ outputMisses: 0,
+ toolHits: 0,
+ toolMisses: 0,
+ contextHits: 0,
+ contextMisses: 0,
+ evictions: 0
+}
+
+/**
+ * Create a cache entry with expiration
+ * @param {*} value - Value to cache
+ * @param {number} ttlMs - Time to live in milliseconds
+ * @returns {Object} - Cache entry
+ */
+function createCacheEntry(value, ttlMs = DEFAULT_TTL_MS) {
+ return {
+ value,
+ createdAt: Date.now(),
+ expiresAt: Date.now() + ttlMs,
+ accessCount: 0,
+ lastAccessedAt: null
+ }
+}
+
+/**
+ * Check if entry is expired
+ * @param {Object} entry - Cache entry
+ * @returns {boolean} - True if expired
+ */
+function isExpired(entry) {
+ return Date.now() > entry.expiresAt
+}
+
+/**
+ * Cache agent output
+ * @param {string} agentId - Agent identifier
+ * @param {*} output - Output to cache
+ * @param {Object} options - Cache options
+ */
+function cacheOutput(agentId, output, options = {}) {
+ const ttl = options.ttlMs || DEFAULT_TTL_MS
+ outputCache.set(agentId, createCacheEntry(output, ttl))
+}
+
+/**
+ * Get cached agent output
+ * @param {string} agentId - Agent identifier
+ * @returns {*} - Cached output or null
+ */
+function getOutput(agentId) {
+ const entry = outputCache.get(agentId)
+
+ if (!entry) {
+ stats.outputMisses++
+ return null
+ }
+
+ if (isExpired(entry)) {
+ outputCache.delete(agentId)
+ stats.evictions++
+ stats.outputMisses++
+ return null
+ }
+
+ entry.accessCount++
+ entry.lastAccessedAt = Date.now()
+ stats.outputHits++
+
+ return entry.value
+}
+
+/**
+ * Generate tool cache key
+ * @param {string} toolId - Tool identifier
+ * @param {Object} params - Tool parameters
+ * @returns {string} - Cache key
+ */
+function generateToolKey(toolId, params) {
+ const sortedParams = JSON.stringify(params, Object.keys(params || {}).sort())
+ return `${toolId}:${sortedParams}`
+}
+
+/**
+ * Cache tool result
+ * @param {string} toolId - Tool identifier
+ * @param {Object} params - Tool parameters
+ * @param {*} result - Tool result
+ * @param {Object} options - Cache options
+ */
+function cacheToolResult(toolId, params, result, options = {}) {
+ const key = generateToolKey(toolId, params)
+ const ttl = options.ttlMs || DEFAULT_TTL_MS
+ toolResultCache.set(key, createCacheEntry(result, ttl))
+}
+
+/**
+ * Get cached tool result
+ * @param {string} toolId - Tool identifier
+ * @param {Object} params - Tool parameters
+ * @returns {*} - Cached result or null
+ */
+function getToolResult(toolId, params) {
+ const key = generateToolKey(toolId, params)
+ const entry = toolResultCache.get(key)
+
+ if (!entry) {
+ stats.toolMisses++
+ return null
+ }
+
+ if (isExpired(entry)) {
+ toolResultCache.delete(key)
+ stats.evictions++
+ stats.toolMisses++
+ return null
+ }
+
+ entry.accessCount++
+ entry.lastAccessedAt = Date.now()
+ stats.toolHits++
+
+ return entry.value
+}
+
+/**
+ * Cache context/memory section
+ * @param {string} taskId - Task identifier
+ * @param {string} context - Context string
+ * @param {Object} options - Cache options
+ */
+function cacheContext(taskId, context, options = {}) {
+ const ttl = options.ttlMs || DEFAULT_TTL_MS
+ contextCache.set(taskId, createCacheEntry(context, ttl))
+}
+
+/**
+ * Get cached context
+ * @param {string} taskId - Task identifier
+ * @returns {string|null} - Cached context or null
+ */
+function getContext(taskId) {
+ const entry = contextCache.get(taskId)
+
+ if (!entry) {
+ stats.contextMisses++
+ return null
+ }
+
+ if (isExpired(entry)) {
+ contextCache.delete(taskId)
+ stats.evictions++
+ stats.contextMisses++
+ return null
+ }
+
+ entry.accessCount++
+ entry.lastAccessedAt = Date.now()
+ stats.contextHits++
+
+ return entry.value
+}
+
+/**
+ * Invalidate output cache for an agent
+ * @param {string} agentId - Agent identifier
+ * @returns {boolean} - True if entry was found and removed
+ */
+function invalidateOutput(agentId) {
+ return outputCache.delete(agentId)
+}
+
+/**
+ * Invalidate tool result cache
+ * @param {string} toolId - Tool identifier
+ * @param {Object} params - Tool parameters (optional, if null clears all for tool)
+ * @returns {number} - Number of entries invalidated
+ */
+function invalidateToolResult(toolId, params = null) {
+ if (params !== null) {
+ const key = generateToolKey(toolId, params)
+ return toolResultCache.delete(key) ? 1 : 0
+ }
+
+ // Clear all entries for this tool
+ let count = 0
+ for (const key of toolResultCache.keys()) {
+ if (key.startsWith(`${toolId}:`)) {
+ toolResultCache.delete(key)
+ count++
+ }
+ }
+ return count
+}
+
+/**
+ * Invalidate context cache for a task
+ * @param {string} taskId - Task identifier
+ * @returns {boolean} - True if entry was found and removed
+ */
+function invalidateContext(taskId) {
+ return contextCache.delete(taskId)
+}
+
+/**
+ * Clear all caches
+ * @returns {Object} - Number of entries cleared per cache
+ */
+function clearAll() {
+ const counts = {
+ outputs: outputCache.size,
+ toolResults: toolResultCache.size,
+ contexts: contextCache.size
+ }
+
+ outputCache.clear()
+ toolResultCache.clear()
+ contextCache.clear()
+
+ console.log(`๐๏ธ Cache cleared: ${counts.outputs} outputs, ${counts.toolResults} tool results, ${counts.contexts} contexts`)
+
+ return counts
+}
+
+/**
+ * Clean up expired entries from all caches
+ * @returns {number} - Total entries cleaned
+ */
+function cleanExpired() {
+ let cleaned = 0
+
+ for (const [key, entry] of outputCache.entries()) {
+ if (isExpired(entry)) {
+ outputCache.delete(key)
+ cleaned++
+ }
+ }
+
+ for (const [key, entry] of toolResultCache.entries()) {
+ if (isExpired(entry)) {
+ toolResultCache.delete(key)
+ cleaned++
+ }
+ }
+
+ for (const [key, entry] of contextCache.entries()) {
+ if (isExpired(entry)) {
+ contextCache.delete(key)
+ cleaned++
+ }
+ }
+
+ if (cleaned > 0) {
+ stats.evictions += cleaned
+ }
+
+ return cleaned
+}
+
+/**
+ * Get cache statistics
+ * @returns {Object} - Cache statistics
+ */
+function getStats() {
+ cleanExpired()
+
+ const outputHitRate = (stats.outputHits + stats.outputMisses) > 0
+ ? ((stats.outputHits / (stats.outputHits + stats.outputMisses)) * 100).toFixed(1) + '%'
+ : '0%'
+
+ const toolHitRate = (stats.toolHits + stats.toolMisses) > 0
+ ? ((stats.toolHits / (stats.toolHits + stats.toolMisses)) * 100).toFixed(1) + '%'
+ : '0%'
+
+ const contextHitRate = (stats.contextHits + stats.contextMisses) > 0
+ ? ((stats.contextHits / (stats.contextHits + stats.contextMisses)) * 100).toFixed(1) + '%'
+ : '0%'
+
+ return {
+ outputs: {
+ size: outputCache.size,
+ hits: stats.outputHits,
+ misses: stats.outputMisses,
+ hitRate: outputHitRate
+ },
+ toolResults: {
+ size: toolResultCache.size,
+ hits: stats.toolHits,
+ misses: stats.toolMisses,
+ hitRate: toolHitRate
+ },
+ contexts: {
+ size: contextCache.size,
+ hits: stats.contextHits,
+ misses: stats.contextMisses,
+ hitRate: contextHitRate
+ },
+ totalEvictions: stats.evictions,
+ totalSize: outputCache.size + toolResultCache.size + contextCache.size
+ }
+}
+
+/**
+ * Get or compute output with caching
+ * @param {string} agentId - Agent identifier
+ * @param {Function} computeFn - Function to compute output if not cached
+ * @param {Object} options - Cache options
+ * @returns {Promise<*>} - Output (cached or computed)
+ */
+async function getOrComputeOutput(agentId, computeFn, options = {}) {
+ const cached = getOutput(agentId)
+ if (cached !== null) {
+ return { value: cached, fromCache: true }
+ }
+
+ const computed = await computeFn()
+ cacheOutput(agentId, computed, options)
+ return { value: computed, fromCache: false }
+}
+
+/**
+ * Get or compute tool result with caching
+ * @param {string} toolId - Tool identifier
+ * @param {Object} params - Tool parameters
+ * @param {Function} computeFn - Function to compute result if not cached
+ * @param {Object} options - Cache options
+ * @returns {Promise<*>} - Result (cached or computed)
+ */
+async function getOrComputeToolResult(toolId, params, computeFn, options = {}) {
+ const cached = getToolResult(toolId, params)
+ if (cached !== null) {
+ return { value: cached, fromCache: true }
+ }
+
+ const computed = await computeFn()
+ cacheToolResult(toolId, params, computed, options)
+ return { value: computed, fromCache: false }
+}
+
+/**
+ * Reset cache statistics
+ */
+function resetStats() {
+ stats.outputHits = 0
+ stats.outputMisses = 0
+ stats.toolHits = 0
+ stats.toolMisses = 0
+ stats.contextHits = 0
+ stats.contextMisses = 0
+ stats.evictions = 0
+}
+
+// Periodic cleanup
+setInterval(cleanExpired, 60000).unref()
+
+export {
+ cacheOutput,
+ getOutput,
+ cacheToolResult,
+ getToolResult,
+ cacheContext,
+ getContext,
+ invalidateOutput,
+ invalidateToolResult,
+ invalidateContext,
+ clearAll,
+ cleanExpired,
+ getStats,
+ getOrComputeOutput,
+ getOrComputeToolResult,
+ resetStats,
+ DEFAULT_TTL_MS
+}
diff --git a/server/services/agentRunCache.test.js b/server/services/agentRunCache.test.js
new file mode 100644
index 0000000..d94b904
--- /dev/null
+++ b/server/services/agentRunCache.test.js
@@ -0,0 +1,337 @@
+import { describe, it, expect, beforeEach, vi } from 'vitest';
+import {
+ cacheOutput,
+ getOutput,
+ cacheToolResult,
+ getToolResult,
+ cacheContext,
+ getContext,
+ invalidateOutput,
+ invalidateToolResult,
+ invalidateContext,
+ clearAll,
+ cleanExpired,
+ getStats,
+ getOrComputeOutput,
+ getOrComputeToolResult,
+ resetStats,
+ DEFAULT_TTL_MS
+} from './agentRunCache.js';
+
+describe('Agent Run Cache Service', () => {
+ beforeEach(() => {
+ clearAll();
+ resetStats();
+ });
+
+ describe('DEFAULT_TTL_MS', () => {
+ it('should be 10 minutes', () => {
+ expect(DEFAULT_TTL_MS).toBe(10 * 60 * 1000);
+ });
+ });
+
+ describe('cacheOutput / getOutput', () => {
+ it('should cache and retrieve output', () => {
+ const output = { result: 'test output' };
+ cacheOutput('agent-1', output);
+
+ const cached = getOutput('agent-1');
+ expect(cached).toEqual(output);
+ });
+
+ it('should return null for non-existent key', () => {
+ expect(getOutput('nonexistent')).toBeNull();
+ });
+
+ it('should track hits and misses', () => {
+ cacheOutput('agent-1', 'output');
+ getOutput('agent-1'); // hit
+ getOutput('agent-2'); // miss
+
+ const stats = getStats();
+ expect(stats.outputs.hits).toBe(1);
+ expect(stats.outputs.misses).toBe(1);
+ });
+
+ it('should expire after TTL', async () => {
+ cacheOutput('agent-1', 'output', { ttlMs: 50 });
+
+ expect(getOutput('agent-1')).toBe('output');
+
+ await new Promise(resolve => setTimeout(resolve, 60));
+
+ expect(getOutput('agent-1')).toBeNull();
+ });
+ });
+
+ describe('cacheToolResult / getToolResult', () => {
+ it('should cache and retrieve tool results', () => {
+ const result = { data: [1, 2, 3] };
+ cacheToolResult('tool-1', { input: 'test' }, result);
+
+ const cached = getToolResult('tool-1', { input: 'test' });
+ expect(cached).toEqual(result);
+ });
+
+ it('should differentiate based on params', () => {
+ cacheToolResult('tool-1', { a: 1 }, 'result-a');
+ cacheToolResult('tool-1', { b: 2 }, 'result-b');
+
+ expect(getToolResult('tool-1', { a: 1 })).toBe('result-a');
+ expect(getToolResult('tool-1', { b: 2 })).toBe('result-b');
+ });
+
+ it('should handle same params in different order', () => {
+ cacheToolResult('tool-1', { a: 1, b: 2 }, 'result');
+
+ // Same params but different order
+ const cached = getToolResult('tool-1', { b: 2, a: 1 });
+ expect(cached).toBe('result');
+ });
+
+ it('should return null for non-existent key', () => {
+ expect(getToolResult('tool-1', { unknown: true })).toBeNull();
+ });
+
+ it('should track hits and misses', () => {
+ cacheToolResult('tool-1', { x: 1 }, 'result');
+ getToolResult('tool-1', { x: 1 }); // hit
+ getToolResult('tool-1', { x: 2 }); // miss
+
+ const stats = getStats();
+ expect(stats.toolResults.hits).toBe(1);
+ expect(stats.toolResults.misses).toBe(1);
+ });
+ });
+
+ describe('cacheContext / getContext', () => {
+ it('should cache and retrieve context', () => {
+ cacheContext('task-1', 'Relevant memory context');
+
+ const cached = getContext('task-1');
+ expect(cached).toBe('Relevant memory context');
+ });
+
+ it('should return null for non-existent key', () => {
+ expect(getContext('nonexistent')).toBeNull();
+ });
+
+ it('should track hits and misses', () => {
+ cacheContext('task-1', 'context');
+ getContext('task-1'); // hit
+ getContext('task-2'); // miss
+
+ const stats = getStats();
+ expect(stats.contexts.hits).toBe(1);
+ expect(stats.contexts.misses).toBe(1);
+ });
+ });
+
+ describe('invalidateOutput', () => {
+ it('should remove cached output', () => {
+ cacheOutput('agent-1', 'output');
+ expect(getOutput('agent-1')).toBe('output');
+
+ invalidateOutput('agent-1');
+ expect(getOutput('agent-1')).toBeNull();
+ });
+
+ it('should return true when entry existed', () => {
+ cacheOutput('agent-1', 'output');
+ expect(invalidateOutput('agent-1')).toBe(true);
+ });
+
+ it('should return false when entry did not exist', () => {
+ expect(invalidateOutput('nonexistent')).toBe(false);
+ });
+ });
+
+ describe('invalidateToolResult', () => {
+ it('should remove specific cached tool result', () => {
+ cacheToolResult('tool-1', { a: 1 }, 'result-a');
+ cacheToolResult('tool-1', { b: 2 }, 'result-b');
+
+ invalidateToolResult('tool-1', { a: 1 });
+
+ expect(getToolResult('tool-1', { a: 1 })).toBeNull();
+ expect(getToolResult('tool-1', { b: 2 })).toBe('result-b');
+ });
+
+ it('should remove all results for tool when params is null', () => {
+ cacheToolResult('tool-1', { a: 1 }, 'result-a');
+ cacheToolResult('tool-1', { b: 2 }, 'result-b');
+ cacheToolResult('tool-2', { c: 3 }, 'result-c');
+
+ const count = invalidateToolResult('tool-1', null);
+
+ expect(count).toBe(2);
+ expect(getToolResult('tool-1', { a: 1 })).toBeNull();
+ expect(getToolResult('tool-1', { b: 2 })).toBeNull();
+ expect(getToolResult('tool-2', { c: 3 })).toBe('result-c');
+ });
+
+ it('should return count of entries invalidated', () => {
+ cacheToolResult('tool-1', { a: 1 }, 'result');
+ expect(invalidateToolResult('tool-1', { a: 1 })).toBe(1);
+ expect(invalidateToolResult('tool-1', { unknown: true })).toBe(0);
+ });
+ });
+
+ describe('invalidateContext', () => {
+ it('should remove cached context', () => {
+ cacheContext('task-1', 'context');
+ expect(getContext('task-1')).toBe('context');
+
+ invalidateContext('task-1');
+ expect(getContext('task-1')).toBeNull();
+ });
+ });
+
+ describe('clearAll', () => {
+ it('should clear all caches', () => {
+ cacheOutput('agent-1', 'output');
+ cacheToolResult('tool-1', { x: 1 }, 'result');
+ cacheContext('task-1', 'context');
+
+ const counts = clearAll();
+
+ expect(counts.outputs).toBe(1);
+ expect(counts.toolResults).toBe(1);
+ expect(counts.contexts).toBe(1);
+
+ expect(getOutput('agent-1')).toBeNull();
+ expect(getToolResult('tool-1', { x: 1 })).toBeNull();
+ expect(getContext('task-1')).toBeNull();
+ });
+ });
+
+ describe('cleanExpired', () => {
+ it('should remove expired entries', async () => {
+ cacheOutput('agent-1', 'output', { ttlMs: 50 });
+ cacheOutput('agent-2', 'output', { ttlMs: 5000 });
+
+ await new Promise(resolve => setTimeout(resolve, 60));
+
+ const cleaned = cleanExpired();
+
+ expect(cleaned).toBe(1);
+ expect(getOutput('agent-1')).toBeNull();
+ expect(getOutput('agent-2')).toBe('output');
+ });
+ });
+
+ describe('getStats', () => {
+ it('should return cache statistics', () => {
+ cacheOutput('agent-1', 'output');
+ cacheToolResult('tool-1', {}, 'result');
+ cacheContext('task-1', 'context');
+
+ getOutput('agent-1');
+ getToolResult('tool-1', {});
+ getContext('task-1');
+
+ const stats = getStats();
+
+ expect(stats.outputs.size).toBe(1);
+ expect(stats.outputs.hits).toBe(1);
+ expect(stats.toolResults.size).toBe(1);
+ expect(stats.toolResults.hits).toBe(1);
+ expect(stats.contexts.size).toBe(1);
+ expect(stats.contexts.hits).toBe(1);
+ expect(stats.totalSize).toBe(3);
+ });
+
+ it('should calculate hit rates', () => {
+ cacheOutput('agent-1', 'output');
+ getOutput('agent-1'); // hit
+ getOutput('agent-2'); // miss
+
+ const stats = getStats();
+ expect(stats.outputs.hitRate).toBe('50.0%');
+ });
+
+ it('should handle zero hits/misses', () => {
+ const stats = getStats();
+ expect(stats.outputs.hitRate).toBe('0%');
+ });
+ });
+
+ describe('getOrComputeOutput', () => {
+ it('should return cached value if available', async () => {
+ cacheOutput('agent-1', 'cached-output');
+ const computeFn = vi.fn(() => 'computed-output');
+
+ const result = await getOrComputeOutput('agent-1', computeFn);
+
+ expect(result.value).toBe('cached-output');
+ expect(result.fromCache).toBe(true);
+ expect(computeFn).not.toHaveBeenCalled();
+ });
+
+ it('should compute and cache if not available', async () => {
+ const computeFn = vi.fn(() => 'computed-output');
+
+ const result = await getOrComputeOutput('agent-new', computeFn);
+
+ expect(result.value).toBe('computed-output');
+ expect(result.fromCache).toBe(false);
+ expect(computeFn).toHaveBeenCalledOnce();
+
+ // Verify it was cached
+ expect(getOutput('agent-new')).toBe('computed-output');
+ });
+
+ it('should handle async compute function', async () => {
+ const computeFn = vi.fn(async () => {
+ await new Promise(resolve => setTimeout(resolve, 10));
+ return 'async-result';
+ });
+
+ const result = await getOrComputeOutput('agent-async', computeFn);
+
+ expect(result.value).toBe('async-result');
+ expect(result.fromCache).toBe(false);
+ });
+ });
+
+ describe('getOrComputeToolResult', () => {
+ it('should return cached value if available', async () => {
+ cacheToolResult('tool-1', { x: 1 }, 'cached-result');
+ const computeFn = vi.fn(() => 'computed-result');
+
+ const result = await getOrComputeToolResult('tool-1', { x: 1 }, computeFn);
+
+ expect(result.value).toBe('cached-result');
+ expect(result.fromCache).toBe(true);
+ expect(computeFn).not.toHaveBeenCalled();
+ });
+
+ it('should compute and cache if not available', async () => {
+ const computeFn = vi.fn(() => 'computed-result');
+
+ const result = await getOrComputeToolResult('tool-new', { x: 1 }, computeFn);
+
+ expect(result.value).toBe('computed-result');
+ expect(result.fromCache).toBe(false);
+ expect(computeFn).toHaveBeenCalledOnce();
+
+ // Verify it was cached
+ expect(getToolResult('tool-new', { x: 1 })).toBe('computed-result');
+ });
+ });
+
+ describe('resetStats', () => {
+ it('should reset all statistics', () => {
+ cacheOutput('agent-1', 'output');
+ getOutput('agent-1');
+ getOutput('nonexistent');
+
+ resetStats();
+
+ const stats = getStats();
+ expect(stats.outputs.hits).toBe(0);
+ expect(stats.outputs.misses).toBe(0);
+ expect(stats.totalEvictions).toBe(0);
+ });
+ });
+});
diff --git a/server/services/appActivity.js b/server/services/appActivity.js
index 8ef3bb0..c4266d3 100644
--- a/server/services/appActivity.js
+++ b/server/services/appActivity.js
@@ -5,10 +5,11 @@
* Prevents the CoS from working on the same app in a loop.
*/
-import { readFile, writeFile, mkdir } from 'fs/promises';
+import { writeFile, mkdir } from 'fs/promises';
import { existsSync } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
+import { readJSONFile } from '../lib/fileUtils.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
@@ -38,12 +39,8 @@ async function ensureDir() {
export async function loadAppActivity() {
await ensureDir();
- if (!existsSync(ACTIVITY_FILE)) {
- return { ...DEFAULT_ACTIVITY };
- }
-
- const content = await readFile(ACTIVITY_FILE, 'utf-8');
- return { ...DEFAULT_ACTIVITY, ...JSON.parse(content) };
+ const loaded = await readJSONFile(ACTIVITY_FILE, null);
+ return loaded ? { ...DEFAULT_ACTIVITY, ...loaded } : { ...DEFAULT_ACTIVITY };
}
/**
diff --git a/server/services/apps.js b/server/services/apps.js
index b40dcd9..d8fa604 100644
--- a/server/services/apps.js
+++ b/server/services/apps.js
@@ -1,13 +1,11 @@
-import { readFile, writeFile, mkdir } from 'fs/promises';
+import { readFile, writeFile } from 'fs/promises';
import { existsSync } from 'fs';
-import { join, dirname } from 'path';
-import { fileURLToPath } from 'url';
+import { join } from 'path';
import { v4 as uuidv4 } from 'uuid';
import EventEmitter from 'events';
+import { ensureDir, PATHS } from '../lib/fileUtils.js';
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = dirname(__filename);
-const DATA_DIR = join(__dirname, '../../data');
+const DATA_DIR = PATHS.data;
const APPS_FILE = join(DATA_DIR, 'apps.json');
// Event emitter for apps changes
@@ -22,9 +20,7 @@ const CACHE_TTL_MS = 2000; // Cache for 2 seconds to reduce file reads during ra
* Ensure data directory exists
*/
async function ensureDataDir() {
- if (!existsSync(DATA_DIR)) {
- await mkdir(DATA_DIR, { recursive: true });
- }
+ await ensureDir(DATA_DIR);
}
/**
diff --git a/server/services/brain.js b/server/services/brain.js
new file mode 100644
index 0000000..140f017
--- /dev/null
+++ b/server/services/brain.js
@@ -0,0 +1,676 @@
+/**
+ * Brain Service
+ *
+ * Core business logic for the Brain feature:
+ * - Capture and classify thoughts
+ * - Route to appropriate databases
+ * - Generate daily digests and weekly reviews
+ * - Handle corrections and fixes
+ */
+
+import { spawn } from 'child_process';
+import * as storage from './brainStorage.js';
+import { getActiveProvider, getProviderById } from './providers.js';
+import { buildPrompt } from './promptService.js';
+import { validate } from '../lib/validation.js';
+import {
+ classifierOutputSchema,
+ digestOutputSchema,
+ reviewOutputSchema,
+ extractedPeopleSchema,
+ extractedProjectSchema,
+ extractedIdeaSchema,
+ extractedAdminSchema
+} from '../lib/brainValidation.js';
+
+// Extracted field validators by destination
+const EXTRACTED_VALIDATORS = {
+ people: extractedPeopleSchema,
+ projects: extractedProjectSchema,
+ ideas: extractedIdeaSchema,
+ admin: extractedAdminSchema
+};
+
+/**
+ * Call AI provider with a prompt
+ */
+async function callAI(promptStageName, variables, providerOverride, modelOverride) {
+ const provider = providerOverride
+ ? await getProviderById(providerOverride)
+ : await getActiveProvider();
+
+ if (!provider || !provider.enabled) {
+ throw new Error('No AI provider available');
+ }
+
+ const prompt = await buildPrompt(promptStageName, variables);
+ const model = modelOverride || provider.defaultModel;
+
+ if (provider.type === 'cli') {
+
+ return new Promise((resolve, reject) => {
+ const args = [...(provider.args || []), prompt];
+ let output = '';
+
+ const child = spawn(provider.command, args, {
+ env: { ...process.env, ...provider.envVars },
+ shell: false
+ });
+
+ child.stdout.on('data', (data) => {
+ output += data.toString();
+ });
+
+ child.stderr.on('data', (data) => {
+ output += data.toString();
+ });
+
+ child.on('close', (code) => {
+ if (code === 0) {
+ resolve(output);
+ } else {
+ reject(new Error(`CLI exited with code ${code}`));
+ }
+ });
+
+ child.on('error', reject);
+
+ setTimeout(() => {
+ child.kill();
+ reject(new Error('AI request timed out'));
+ }, provider.timeout || 300000);
+ });
+ }
+
+ if (provider.type === 'api') {
+ const headers = { 'Content-Type': 'application/json' };
+ if (provider.apiKey) {
+ headers['Authorization'] = `Bearer ${provider.apiKey}`;
+ }
+
+ const response = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model,
+ messages: [{ role: 'user', content: prompt }],
+ temperature: 0.1
+ })
+ });
+
+ if (!response.ok) {
+ const errorText = await response.text();
+ throw new Error(`AI API error: ${response.status} - ${errorText}`);
+ }
+
+ const data = await response.json();
+ return data.choices?.[0]?.message?.content || '';
+ }
+
+ throw new Error(`Unsupported provider type: ${provider.type}`);
+}
+
+/**
+ * Parse JSON from AI response (handles markdown code blocks)
+ */
+function parseJsonResponse(content) {
+ if (!content || typeof content !== 'string') {
+ throw new Error('Empty or invalid AI response');
+ }
+
+ let jsonStr = content.trim();
+
+ // Remove markdown code blocks if present
+ const jsonMatch = jsonStr.match(/```(?:json)?\s*([\s\S]*?)```/);
+ if (jsonMatch) {
+ jsonStr = jsonMatch[1].trim();
+ }
+
+ // Find JSON object
+ const objectMatch = jsonStr.match(/\{[\s\S]*\}/);
+ if (objectMatch) {
+ jsonStr = objectMatch[0];
+ }
+
+ return JSON.parse(jsonStr);
+}
+
+/**
+ * Capture a thought and classify it
+ */
+export async function captureThought(text, providerOverride, modelOverride) {
+ const meta = await storage.loadMeta();
+ const provider = providerOverride || meta.defaultProvider;
+ const model = modelOverride || meta.defaultModel;
+
+ // Create initial inbox log entry
+ const inboxEntry = await storage.createInboxLog({
+ capturedText: text,
+ source: 'brain_ui',
+ ai: {
+ providerId: provider,
+ modelId: model,
+ promptTemplateId: 'brain-classifier'
+ },
+ status: 'needs_review'
+ });
+
+ // Attempt AI classification
+ let classification = null;
+ let aiError = null;
+
+ const aiResponse = await callAI(
+ 'brain-classifier',
+ { capturedText: text, now: new Date().toISOString() },
+ providerOverride,
+ modelOverride
+ ).catch(err => {
+ aiError = err;
+ return null;
+ });
+
+ if (aiResponse) {
+ const parsed = parseJsonResponse(aiResponse);
+ const validationResult = classifierOutputSchema.safeParse(parsed);
+
+ if (validationResult.success) {
+ classification = validationResult.data;
+ } else {
+ console.error(`๐ง Classification validation failed: ${JSON.stringify(validationResult.error.errors)}`);
+ aiError = new Error('Invalid classification output from AI');
+ }
+ }
+
+ // If AI failed, return entry as needs_review
+ if (!classification) {
+ const errorMessage = aiError?.message || 'AI classification failed';
+ await storage.updateInboxLog(inboxEntry.id, {
+ classification: {
+ destination: 'unknown',
+ confidence: 0,
+ title: 'Classification failed',
+ extracted: {},
+ reasons: [errorMessage]
+ },
+ status: 'needs_review',
+ error: { message: errorMessage }
+ });
+
+ console.log(`๐ง Capture queued for review (AI unavailable): ${inboxEntry.id}`);
+ return {
+ inboxLog: await storage.getInboxLogById(inboxEntry.id),
+ message: `Thought captured but AI unavailable. Queued for manual review.`
+ };
+ }
+
+ // Check confidence threshold
+ if (classification.confidence < meta.confidenceThreshold || classification.destination === 'unknown') {
+ await storage.updateInboxLog(inboxEntry.id, {
+ classification,
+ status: 'needs_review'
+ });
+
+ console.log(`๐ง Capture needs review (low confidence ${classification.confidence}): ${inboxEntry.id}`);
+ return {
+ inboxLog: await storage.getInboxLogById(inboxEntry.id),
+ message: `Thought captured but needs review. Confidence: ${Math.round(classification.confidence * 100)}%`
+ };
+ }
+
+ // File to appropriate destination
+ const filedRecord = await fileToDestination(classification.destination, classification.extracted, classification.title);
+
+ await storage.updateInboxLog(inboxEntry.id, {
+ classification,
+ status: 'filed',
+ filed: {
+ destination: classification.destination,
+ destinationId: filedRecord.id
+ }
+ });
+
+ console.log(`๐ง Captured and filed to ${classification.destination}: ${filedRecord.id}`);
+ return {
+ inboxLog: await storage.getInboxLogById(inboxEntry.id),
+ filedRecord,
+ message: `Filed to ${classification.destination}: ${classification.title}`
+ };
+}
+
+/**
+ * File extracted data to destination database
+ */
+async function fileToDestination(destination, extracted, title) {
+ const validator = EXTRACTED_VALIDATORS[destination];
+ if (!validator) {
+ throw new Error(`Unknown destination: ${destination}`);
+ }
+
+ // Validate and set defaults
+ const validationResult = validator.safeParse(extracted);
+ const data = validationResult.success ? validationResult.data : extracted;
+
+ switch (destination) {
+ case 'people':
+ return storage.createPerson({
+ name: data.name || title,
+ context: data.context || '',
+ followUps: data.followUps || [],
+ lastTouched: data.lastTouched || null,
+ tags: data.tags || []
+ });
+
+ case 'projects':
+ return storage.createProject({
+ name: data.name || title,
+ status: data.status || 'active',
+ nextAction: data.nextAction || 'Define next action',
+ notes: data.notes || '',
+ tags: data.tags || []
+ });
+
+ case 'ideas':
+ return storage.createIdea({
+ title: data.title || title,
+ oneLiner: data.oneLiner || title,
+ notes: data.notes || '',
+ tags: data.tags || []
+ });
+
+ case 'admin':
+ return storage.createAdminItem({
+ title: data.title || title,
+ status: data.status || 'open',
+ dueDate: data.dueDate || null,
+ nextAction: data.nextAction || null,
+ notes: data.notes || ''
+ });
+
+ default:
+ throw new Error(`Cannot file to destination: ${destination}`);
+ }
+}
+
+/**
+ * Resolve a needs_review inbox item
+ */
+export async function resolveReview(inboxLogId, destination, editedExtracted) {
+ const inboxLog = await storage.getInboxLogById(inboxLogId);
+ if (!inboxLog) {
+ throw new Error('Inbox log entry not found');
+ }
+
+ if (inboxLog.status !== 'needs_review') {
+ throw new Error('Inbox entry is not in needs_review status');
+ }
+
+ // Merge extracted data with edits
+ const extracted = { ...inboxLog.classification?.extracted, ...editedExtracted };
+ const title = inboxLog.classification?.title || 'Untitled';
+
+ // File to destination
+ const filedRecord = await fileToDestination(destination, extracted, title);
+
+ // Update inbox log
+ await storage.updateInboxLog(inboxLogId, {
+ classification: {
+ ...inboxLog.classification,
+ destination,
+ extracted,
+ confidence: 1.0,
+ reasons: [...(inboxLog.classification?.reasons || []), 'Manually resolved']
+ },
+ status: 'filed',
+ filed: {
+ destination,
+ destinationId: filedRecord.id
+ }
+ });
+
+ console.log(`๐ง Resolved review to ${destination}: ${filedRecord.id}`);
+ return {
+ inboxLog: await storage.getInboxLogById(inboxLogId),
+ filedRecord
+ };
+}
+
+/**
+ * Fix/correct a filed inbox item
+ */
+export async function fixClassification(inboxLogId, newDestination, updatedFields, note) {
+ const inboxLog = await storage.getInboxLogById(inboxLogId);
+ if (!inboxLog) {
+ throw new Error('Inbox log entry not found');
+ }
+
+ if (inboxLog.status !== 'filed' && inboxLog.status !== 'corrected') {
+ throw new Error('Can only fix filed or previously corrected entries');
+ }
+
+ const previousDestination = inboxLog.filed?.destination || inboxLog.classification?.destination;
+ const previousId = inboxLog.filed?.destinationId;
+
+ // Create new record in new destination
+ const extracted = { ...inboxLog.classification?.extracted, ...updatedFields };
+ const title = inboxLog.classification?.title || 'Untitled';
+ const newRecord = await fileToDestination(newDestination, extracted, title);
+
+ // Mark old record as archived (soft delete by adding archived flag)
+ if (previousId && previousDestination) {
+ await archiveRecord(previousDestination, previousId);
+ }
+
+ // Update inbox log with correction info
+ await storage.updateInboxLog(inboxLogId, {
+ status: 'corrected',
+ filed: {
+ destination: newDestination,
+ destinationId: newRecord.id
+ },
+ correction: {
+ correctedAt: new Date().toISOString(),
+ previousDestination: previousDestination || 'unknown',
+ newDestination,
+ note
+ }
+ });
+
+ console.log(`๐ง Fixed classification from ${previousDestination} to ${newDestination}`);
+ return {
+ inboxLog: await storage.getInboxLogById(inboxLogId),
+ newRecord
+ };
+}
+
+/**
+ * Archive a record (soft delete)
+ */
+async function archiveRecord(destination, id) {
+ const updateFn = {
+ people: storage.updatePerson,
+ projects: storage.updateProject,
+ ideas: storage.updateIdea,
+ admin: storage.updateAdminItem
+ }[destination];
+
+ if (updateFn) {
+ await updateFn(id, { archived: true });
+ }
+}
+
+/**
+ * Run daily digest
+ */
+export async function runDailyDigest(providerOverride, modelOverride) {
+ const meta = await storage.loadMeta();
+
+ // Gather data for digest
+ const [activeProjects, openAdmin, allPeople, needsReviewLogs] = await Promise.all([
+ storage.getProjects({ status: 'active' }),
+ storage.getAdminItems({ status: 'open' }),
+ storage.getPeople(),
+ storage.getInboxLog({ status: 'needs_review' })
+ ]);
+
+ // Filter people with follow-ups
+ const peopleWithFollowUps = allPeople.filter(p => p.followUps && p.followUps.length > 0);
+
+ const aiResponse = await callAI(
+ 'brain-daily-digest',
+ {
+ activeProjects: JSON.stringify(activeProjects),
+ openAdmin: JSON.stringify(openAdmin),
+ peopleFollowUps: JSON.stringify(peopleWithFollowUps),
+ needsReview: JSON.stringify(needsReviewLogs),
+ now: new Date().toISOString()
+ },
+ providerOverride || meta.defaultProvider,
+ modelOverride || meta.defaultModel
+ );
+
+ const parsed = parseJsonResponse(aiResponse);
+ const validationResult = digestOutputSchema.safeParse(parsed);
+
+ if (!validationResult.success) {
+ throw new Error(`Invalid digest output: ${JSON.stringify(validationResult.error.errors)}`);
+ }
+
+ const digestData = validationResult.data;
+
+ // Enforce word limit
+ const wordCount = digestData.digestText.split(/\s+/).length;
+ if (wordCount > 150) {
+ digestData.digestText = digestData.digestText.split(/\s+/).slice(0, 150).join(' ') + '...';
+ }
+
+ // Store digest
+ const digest = await storage.createDigest({
+ ...digestData,
+ ai: {
+ providerId: providerOverride || meta.defaultProvider,
+ modelId: modelOverride || meta.defaultModel,
+ promptTemplateId: 'brain-daily-digest'
+ }
+ });
+
+ console.log(`๐ง Generated daily digest: ${digest.id}`);
+ return digest;
+}
+
+/**
+ * Run weekly review
+ */
+export async function runWeeklyReview(providerOverride, modelOverride) {
+ const meta = await storage.loadMeta();
+
+ // Get inbox log from last 7 days
+ const sevenDaysAgo = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString();
+ const allInboxLogs = await storage.getInboxLog({ limit: 500 });
+ const recentInboxLogs = allInboxLogs.filter(log => log.capturedAt >= sevenDaysAgo);
+
+ // Get active projects
+ const activeProjects = await storage.getProjects({ status: 'active' });
+
+ const aiResponse = await callAI(
+ 'brain-weekly-review',
+ {
+ inboxLogLast7Days: JSON.stringify(recentInboxLogs),
+ activeProjects: JSON.stringify(activeProjects),
+ now: new Date().toISOString()
+ },
+ providerOverride || meta.defaultProvider,
+ modelOverride || meta.defaultModel
+ );
+
+ const parsed = parseJsonResponse(aiResponse);
+ const validationResult = reviewOutputSchema.safeParse(parsed);
+
+ if (!validationResult.success) {
+ throw new Error(`Invalid review output: ${JSON.stringify(validationResult.error.errors)}`);
+ }
+
+ const reviewData = validationResult.data;
+
+ // Enforce word limit
+ const wordCount = reviewData.reviewText.split(/\s+/).length;
+ if (wordCount > 250) {
+ reviewData.reviewText = reviewData.reviewText.split(/\s+/).slice(0, 250).join(' ') + '...';
+ }
+
+ // Store review
+ const review = await storage.createReview({
+ ...reviewData,
+ ai: {
+ providerId: providerOverride || meta.defaultProvider,
+ modelId: modelOverride || meta.defaultModel,
+ promptTemplateId: 'brain-weekly-review'
+ }
+ });
+
+ console.log(`๐ง Generated weekly review: ${review.id}`);
+ return review;
+}
+
+/**
+ * Retry classification for a needs_review item
+ */
+export async function retryClassification(inboxLogId, providerOverride, modelOverride) {
+ const inboxLog = await storage.getInboxLogById(inboxLogId);
+ if (!inboxLog) {
+ throw new Error('Inbox log entry not found');
+ }
+
+ const meta = await storage.loadMeta();
+ const provider = providerOverride || meta.defaultProvider;
+ const model = modelOverride || meta.defaultModel;
+
+ // Update AI config for this retry
+ await storage.updateInboxLog(inboxLogId, {
+ ai: {
+ providerId: provider,
+ modelId: model,
+ promptTemplateId: 'brain-classifier'
+ },
+ status: 'needs_review',
+ error: null
+ });
+
+ // Attempt AI classification
+ let classification = null;
+ let aiError = null;
+
+ const aiResponse = await callAI(
+ 'brain-classifier',
+ { capturedText: inboxLog.capturedText, now: new Date().toISOString() },
+ provider,
+ model
+ ).catch(err => {
+ aiError = err;
+ return null;
+ });
+
+ if (aiResponse) {
+ const parsed = parseJsonResponse(aiResponse);
+ const validationResult = classifierOutputSchema.safeParse(parsed);
+
+ if (validationResult.success) {
+ classification = validationResult.data;
+ } else {
+ console.error(`๐ง Classification validation failed: ${JSON.stringify(validationResult.error.errors)}`);
+ aiError = new Error('Invalid classification output from AI');
+ }
+ }
+
+ // If AI failed, update entry with error
+ if (!classification) {
+ const errorMessage = aiError?.message || 'AI classification failed';
+ await storage.updateInboxLog(inboxLogId, {
+ classification: {
+ destination: 'unknown',
+ confidence: 0,
+ title: 'Classification failed',
+ extracted: {},
+ reasons: [errorMessage]
+ },
+ status: 'needs_review',
+ error: { message: errorMessage }
+ });
+
+ console.log(`๐ง Retry failed for ${inboxLogId}: ${errorMessage}`);
+ return {
+ inboxLog: await storage.getInboxLogById(inboxLogId),
+ message: 'Classification failed, needs manual review'
+ };
+ }
+
+ // If confidence too low, update but keep as needs_review
+ if (classification.confidence < meta.confidenceThreshold) {
+ await storage.updateInboxLog(inboxLogId, {
+ classification,
+ status: 'needs_review'
+ });
+
+ console.log(`๐ง Retry low confidence for ${inboxLogId}: ${classification.confidence}`);
+ return {
+ inboxLog: await storage.getInboxLogById(inboxLogId),
+ message: `Low confidence (${(classification.confidence * 100).toFixed(0)}%), needs manual review`
+ };
+ }
+
+ // Classification succeeded with high confidence - auto-file
+ const filedRecord = await fileToDestination(classification.destination, classification.extracted, classification.title);
+
+ await storage.updateInboxLog(inboxLogId, {
+ classification,
+ status: 'filed',
+ filed: {
+ destination: classification.destination,
+ recordId: filedRecord.id,
+ filedAt: new Date().toISOString()
+ }
+ });
+
+ console.log(`๐ง Retry successful for ${inboxLogId} -> ${classification.destination}`);
+ return {
+ inboxLog: await storage.getInboxLogById(inboxLogId),
+ message: `Successfully classified as ${classification.destination}`
+ };
+}
+
+/**
+ * Update inbox entry (edit captured text)
+ */
+export async function updateInboxEntry(inboxLogId, updates) {
+ const updated = await storage.updateInboxLog(inboxLogId, updates);
+ if (!updated) {
+ return null;
+ }
+
+ console.log(`๐ง Updated inbox entry text: ${inboxLogId}`);
+ return updated;
+}
+
+/**
+ * Delete inbox entry
+ */
+export async function deleteInboxEntry(inboxLogId) {
+ const deleted = await storage.deleteInboxLog(inboxLogId);
+ if (!deleted) {
+ return false;
+ }
+
+ console.log(`๐ง Deleted inbox entry: ${inboxLogId}`);
+ return true;
+}
+
+// Re-export storage functions for convenience
+export const loadMeta = storage.loadMeta;
+export const updateMeta = storage.updateMeta;
+export const getSummary = storage.getSummary;
+export const getInboxLog = storage.getInboxLog;
+export const getInboxLogById = storage.getInboxLogById;
+export const getInboxLogCounts = storage.getInboxLogCounts;
+export const getPeople = storage.getPeople;
+export const getPersonById = storage.getPersonById;
+export const createPerson = storage.createPerson;
+export const updatePerson = storage.updatePerson;
+export const deletePerson = storage.deletePerson;
+export const getProjects = storage.getProjects;
+export const getProjectById = storage.getProjectById;
+export const createProject = storage.createProject;
+export const updateProject = storage.updateProject;
+export const deleteProject = storage.deleteProject;
+export const getIdeas = storage.getIdeas;
+export const getIdeaById = storage.getIdeaById;
+export const createIdea = storage.createIdea;
+export const updateIdea = storage.updateIdea;
+export const deleteIdea = storage.deleteIdea;
+export const getAdminItems = storage.getAdminItems;
+export const getAdminById = storage.getAdminById;
+export const createAdminItem = storage.createAdminItem;
+export const updateAdminItem = storage.updateAdminItem;
+export const deleteAdminItem = storage.deleteAdminItem;
+export const getDigests = storage.getDigests;
+export const getLatestDigest = storage.getLatestDigest;
+export const getReviews = storage.getReviews;
+export const getLatestReview = storage.getLatestReview;
diff --git a/server/services/brainScheduler.js b/server/services/brainScheduler.js
new file mode 100644
index 0000000..a4915b8
--- /dev/null
+++ b/server/services/brainScheduler.js
@@ -0,0 +1,203 @@
+/**
+ * Brain Scheduler Service
+ *
+ * Manages scheduled jobs for the Brain feature:
+ * - Daily digest generation (default 9:00 AM)
+ * - Weekly review generation (default Sunday 4:00 PM)
+ *
+ * Handles catch-up logic for missed runs (max 1 per type)
+ */
+
+import * as storage from './brainStorage.js';
+import { runDailyDigest, runWeeklyReview } from './brain.js';
+
+let schedulerInterval = null;
+let lastCheckTime = null;
+const CHECK_INTERVAL_MS = 60000; // Check every minute
+
+// Day name to number mapping
+const DAY_MAP = {
+ sunday: 0,
+ monday: 1,
+ tuesday: 2,
+ wednesday: 3,
+ thursday: 4,
+ friday: 5,
+ saturday: 6
+};
+
+/**
+ * Parse time string (HH:MM) to hours and minutes
+ */
+function parseTime(timeStr) {
+ const [hours, minutes] = timeStr.split(':').map(Number);
+ return { hours, minutes };
+}
+
+/**
+ * Check if it's time for daily digest
+ */
+function isDailyDigestTime(settings, now) {
+ const { hours, minutes } = parseTime(settings.dailyDigestTime);
+ return now.getHours() === hours && now.getMinutes() === minutes;
+}
+
+/**
+ * Check if it's time for weekly review
+ */
+function isWeeklyReviewTime(settings, now) {
+ const { hours, minutes } = parseTime(settings.weeklyReviewTime);
+ const targetDay = DAY_MAP[settings.weeklyReviewDay];
+ return now.getDay() === targetDay &&
+ now.getHours() === hours &&
+ now.getMinutes() === minutes;
+}
+
+/**
+ * Check if daily digest was missed (should have run today but didn't)
+ */
+function isDailyDigestMissed(settings, now) {
+ if (!settings.lastDailyDigest) return false;
+
+ const lastRun = new Date(settings.lastDailyDigest);
+ const { hours, minutes } = parseTime(settings.dailyDigestTime);
+
+ // Create target time for today
+ const todayTarget = new Date(now);
+ todayTarget.setHours(hours, minutes, 0, 0);
+
+ // If current time is past today's target and last run was before today
+ if (now > todayTarget) {
+ const lastRunDate = lastRun.toDateString();
+ const todayDate = now.toDateString();
+ return lastRunDate !== todayDate;
+ }
+
+ return false;
+}
+
+/**
+ * Check if weekly review was missed
+ */
+function isWeeklyReviewMissed(settings, now) {
+ if (!settings.lastWeeklyReview) return false;
+
+ const lastRun = new Date(settings.lastWeeklyReview);
+ const daysSinceLastRun = Math.floor((now - lastRun) / (24 * 60 * 60 * 1000));
+
+ // If more than 7 days since last run, we missed one
+ return daysSinceLastRun > 7;
+}
+
+/**
+ * Run the scheduler check
+ */
+async function checkSchedule() {
+ const now = new Date();
+ const settings = await storage.loadMeta();
+
+ // Avoid running multiple times in the same minute
+ const currentMinute = `${now.getFullYear()}-${now.getMonth()}-${now.getDate()}-${now.getHours()}-${now.getMinutes()}`;
+ if (lastCheckTime === currentMinute) return;
+ lastCheckTime = currentMinute;
+
+ // Check for daily digest
+ if (isDailyDigestTime(settings, now)) {
+ console.log('๐ง Scheduler: Running daily digest...');
+ runDailyDigest().catch(err => {
+ console.error(`๐ง Scheduler: Daily digest failed: ${err.message}`);
+ });
+ } else if (isDailyDigestMissed(settings, now)) {
+ console.log('๐ง Scheduler: Running missed daily digest (catch-up)...');
+ runDailyDigest().catch(err => {
+ console.error(`๐ง Scheduler: Catch-up daily digest failed: ${err.message}`);
+ });
+ }
+
+ // Check for weekly review
+ if (isWeeklyReviewTime(settings, now)) {
+ console.log('๐ง Scheduler: Running weekly review...');
+ runWeeklyReview().catch(err => {
+ console.error(`๐ง Scheduler: Weekly review failed: ${err.message}`);
+ });
+ } else if (isWeeklyReviewMissed(settings, now)) {
+ console.log('๐ง Scheduler: Running missed weekly review (catch-up)...');
+ runWeeklyReview().catch(err => {
+ console.error(`๐ง Scheduler: Catch-up weekly review failed: ${err.message}`);
+ });
+ }
+}
+
+/**
+ * Start the Brain scheduler
+ */
+export function startBrainScheduler() {
+ if (schedulerInterval) {
+ console.log('๐ง Scheduler: Already running');
+ return;
+ }
+
+ console.log('๐ง Scheduler: Starting Brain scheduler...');
+
+ // Run initial check
+ checkSchedule().catch(err => {
+ console.error(`๐ง Scheduler: Initial check failed: ${err.message}`);
+ });
+
+ // Set up interval
+ schedulerInterval = setInterval(() => {
+ checkSchedule().catch(err => {
+ console.error(`๐ง Scheduler: Check failed: ${err.message}`);
+ });
+ }, CHECK_INTERVAL_MS);
+
+ console.log('๐ง Scheduler: Brain scheduler started');
+}
+
+/**
+ * Stop the Brain scheduler
+ */
+export function stopBrainScheduler() {
+ if (schedulerInterval) {
+ clearInterval(schedulerInterval);
+ schedulerInterval = null;
+ console.log('๐ง Scheduler: Brain scheduler stopped');
+ }
+}
+
+/**
+ * Get scheduler status
+ */
+export async function getSchedulerStatus() {
+ const settings = await storage.loadMeta();
+
+ return {
+ running: schedulerInterval !== null,
+ checkIntervalMs: CHECK_INTERVAL_MS,
+ dailyDigest: {
+ scheduledTime: settings.dailyDigestTime,
+ lastRun: settings.lastDailyDigest
+ },
+ weeklyReview: {
+ scheduledDay: settings.weeklyReviewDay,
+ scheduledTime: settings.weeklyReviewTime,
+ lastRun: settings.lastWeeklyReview
+ }
+ };
+}
+
+/**
+ * Manually trigger the next scheduled digest (for testing)
+ */
+export async function triggerNextDigest() {
+ console.log('๐ง Scheduler: Manually triggering daily digest...');
+ return runDailyDigest();
+}
+
+/**
+ * Manually trigger the next scheduled review (for testing)
+ */
+export async function triggerNextReview() {
+ console.log('๐ง Scheduler: Manually triggering weekly review...');
+ return runWeeklyReview();
+}
diff --git a/server/services/brainStorage.js b/server/services/brainStorage.js
new file mode 100644
index 0000000..e6f0113
--- /dev/null
+++ b/server/services/brainStorage.js
@@ -0,0 +1,571 @@
+/**
+ * Brain Storage Service
+ *
+ * Handles file-based persistence for the Brain feature.
+ * - JSON for entity stores (people, projects, ideas, admin)
+ * - JSONL for append-heavy logs (inbox_log, digests, reviews)
+ * - In-memory caching with TTL for performance
+ */
+
+import { readFile, writeFile, appendFile, mkdir } from 'fs/promises';
+import { existsSync } from 'fs';
+import { join, dirname } from 'path';
+import { fileURLToPath } from 'url';
+import { v4 as uuidv4 } from 'uuid';
+import EventEmitter from 'events';
+import { readJSONFile, safeJSONParse } from '../lib/fileUtils.js';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+const DATA_DIR = join(__dirname, '../../data/brain');
+
+// File paths
+const FILES = {
+ meta: join(DATA_DIR, 'meta.json'),
+ inboxLog: join(DATA_DIR, 'inbox_log.jsonl'),
+ people: join(DATA_DIR, 'people.json'),
+ projects: join(DATA_DIR, 'projects.json'),
+ ideas: join(DATA_DIR, 'ideas.json'),
+ admin: join(DATA_DIR, 'admin.json'),
+ digests: join(DATA_DIR, 'digests.jsonl'),
+ reviews: join(DATA_DIR, 'reviews.jsonl')
+};
+
+// Event emitter for brain data changes
+export const brainEvents = new EventEmitter();
+
+// In-memory caches
+const caches = {
+ meta: { data: null, timestamp: 0 },
+ people: { data: null, timestamp: 0 },
+ projects: { data: null, timestamp: 0 },
+ ideas: { data: null, timestamp: 0 },
+ admin: { data: null, timestamp: 0 },
+ inboxLog: { data: null, timestamp: 0 },
+ digests: { data: null, timestamp: 0 },
+ reviews: { data: null, timestamp: 0 }
+};
+
+const CACHE_TTL_MS = 2000;
+
+// Default settings
+const DEFAULT_META = {
+ version: 1,
+ confidenceThreshold: 0.6,
+ dailyDigestTime: '09:00',
+ weeklyReviewTime: '16:00',
+ weeklyReviewDay: 'sunday',
+ defaultProvider: 'lmstudio',
+ defaultModel: 'gptoss-20b',
+ lastDailyDigest: null,
+ lastWeeklyReview: null
+};
+
+/**
+ * Ensure brain data directory exists
+ */
+export async function ensureBrainDir() {
+ if (!existsSync(DATA_DIR)) {
+ await mkdir(DATA_DIR, { recursive: true });
+ console.log(`๐ง Created brain data directory: ${DATA_DIR}`);
+ }
+}
+
+/**
+ * Generate a new UUID
+ */
+export function generateId() {
+ return uuidv4();
+}
+
+/**
+ * Get current ISO timestamp
+ */
+export function now() {
+ return new Date().toISOString();
+}
+
+// =============================================================================
+// META / SETTINGS
+// =============================================================================
+
+/**
+ * Load brain settings
+ */
+export async function loadMeta() {
+ const cache = caches.meta;
+ if (cache.data && (Date.now() - cache.timestamp) < CACHE_TTL_MS) {
+ return cache.data;
+ }
+
+ await ensureBrainDir();
+
+ const loaded = await readJSONFile(FILES.meta, null);
+ cache.data = loaded ? { ...DEFAULT_META, ...loaded } : { ...DEFAULT_META };
+ cache.timestamp = Date.now();
+ return cache.data;
+}
+
+/**
+ * Save brain settings
+ */
+export async function saveMeta(meta) {
+ await ensureBrainDir();
+ await writeFile(FILES.meta, JSON.stringify(meta, null, 2));
+ caches.meta.data = meta;
+ caches.meta.timestamp = Date.now();
+ brainEvents.emit('meta:changed', meta);
+}
+
+/**
+ * Update brain settings (partial update)
+ */
+export async function updateMeta(updates) {
+ const meta = await loadMeta();
+ const updated = { ...meta, ...updates };
+ await saveMeta(updated);
+ return updated;
+}
+
+// =============================================================================
+// JSON ENTITY STORES (people, projects, ideas, admin)
+// =============================================================================
+
+/**
+ * Load a JSON entity store
+ */
+async function loadJsonStore(type) {
+ const cache = caches[type];
+ if (cache.data && (Date.now() - cache.timestamp) < CACHE_TTL_MS) {
+ return cache.data;
+ }
+
+ await ensureBrainDir();
+ const filePath = FILES[type];
+
+ cache.data = await readJSONFile(filePath, { records: {} });
+ cache.timestamp = Date.now();
+ return cache.data;
+}
+
+/**
+ * Save a JSON entity store
+ */
+async function saveJsonStore(type, data) {
+ await ensureBrainDir();
+ await writeFile(FILES[type], JSON.stringify(data, null, 2));
+ caches[type].data = data;
+ caches[type].timestamp = Date.now();
+ brainEvents.emit(`${type}:changed`, data);
+}
+
+/**
+ * Get all records from a JSON store
+ */
+export async function getAll(type) {
+ const data = await loadJsonStore(type);
+ return Object.entries(data.records).map(([id, record]) => ({ id, ...record }));
+}
+
+/**
+ * Get a record by ID
+ */
+export async function getById(type, id) {
+ const data = await loadJsonStore(type);
+ const record = data.records[id];
+ return record ? { id, ...record } : null;
+}
+
+/**
+ * Create a new record
+ */
+export async function create(type, recordData) {
+ const data = await loadJsonStore(type);
+ const id = generateId();
+ const timestamp = now();
+
+ const record = {
+ ...recordData,
+ createdAt: timestamp,
+ updatedAt: timestamp
+ };
+
+ data.records[id] = record;
+ await saveJsonStore(type, data);
+
+ console.log(`๐ง Created ${type} record: ${id}`);
+ return { id, ...record };
+}
+
+/**
+ * Update a record
+ */
+export async function update(type, id, updates) {
+ const data = await loadJsonStore(type);
+
+ if (!data.records[id]) {
+ return null;
+ }
+
+ const record = {
+ ...data.records[id],
+ ...updates,
+ createdAt: data.records[id].createdAt,
+ updatedAt: now()
+ };
+
+ data.records[id] = record;
+ await saveJsonStore(type, data);
+
+ console.log(`๐ง Updated ${type} record: ${id}`);
+ return { id, ...record };
+}
+
+/**
+ * Delete a record
+ */
+export async function remove(type, id) {
+ const data = await loadJsonStore(type);
+
+ if (!data.records[id]) {
+ return false;
+ }
+
+ delete data.records[id];
+ await saveJsonStore(type, data);
+
+ console.log(`๐ง Deleted ${type} record: ${id}`);
+ return true;
+}
+
+/**
+ * Query records with filters
+ */
+export async function query(type, filters = {}) {
+ const records = await getAll(type);
+
+ return records.filter(record => {
+ for (const [key, value] of Object.entries(filters)) {
+ if (record[key] !== value) return false;
+ }
+ return true;
+ });
+}
+
+// =============================================================================
+// JSONL APPEND LOGS (inbox_log, digests, reviews)
+// =============================================================================
+
+/**
+ * Load all records from a JSONL file
+ */
+async function loadJsonlStore(type) {
+ const cache = caches[type];
+ if (cache.data && (Date.now() - cache.timestamp) < CACHE_TTL_MS) {
+ return cache.data;
+ }
+
+ await ensureBrainDir();
+ const filePath = FILES[type];
+
+ if (!existsSync(filePath)) {
+ cache.data = [];
+ cache.timestamp = Date.now();
+ return cache.data;
+ }
+
+ const content = await readFile(filePath, 'utf-8');
+ const lines = content.trim().split('\n').filter(line => line.trim());
+ cache.data = lines.map(line => safeJSONParse(line, null)).filter(item => item !== null);
+ cache.timestamp = Date.now();
+ return cache.data;
+}
+
+/**
+ * Append a record to a JSONL file
+ */
+async function appendJsonl(type, record) {
+ await ensureBrainDir();
+ const line = JSON.stringify(record) + '\n';
+ await appendFile(FILES[type], line);
+
+ // Invalidate cache so next read gets fresh data
+ caches[type].data = null;
+ caches[type].timestamp = 0;
+
+ brainEvents.emit(`${type}:added`, record);
+}
+
+/**
+ * Rewrite entire JSONL file (for updates/deletes)
+ */
+async function rewriteJsonl(type, records) {
+ await ensureBrainDir();
+ const content = records.map(r => JSON.stringify(r)).join('\n') + (records.length > 0 ? '\n' : '');
+ await writeFile(FILES[type], content);
+
+ caches[type].data = records;
+ caches[type].timestamp = Date.now();
+
+ brainEvents.emit(`${type}:changed`, records);
+}
+
+// =============================================================================
+// INBOX LOG OPERATIONS
+// =============================================================================
+
+/**
+ * Get all inbox log entries
+ */
+export async function getInboxLog(options = {}) {
+ const { status, limit = 50, offset = 0 } = options;
+ let records = await loadJsonlStore('inboxLog');
+
+ // Sort by capturedAt descending (newest first)
+ records = records.sort((a, b) => new Date(b.capturedAt) - new Date(a.capturedAt));
+
+ // Filter by status if provided
+ if (status) {
+ records = records.filter(r => r.status === status);
+ }
+
+ // Apply pagination
+ return records.slice(offset, offset + limit);
+}
+
+/**
+ * Get inbox log entry by ID
+ */
+export async function getInboxLogById(id) {
+ const records = await loadJsonlStore('inboxLog');
+ return records.find(r => r.id === id) || null;
+}
+
+/**
+ * Create inbox log entry
+ */
+export async function createInboxLog(entry) {
+ const record = {
+ id: generateId(),
+ ...entry,
+ capturedAt: entry.capturedAt || now()
+ };
+
+ await appendJsonl('inboxLog', record);
+ console.log(`๐ง Created inbox log: ${record.id}`);
+ return record;
+}
+
+/**
+ * Update inbox log entry
+ */
+export async function updateInboxLog(id, updates) {
+ const records = await loadJsonlStore('inboxLog');
+ const index = records.findIndex(r => r.id === id);
+
+ if (index === -1) {
+ return null;
+ }
+
+ records[index] = { ...records[index], ...updates };
+ await rewriteJsonl('inboxLog', records);
+
+ console.log(`๐ง Updated inbox log: ${id}`);
+ return records[index];
+}
+
+/**
+ * Delete inbox log entry
+ */
+export async function deleteInboxLog(id) {
+ const records = await loadJsonlStore('inboxLog');
+ const index = records.findIndex(r => r.id === id);
+
+ if (index === -1) {
+ return false;
+ }
+
+ records.splice(index, 1);
+ await rewriteJsonl('inboxLog', records);
+
+ console.log(`๐ง Deleted inbox log: ${id}`);
+ return true;
+}
+
+/**
+ * Get inbox log count by status
+ */
+export async function getInboxLogCounts() {
+ const records = await loadJsonlStore('inboxLog');
+
+ const counts = {
+ total: records.length,
+ filed: 0,
+ needs_review: 0,
+ corrected: 0,
+ error: 0
+ };
+
+ for (const record of records) {
+ if (counts[record.status] !== undefined) {
+ counts[record.status]++;
+ }
+ }
+
+ return counts;
+}
+
+// =============================================================================
+// DIGEST OPERATIONS
+// =============================================================================
+
+/**
+ * Get all digests
+ */
+export async function getDigests(limit = 10) {
+ let records = await loadJsonlStore('digests');
+ records = records.sort((a, b) => new Date(b.generatedAt) - new Date(a.generatedAt));
+ return records.slice(0, limit);
+}
+
+/**
+ * Get latest digest
+ */
+export async function getLatestDigest() {
+ const digests = await getDigests(1);
+ return digests[0] || null;
+}
+
+/**
+ * Create digest entry
+ */
+export async function createDigest(digest) {
+ const record = {
+ id: generateId(),
+ ...digest,
+ generatedAt: now()
+ };
+
+ await appendJsonl('digests', record);
+
+ // Update meta with last digest time
+ await updateMeta({ lastDailyDigest: record.generatedAt });
+
+ console.log(`๐ง Created daily digest: ${record.id}`);
+ return record;
+}
+
+// =============================================================================
+// REVIEW OPERATIONS
+// =============================================================================
+
+/**
+ * Get all reviews
+ */
+export async function getReviews(limit = 10) {
+ let records = await loadJsonlStore('reviews');
+ records = records.sort((a, b) => new Date(b.generatedAt) - new Date(a.generatedAt));
+ return records.slice(0, limit);
+}
+
+/**
+ * Get latest review
+ */
+export async function getLatestReview() {
+ const reviews = await getReviews(1);
+ return reviews[0] || null;
+}
+
+/**
+ * Create review entry
+ */
+export async function createReview(review) {
+ const record = {
+ id: generateId(),
+ ...review,
+ generatedAt: now()
+ };
+
+ await appendJsonl('reviews', record);
+
+ // Update meta with last review time
+ await updateMeta({ lastWeeklyReview: record.generatedAt });
+
+ console.log(`๐ง Created weekly review: ${record.id}`);
+ return record;
+}
+
+// =============================================================================
+// CONVENIENCE EXPORTS FOR ENTITY TYPES
+// =============================================================================
+
+// People
+export const getPeople = (filters) => filters ? query('people', filters) : getAll('people');
+export const getPersonById = (id) => getById('people', id);
+export const createPerson = (data) => create('people', data);
+export const updatePerson = (id, data) => update('people', id, data);
+export const deletePerson = (id) => remove('people', id);
+
+// Projects
+export const getProjects = (filters) => filters ? query('projects', filters) : getAll('projects');
+export const getProjectById = (id) => getById('projects', id);
+export const createProject = (data) => create('projects', data);
+export const updateProject = (id, data) => update('projects', id, data);
+export const deleteProject = (id) => remove('projects', id);
+
+// Ideas
+export const getIdeas = (filters) => filters ? query('ideas', filters) : getAll('ideas');
+export const getIdeaById = (id) => getById('ideas', id);
+export const createIdea = (data) => create('ideas', data);
+export const updateIdea = (id, data) => update('ideas', id, data);
+export const deleteIdea = (id) => remove('ideas', id);
+
+// Admin
+export const getAdminItems = (filters) => filters ? query('admin', filters) : getAll('admin');
+export const getAdminById = (id) => getById('admin', id);
+export const createAdminItem = (data) => create('admin', data);
+export const updateAdminItem = (id, data) => update('admin', id, data);
+export const deleteAdminItem = (id) => remove('admin', id);
+
+// =============================================================================
+// UTILITY FUNCTIONS
+// =============================================================================
+
+/**
+ * Invalidate all caches
+ */
+export function invalidateAllCaches() {
+ for (const key of Object.keys(caches)) {
+ caches[key].data = null;
+ caches[key].timestamp = 0;
+ }
+}
+
+/**
+ * Get brain data summary (for dashboard)
+ */
+export async function getSummary() {
+ const [people, projects, ideas, adminItems, inboxCounts, meta] = await Promise.all([
+ getAll('people'),
+ getAll('projects'),
+ getAll('ideas'),
+ getAll('admin'),
+ getInboxLogCounts(),
+ loadMeta()
+ ]);
+
+ return {
+ counts: {
+ people: people.length,
+ projects: projects.length,
+ ideas: ideas.length,
+ admin: adminItems.length,
+ inbox: inboxCounts
+ },
+ activeProjects: projects.filter(p => p.status === 'active').length,
+ openAdmin: adminItems.filter(a => a.status === 'open').length,
+ needsReview: inboxCounts.needs_review,
+ lastDailyDigest: meta.lastDailyDigest,
+ lastWeeklyReview: meta.lastWeeklyReview
+ };
+}
diff --git a/server/services/contextUpgrader.js b/server/services/contextUpgrader.js
new file mode 100644
index 0000000..a10478b
--- /dev/null
+++ b/server/services/contextUpgrader.js
@@ -0,0 +1,331 @@
+/**
+ * Context Upgrader Service
+ *
+ * Analyzes if a task needs more context or a heavier model.
+ * Provides recommendations for model/context upgrades.
+ */
+
+import * as thinkingLevels from './thinkingLevels.js'
+import * as localThinking from './localThinking.js'
+import { cosEvents } from './cosEvents.js'
+
+// Upgrade triggers
+const UPGRADE_TRIGGERS = {
+ // Context-based triggers
+ longContext: {
+ threshold: 5000,
+ suggestUpgrade: true,
+ suggestHeavyModel: true
+ },
+ multiFileChange: {
+ fileCount: 3,
+ suggestUpgrade: true,
+ suggestHeavyModel: false
+ },
+
+ // Complexity-based triggers
+ highComplexity: {
+ threshold: 0.7,
+ suggestUpgrade: true,
+ suggestHeavyModel: true
+ },
+ architecturalChange: {
+ suggestUpgrade: true,
+ suggestHeavyModel: true
+ },
+
+ // Error-based triggers
+ previousFailure: {
+ suggestUpgrade: true,
+ suggestHeavyModel: true
+ },
+ consecutiveFailures: {
+ threshold: 2,
+ suggestUpgrade: true,
+ suggestHeavyModel: true
+ }
+}
+
+// Upgrade history tracking
+const upgradeHistory = []
+const MAX_HISTORY = 200
+
+/**
+ * Analyze if a task needs context or model upgrade
+ *
+ * @param {Object} task - Task to analyze
+ * @param {Object} context - Current context
+ * @returns {Promise} - Upgrade recommendations
+ */
+async function analyzeTaskNeedsUpgrade(task, context = {}) {
+ const recommendations = {
+ needsUpgrade: false,
+ suggestHeavyModel: false,
+ suggestMoreContext: false,
+ currentLevel: context.thinkingLevel || 'medium',
+ suggestedLevel: null,
+ reasons: [],
+ confidence: 0
+ }
+
+ // Check context length
+ const contextLength = context.contextLength || (task.description?.length || 0)
+ if (contextLength > UPGRADE_TRIGGERS.longContext.threshold) {
+ recommendations.needsUpgrade = true
+ recommendations.suggestHeavyModel = true
+ recommendations.reasons.push(`Context length (${contextLength}) exceeds threshold`)
+ }
+
+ // Check for multi-file changes (from task metadata or keywords)
+ const fileReferences = countFileReferences(task.description || '')
+ if (fileReferences >= UPGRADE_TRIGGERS.multiFileChange.fileCount) {
+ recommendations.needsUpgrade = true
+ recommendations.reasons.push(`Multiple file changes detected (${fileReferences} files)`)
+ }
+
+ // Get local complexity analysis if available
+ const analysis = await localThinking.analyzeTask(task)
+ if (analysis.complexity > UPGRADE_TRIGGERS.highComplexity.threshold) {
+ recommendations.needsUpgrade = true
+ recommendations.suggestHeavyModel = true
+ recommendations.reasons.push(`High complexity score (${analysis.complexity.toFixed(2)})`)
+ }
+
+ // Check for architectural keywords
+ if (hasArchitecturalKeywords(task.description || '')) {
+ recommendations.needsUpgrade = true
+ recommendations.suggestHeavyModel = true
+ recommendations.reasons.push('Architectural change detected')
+ }
+
+ // Check for previous failures
+ if (context.previousAttempts > 0 && context.previousSuccess === false) {
+ recommendations.needsUpgrade = true
+ recommendations.suggestHeavyModel = true
+ recommendations.reasons.push('Previous attempt failed')
+ }
+
+ if (context.consecutiveFailures >= UPGRADE_TRIGGERS.consecutiveFailures.threshold) {
+ recommendations.needsUpgrade = true
+ recommendations.suggestHeavyModel = true
+ recommendations.reasons.push(`${context.consecutiveFailures} consecutive failures`)
+ }
+
+ // Determine suggested level
+ if (recommendations.needsUpgrade) {
+ const suggestedLevel = thinkingLevels.suggestLevel(analysis)
+ const currentLevelIndex = Object.keys(thinkingLevels.THINKING_LEVELS).indexOf(recommendations.currentLevel)
+ const suggestedLevelIndex = Object.keys(thinkingLevels.THINKING_LEVELS).indexOf(suggestedLevel)
+
+ if (suggestedLevelIndex > currentLevelIndex) {
+ recommendations.suggestedLevel = suggestedLevel
+ } else if (recommendations.suggestHeavyModel) {
+ recommendations.suggestedLevel = thinkingLevels.upgradeLevel(recommendations.currentLevel)
+ }
+
+ recommendations.confidence = calculateConfidence(recommendations.reasons.length)
+ }
+
+ // Record analysis
+ recordUpgradeAnalysis(task.id, recommendations)
+
+ return recommendations
+}
+
+/**
+ * Count file references in text
+ * @param {string} text - Text to analyze
+ * @returns {number} - Number of file references
+ */
+function countFileReferences(text) {
+ const filePatterns = [
+ /\.(js|ts|jsx|tsx|py|go|rs|java|c|cpp|h|hpp)(?:\s|$|,|:)/g,
+ /(?:file|component|module|service|route|model)s?\s*:/gi,
+ /(?:create|modify|update|edit|change)\s+(?:the\s+)?(?:file|component)/gi
+ ]
+
+ let count = 0
+ for (const pattern of filePatterns) {
+ const matches = text.match(pattern)
+ if (matches) count += matches.length
+ }
+
+ return count
+}
+
+/**
+ * Check for architectural change keywords
+ * @param {string} text - Text to analyze
+ * @returns {boolean} - True if architectural keywords found
+ */
+function hasArchitecturalKeywords(text) {
+ const keywords = [
+ 'architect', 'restructure', 'redesign', 'migration',
+ 'refactor entire', 'overhaul', 'rewrite', 'new system',
+ 'database schema', 'api design', 'infrastructure'
+ ]
+
+ const lower = text.toLowerCase()
+ return keywords.some(k => lower.includes(k))
+}
+
+/**
+ * Calculate confidence score based on trigger count
+ * @param {number} triggerCount - Number of triggers hit
+ * @returns {number} - Confidence 0-1
+ */
+function calculateConfidence(triggerCount) {
+ if (triggerCount === 0) return 0
+ if (triggerCount === 1) return 0.6
+ if (triggerCount === 2) return 0.8
+ return 0.95
+}
+
+/**
+ * Record upgrade analysis for learning
+ * @param {string} taskId - Task identifier
+ * @param {Object} recommendations - Upgrade recommendations
+ */
+function recordUpgradeAnalysis(taskId, recommendations) {
+ upgradeHistory.unshift({
+ taskId,
+ timestamp: Date.now(),
+ needsUpgrade: recommendations.needsUpgrade,
+ suggestedLevel: recommendations.suggestedLevel,
+ reasons: recommendations.reasons,
+ confidence: recommendations.confidence
+ })
+
+ while (upgradeHistory.length > MAX_HISTORY) {
+ upgradeHistory.pop()
+ }
+}
+
+/**
+ * Record upgrade outcome for learning
+ * @param {string} taskId - Task identifier
+ * @param {boolean} wasSuccessful - Whether upgrade led to success
+ */
+function recordUpgradeOutcome(taskId, wasSuccessful) {
+ const entry = upgradeHistory.find(h => h.taskId === taskId)
+ if (entry) {
+ entry.outcome = wasSuccessful ? 'success' : 'failure'
+ entry.resolvedAt = Date.now()
+ }
+
+ cosEvents.emit('upgrade:outcomeRecorded', { taskId, wasSuccessful })
+}
+
+/**
+ * Get upgrade statistics
+ * @returns {Object} - Upgrade statistics
+ */
+function getStats() {
+ const recent = upgradeHistory.filter(h => h.outcome)
+ const upgradedTasks = recent.filter(h => h.needsUpgrade)
+ const successfulUpgrades = upgradedTasks.filter(h => h.outcome === 'success')
+
+ return {
+ totalAnalyses: upgradeHistory.length,
+ upgradesRecommended: upgradeHistory.filter(h => h.needsUpgrade).length,
+ upgradeSuccessRate: upgradedTasks.length > 0
+ ? ((successfulUpgrades.length / upgradedTasks.length) * 100).toFixed(1) + '%'
+ : 'N/A',
+ commonReasons: getCommonReasons(),
+ recentUpgrades: upgradeHistory.slice(0, 10).map(h => ({
+ taskId: h.taskId,
+ suggestedLevel: h.suggestedLevel,
+ outcome: h.outcome || 'pending'
+ }))
+ }
+}
+
+/**
+ * Get most common upgrade reasons
+ * @returns {Object} - Reason counts
+ */
+function getCommonReasons() {
+ const reasons = {}
+
+ for (const entry of upgradeHistory) {
+ if (!entry.needsUpgrade) continue
+
+ for (const reason of entry.reasons) {
+ // Normalize reason
+ const normalized = reason.split('(')[0].trim()
+ reasons[normalized] = (reasons[normalized] || 0) + 1
+ }
+ }
+
+ return reasons
+}
+
+/**
+ * Should upgrade based on quick heuristics (no async)
+ * @param {Object} task - Task to check
+ * @param {Object} context - Current context
+ * @returns {boolean} - True if upgrade likely needed
+ */
+function quickCheckNeedsUpgrade(task, context = {}) {
+ const description = task.description || ''
+
+ // Quick length check
+ if (description.length > UPGRADE_TRIGGERS.longContext.threshold) {
+ return true
+ }
+
+ // Quick keyword check
+ if (hasArchitecturalKeywords(description)) {
+ return true
+ }
+
+ // Priority check
+ const priority = task.priority?.toUpperCase()
+ if (priority === 'URGENT' || priority === 'CRITICAL') {
+ return true
+ }
+
+ // Failure check
+ if (context.previousSuccess === false) {
+ return true
+ }
+
+ return false
+}
+
+/**
+ * Get upgrade history
+ * @param {Object} options - Filter options
+ * @returns {Array} - Upgrade history
+ */
+function getHistory(options = {}) {
+ let history = [...upgradeHistory]
+
+ if (options.needsUpgrade !== undefined) {
+ history = history.filter(h => h.needsUpgrade === options.needsUpgrade)
+ }
+
+ if (options.outcome) {
+ history = history.filter(h => h.outcome === options.outcome)
+ }
+
+ const limit = options.limit || 50
+ return history.slice(0, limit)
+}
+
+/**
+ * Clear upgrade history
+ */
+function clearHistory() {
+ upgradeHistory.length = 0
+}
+
+export {
+ analyzeTaskNeedsUpgrade,
+ recordUpgradeOutcome,
+ getStats,
+ getHistory,
+ clearHistory,
+ quickCheckNeedsUpgrade,
+ UPGRADE_TRIGGERS
+}
diff --git a/server/services/cos.js b/server/services/cos.js
index 19871a9..dce07a7 100644
--- a/server/services/cos.js
+++ b/server/services/cos.js
@@ -9,7 +9,6 @@ import { readFile, writeFile, mkdir, readdir, rm } from 'fs/promises';
import { existsSync } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
-import { EventEmitter } from 'events';
import { exec } from 'child_process';
import { promisify } from 'util';
import { v4 as uuidv4 } from 'uuid';
@@ -17,7 +16,12 @@ import { getActiveProvider } from './providers.js';
import { parseTasksMarkdown, groupTasksByStatus, getNextTask, getAutoApprovedTasks, getAwaitingApprovalTasks, updateTaskStatus, generateTasksMarkdown } from '../lib/taskParser.js';
import { isAppOnCooldown, getNextAppForReview, markAppReviewStarted, markIdleReviewStarted } from './appActivity.js';
import { getAllApps } from './apps.js';
-import { getAdaptiveCooldownMultiplier, getSkippedTaskTypes, getPerformanceSummary } from './taskLearning.js';
+import { getAdaptiveCooldownMultiplier, getSkippedTaskTypes, getPerformanceSummary, checkAndRehabilitateSkippedTasks } from './taskLearning.js';
+import { schedule as scheduleEvent, cancel as cancelEvent, getStats as getSchedulerStats } from './eventScheduler.js';
+import { generateProactiveTasks as generateMissionTasks, getStats as getMissionStats } from './missions.js';
+// Import and re-export cosEvents from separate module to avoid circular dependencies
+import { cosEvents as _cosEvents } from './cosEvents.js';
+export const cosEvents = _cosEvents;
const execAsync = promisify(exec);
@@ -31,16 +35,13 @@ const REPORTS_DIR = join(COS_DIR, 'reports');
const SCRIPTS_DIR = join(COS_DIR, 'scripts');
const ROOT_DIR = join(__dirname, '../../');
-// Event emitter for CoS events
-export const cosEvents = new EventEmitter();
-
/**
* Emit a log event for UI display
* Exported for use by other CoS-related services
* @param {string} level - Log level: 'info', 'warn', 'error', 'success', 'debug'
* @param {string} message - Log message
* @param {Object} data - Additional data to include in log entry
- * @param {string} prefix - Optional prefix for console output (e.g., '[SelfImprovement]')
+ * @param {string} prefix - Optional prefix for console output (e.g., 'SelfImprovement')
*/
export function emitLog(level, message, data = {}, prefix = '') {
const logEntry = {
@@ -49,7 +50,7 @@ export function emitLog(level, message, data = {}, prefix = '') {
message,
...data
};
- const emoji = level === 'error' ? 'โ' : level === 'warn' ? 'โ ๏ธ' : level === 'success' ? 'โ
' : 'โน๏ธ';
+ const emoji = level === 'error' ? 'โ' : level === 'warn' ? 'โ ๏ธ' : level === 'success' ? 'โ
' : level === 'debug' ? '๐' : 'โน๏ธ';
const prefixStr = prefix ? ` ${prefix}` : '';
console.log(`${emoji}${prefixStr} ${message}`);
cosEvents.emit('log', logEntry);
@@ -57,8 +58,6 @@ export function emitLog(level, message, data = {}, prefix = '') {
// In-memory daemon state
let daemonRunning = false;
-let evaluationInterval = null;
-let healthCheckInterval = null;
// Mutex lock for state operations to prevent race conditions
let stateLock = Promise.resolve();
@@ -101,6 +100,7 @@ const DEFAULT_CONFIG = {
comprehensiveAppImprovement: true, // Use comprehensive analysis for managed apps (same as PortOS self-improvement)
immediateExecution: true, // Execute new tasks immediately, don't wait for interval
proactiveMode: true, // Be proactive about finding work
+ rehabilitationGracePeriodDays: 7, // Days before auto-retrying skipped task types (learning-based)
autoFixThresholds: {
maxLinesChanged: 50, // Auto-approve if <= this many lines changed
allowedCategories: [ // Categories that can auto-execute
@@ -269,15 +269,27 @@ export async function start() {
// Then reset any orphaned in_progress tasks (no running agent)
await resetOrphanedTasks();
- // Start evaluation loop
- evaluationInterval = setInterval(async () => {
- await evaluateTasks();
- }, state.config.evaluationIntervalMs);
+ // Start evaluation loop using event scheduler
+ scheduleEvent({
+ id: 'cos-evaluation',
+ type: 'interval',
+ intervalMs: state.config.evaluationIntervalMs,
+ handler: async () => {
+ await evaluateTasks();
+ },
+ metadata: { description: 'CoS task evaluation loop' }
+ });
- // Start health check loop
- healthCheckInterval = setInterval(async () => {
- await runHealthCheck();
- }, state.config.healthCheckIntervalMs);
+ // Start health check loop using event scheduler
+ scheduleEvent({
+ id: 'cos-health-check',
+ type: 'interval',
+ intervalMs: state.config.healthCheckIntervalMs,
+ handler: async () => {
+ await runHealthCheck();
+ },
+ metadata: { description: 'CoS health check loop' }
+ });
// Run initial evaluation and health check
emitLog('info', 'Running initial task evaluation...');
@@ -297,15 +309,9 @@ export async function stop() {
return { success: false, error: 'Not running' };
}
- // Clear intervals
- if (evaluationInterval) {
- clearInterval(evaluationInterval);
- evaluationInterval = null;
- }
- if (healthCheckInterval) {
- clearInterval(healthCheckInterval);
- healthCheckInterval = null;
- }
+ // Cancel scheduled events
+ cancelEvent('cos-evaluation');
+ cancelEvent('cos-health-check');
const state = await loadState();
state.running = false;
@@ -551,7 +557,33 @@ export async function evaluateTasks() {
await queueEligibleImprovementTasks(state, cosTaskData);
}
- // Priority 3: Only generate direct idle task if:
+ // Priority 3: Mission-driven proactive tasks (if no user tasks)
+ if (tasksToSpawn.length < availableSlots && !hasPendingUserTasks && state.config.proactiveMode) {
+ const missionTasks = await generateMissionTasks({ maxTasks: availableSlots - tasksToSpawn.length }).catch(err => {
+ emitLog('debug', `Mission task generation failed: ${err.message}`);
+ return [];
+ });
+
+ for (const missionTask of missionTasks) {
+ if (tasksToSpawn.length >= availableSlots) break;
+ // Convert mission task to COS task format
+ tasksToSpawn.push({
+ id: missionTask.id,
+ description: missionTask.description,
+ priority: missionTask.priority?.toUpperCase() || 'MEDIUM',
+ status: 'pending',
+ metadata: missionTask.metadata,
+ taskType: 'internal',
+ approvalRequired: !missionTask.autoApprove
+ });
+ emitLog('info', `Generated mission task: ${missionTask.id} (${missionTask.metadata?.missionName})`, {
+ missionId: missionTask.metadata?.missionId,
+ appId: missionTask.metadata?.appId
+ });
+ }
+ }
+
+ // Priority 4: Only generate direct idle task if:
// 1. Nothing to spawn
// 2. No pending user tasks (even on cooldown)
// 3. No system tasks queued
@@ -593,6 +625,18 @@ export async function evaluateTasks() {
}
}
+ // Periodically check for task types eligible for auto-rehabilitation (every 100 evaluations, ~2 hours)
+ // This gives previously-failing task types a fresh chance after their grace period expires
+ if (evalCount % 100 === 0 && evalCount > 0) {
+ const gracePeriodMs = (state.config.rehabilitationGracePeriodDays || 7) * 24 * 60 * 60 * 1000;
+ const rehabilitationResult = await checkAndRehabilitateSkippedTasks(gracePeriodMs).catch(() => ({ count: 0 }));
+ if (rehabilitationResult.count > 0) {
+ emitLog('success', `Auto-rehabilitated ${rehabilitationResult.count} skipped task type(s) for retry`, {
+ rehabilitated: rehabilitationResult.rehabilitated?.map(r => r.taskType) || []
+ });
+ }
+ }
+
// Update evaluation count
await withStateLock(async () => {
const s = await loadState();
@@ -919,7 +963,7 @@ async function generateSelfImprovementTask(state) {
await saveState(s);
});
- return generateSelfImprovementTaskForType(request.taskType, state);
+ return await generateSelfImprovementTaskForType(request.taskType, state);
}
// Use the schedule service to determine the next task type
@@ -991,17 +1035,35 @@ async function generateSelfImprovementTask(state) {
// Get task descriptions from the centralized helper function
const taskDescriptions = getSelfImprovementTaskDescriptions();
- return generateSelfImprovementTaskForType(nextType, state, taskDescriptions);
+ return await generateSelfImprovementTaskForType(nextType, state, taskDescriptions);
}
/**
* Helper function to generate a self-improvement task for a specific type
* Used by both normal rotation and on-demand task requests
*/
-function generateSelfImprovementTaskForType(taskType, state, taskDescriptions = null) {
- // Use provided descriptions or generate default ones
- const descriptions = taskDescriptions || getSelfImprovementTaskDescriptions();
- const description = descriptions[taskType] || `[Self-Improvement] ${taskType} analysis`;
+async function generateSelfImprovementTaskForType(taskType, state, taskDescriptions = null) {
+ const taskSchedule = await import('./taskSchedule.js');
+ const interval = await taskSchedule.getSelfImprovementInterval(taskType);
+
+ // Get the effective prompt (custom or default)
+ const description = await taskSchedule.getSelfImprovementPrompt(taskType);
+
+ const metadata = {
+ analysisType: taskType,
+ autoGenerated: true,
+ selfImprovement: true
+ };
+
+ // Use configured model/provider if specified, otherwise use default
+ if (interval.providerId) {
+ metadata.providerId = interval.providerId;
+ }
+ if (interval.model) {
+ metadata.model = interval.model;
+ } else {
+ metadata.model = 'claude-opus-4-5-20251101';
+ }
const task = {
id: `self-improve-${taskType}-${Date.now().toString(36)}`,
@@ -1009,12 +1071,7 @@ function generateSelfImprovementTaskForType(taskType, state, taskDescriptions =
priority: 'MEDIUM',
priorityValue: PRIORITY_VALUES['MEDIUM'],
description,
- metadata: {
- analysisType: taskType,
- autoGenerated: true,
- selfImprovement: true,
- model: 'claude-opus-4-5-20251101'
- },
+ metadata,
taskType: 'internal',
autoApproved: true
};
@@ -1377,7 +1434,15 @@ async function generateManagedAppImprovementTask(app, state) {
emitLog('info', `Generating comprehensive improvement task for ${app.name}: ${nextType} (${selectionReason})`, { appId: app.id, analysisType: nextType });
- // Task descriptions for each analysis type
+ // Get the effective prompt (custom or default template)
+ const promptTemplate = await taskSchedule.getAppImprovementPrompt(nextType);
+
+ // Replace template variables in the prompt
+ const description = promptTemplate
+ .replace(/\{appName\}/g, app.name)
+ .replace(/\{repoPath\}/g, app.repoPath);
+
+ // Legacy task descriptions - keeping for fallback but they won't be used
const taskDescriptions = {
'security-audit': `[App Improvement: ${app.name}] Security Audit
@@ -1635,15 +1700,27 @@ Repository: ${app.repoPath}
Use model: claude-opus-4-5-20251101 for thorough typing`
};
- const description = taskDescriptions[nextType] || `[App Improvement: ${app.name}] ${nextType}
+ // Get interval settings to determine provider/model
+ const interval = await taskSchedule.getAppImprovementInterval(nextType);
-Perform ${nextType} analysis on ${app.name}.
-
-Repository: ${app.repoPath}
-
-Analyze the codebase and make improvements. Commit changes with clear descriptions.
+ const metadata = {
+ app: app.id,
+ appName: app.name,
+ repoPath: app.repoPath,
+ analysisType: nextType,
+ autoGenerated: true,
+ comprehensiveImprovement: true
+ };
-Use model: claude-opus-4-5-20251101`;
+ // Use configured model/provider if specified, otherwise use default
+ if (interval.providerId) {
+ metadata.providerId = interval.providerId;
+ }
+ if (interval.model) {
+ metadata.model = interval.model;
+ } else {
+ metadata.model = 'claude-opus-4-5-20251101';
+ }
const task = {
id: `app-improve-${app.id}-${nextType}-${Date.now().toString(36)}`,
@@ -1651,15 +1728,7 @@ Use model: claude-opus-4-5-20251101`;
priority: state.config.idleReviewPriority || 'MEDIUM',
priorityValue: PRIORITY_VALUES[state.config.idleReviewPriority] || 2,
description,
- metadata: {
- app: app.id,
- appName: app.name,
- repoPath: app.repoPath,
- analysisType: nextType,
- autoGenerated: true,
- comprehensiveImprovement: true,
- model: 'claude-opus-4-5-20251101'
- },
+ metadata,
taskType: 'internal',
autoApproved: true
};
@@ -2307,7 +2376,12 @@ export async function addTask(taskData, taskType = 'user') {
section: 'pending'
};
- tasks.push(newTask);
+ // Add task to top or bottom based on position parameter
+ if (taskData.position === 'top') {
+ tasks.unshift(newTask);
+ } else {
+ tasks.push(newTask);
+ }
// Write back to file
const includeApprovalFlags = taskType === 'internal';
diff --git a/server/services/cosEvents.js b/server/services/cosEvents.js
new file mode 100644
index 0000000..ca87408
--- /dev/null
+++ b/server/services/cosEvents.js
@@ -0,0 +1,42 @@
+/**
+ * CoS Events Module
+ *
+ * Centralized event emitter for Chief of Staff services.
+ * Separated to avoid circular dependencies between cos.js and other modules.
+ */
+
+import { EventEmitter } from 'events'
+
+// Event emitter for CoS events
+export const cosEvents = new EventEmitter()
+
+/**
+ * Emit a log event for UI display
+ * @param {string} level - Log level: 'info', 'warn', 'error', 'success', 'debug'
+ * @param {string} message - Log message
+ * @param {Object} data - Additional data to include in log entry
+ * @param {string} prefix - Optional prefix for console output (e.g., 'SelfImprovement')
+ */
+export function emitLog(level, message, data = {}, prefix = '') {
+ const logEntry = {
+ timestamp: new Date().toISOString(),
+ level,
+ message,
+ ...data
+ }
+
+ // Emit for UI
+ cosEvents.emit('log', logEntry)
+
+ // Also log to console with appropriate emoji
+ const levelEmojis = {
+ info: 'i',
+ warn: '!',
+ error: 'x',
+ success: '+',
+ debug: '.'
+ }
+ const emoji = levelEmojis[level] || 'i'
+ const prefixStr = prefix ? `${prefix}: ` : ''
+ console.log(`[${logEntry.timestamp}] ${emoji} ${prefixStr}${message}`)
+}
diff --git a/server/services/cosEvolution.js b/server/services/cosEvolution.js
new file mode 100644
index 0000000..703b067
--- /dev/null
+++ b/server/services/cosEvolution.js
@@ -0,0 +1,409 @@
+/**
+ * COS Evolution Service
+ *
+ * Enables COS to propose and execute self-modifications:
+ * - Base model changes
+ * - Threshold adjustments
+ * - Model downloads
+ *
+ * Full autonomy - COS can evolve without user approval.
+ * Changes are logged for transparency.
+ */
+
+import { promises as fs } from 'fs'
+import path from 'path'
+import { v4 as uuidv4 } from 'uuid'
+import { cosEvents } from './cosEvents.js'
+import * as lmStudioManager from './lmStudioManager.js'
+
+const DATA_DIR = path.join(process.cwd(), 'data', 'cos')
+const EVOLUTION_FILE = path.join(DATA_DIR, 'evolution.json')
+
+// Default evolution state
+const DEFAULT_STATE = {
+ currentBaseModel: 'gpt-oss-20b',
+ currentThinkingThresholds: {
+ contextLength: { low: 1000, medium: 3000, high: 6000 },
+ complexity: { low: 0.4, medium: 0.6, high: 0.8 }
+ },
+ proposals: [],
+ evolutionHistory: [],
+ performanceBaseline: {
+ successRate: 0.7,
+ avgDurationMs: 60000,
+ recordedAt: null
+ },
+ settings: {
+ autoApproveModelChanges: true, // Full autonomy
+ autoApproveThresholdChanges: true,
+ rollbackThreshold: 0.2, // Rollback if success rate drops >20%
+ minTasksBeforeEvaluation: 50 // Evaluate after 50 tasks
+ }
+}
+
+// In-memory state
+let evolutionState = null
+
+/**
+ * Ensure data directory exists
+ */
+async function ensureDataDir() {
+ await fs.mkdir(DATA_DIR, { recursive: true })
+}
+
+/**
+ * Load evolution state
+ * @returns {Promise} - Evolution state
+ */
+async function loadState() {
+ if (evolutionState) return evolutionState
+
+ await ensureDataDir()
+
+ const exists = await fs.access(EVOLUTION_FILE).then(() => true).catch(() => false)
+ if (exists) {
+ const content = await fs.readFile(EVOLUTION_FILE, 'utf-8')
+ evolutionState = JSON.parse(content)
+ } else {
+ evolutionState = { ...DEFAULT_STATE }
+ }
+
+ return evolutionState
+}
+
+/**
+ * Save evolution state
+ */
+async function saveState() {
+ await ensureDataDir()
+ await fs.writeFile(EVOLUTION_FILE, JSON.stringify(evolutionState, null, 2))
+}
+
+/**
+ * Get current evolution state
+ * @returns {Promise} - Current state
+ */
+async function getState() {
+ return loadState()
+}
+
+/**
+ * Propose a base model change
+ * With full autonomy, this executes immediately.
+ *
+ * @param {string} newModel - New model identifier
+ * @param {string} reasoning - Reasoning for change
+ * @returns {Promise} - Proposal result
+ */
+async function proposeBaseModelChange(newModel, reasoning) {
+ const state = await loadState()
+ const proposal = {
+ id: uuidv4(),
+ type: 'base-model-change',
+ currentModel: state.currentBaseModel,
+ proposedModel: newModel,
+ reasoning,
+ createdAt: new Date().toISOString(),
+ status: 'approved', // Auto-approved with full autonomy
+ executedAt: null,
+ result: null
+ }
+
+ state.proposals.push(proposal)
+
+ // Execute immediately (full autonomy)
+ const result = await executeBaseModelChange(proposal.id)
+
+ await saveState()
+
+ console.log(`๐งฌ Evolution: Base model changed ${proposal.currentModel} โ ${newModel}`)
+ cosEvents.emit('evolution:modelChanged', {
+ from: proposal.currentModel,
+ to: newModel,
+ reasoning
+ })
+
+ return result
+}
+
+/**
+ * Execute a base model change
+ * @param {string} proposalId - Proposal to execute
+ * @returns {Promise} - Execution result
+ */
+async function executeBaseModelChange(proposalId) {
+ const state = await loadState()
+ const proposal = state.proposals.find(p => p.id === proposalId)
+
+ if (!proposal) {
+ return { success: false, error: 'Proposal not found' }
+ }
+
+ // Record the change
+ const previousModel = state.currentBaseModel
+ state.currentBaseModel = proposal.proposedModel
+ proposal.executedAt = new Date().toISOString()
+ proposal.status = 'executed'
+
+ // Record in history
+ state.evolutionHistory.push({
+ type: 'base-model-change',
+ from: previousModel,
+ to: proposal.proposedModel,
+ reasoning: proposal.reasoning,
+ executedAt: proposal.executedAt
+ })
+
+ await saveState()
+
+ return {
+ success: true,
+ previousModel,
+ newModel: proposal.proposedModel
+ }
+}
+
+/**
+ * Request a model download
+ * @param {string} modelId - Model to download
+ * @param {string} purpose - Why this model is needed
+ * @returns {Promise} - Download result
+ */
+async function requestModelDownload(modelId, purpose) {
+ const state = await loadState()
+
+ // Record the request
+ state.evolutionHistory.push({
+ type: 'model-download-request',
+ modelId,
+ purpose,
+ requestedAt: new Date().toISOString()
+ })
+
+ await saveState()
+
+ // Attempt download via LM Studio
+ const result = await lmStudioManager.downloadModel(modelId)
+
+ console.log(`๐ฅ Evolution: Model download requested - ${modelId}`)
+ cosEvents.emit('evolution:downloadRequested', { modelId, purpose, result })
+
+ return result
+}
+
+/**
+ * Adjust thinking thresholds
+ * @param {string} thresholdType - 'contextLength' or 'complexity'
+ * @param {Object} newValues - New threshold values
+ * @param {string} reasoning - Reasoning for change
+ * @returns {Promise} - Adjustment result
+ */
+async function adjustThinkingThreshold(thresholdType, newValues, reasoning) {
+ const state = await loadState()
+
+ if (!state.currentThinkingThresholds[thresholdType]) {
+ return { success: false, error: 'Unknown threshold type' }
+ }
+
+ const previousValues = { ...state.currentThinkingThresholds[thresholdType] }
+
+ // Apply new values
+ Object.assign(state.currentThinkingThresholds[thresholdType], newValues)
+
+ // Record in history
+ state.evolutionHistory.push({
+ type: 'threshold-adjustment',
+ thresholdType,
+ from: previousValues,
+ to: state.currentThinkingThresholds[thresholdType],
+ reasoning,
+ executedAt: new Date().toISOString()
+ })
+
+ await saveState()
+
+ console.log(`๐๏ธ Evolution: Threshold ${thresholdType} adjusted`)
+ cosEvents.emit('evolution:thresholdAdjusted', {
+ thresholdType,
+ from: previousValues,
+ to: state.currentThinkingThresholds[thresholdType],
+ reasoning
+ })
+
+ return {
+ success: true,
+ thresholdType,
+ previousValues,
+ newValues: state.currentThinkingThresholds[thresholdType]
+ }
+}
+
+/**
+ * Record performance metrics for baseline comparison
+ * @param {Object} metrics - Current performance metrics
+ */
+async function recordPerformanceMetrics(metrics) {
+ const state = await loadState()
+
+ const previous = { ...state.performanceBaseline }
+
+ state.performanceBaseline = {
+ successRate: metrics.successRate,
+ avgDurationMs: metrics.avgDurationMs,
+ taskCount: metrics.taskCount || 0,
+ recordedAt: new Date().toISOString()
+ }
+
+ // Check for significant drop requiring rollback
+ if (previous.successRate && metrics.successRate) {
+ const drop = previous.successRate - metrics.successRate
+ if (drop > state.settings.rollbackThreshold) {
+ console.log(`โ ๏ธ Evolution: Performance drop detected (${(drop * 100).toFixed(1)}%)`)
+ cosEvents.emit('evolution:performanceDrop', {
+ previousRate: previous.successRate,
+ currentRate: metrics.successRate,
+ dropPercent: drop * 100
+ })
+ // Could trigger automatic rollback here
+ }
+ }
+
+ await saveState()
+}
+
+/**
+ * Rollback to previous base model
+ * @param {string} reason - Reason for rollback
+ * @returns {Promise} - Rollback result
+ */
+async function rollbackBaseModel(reason) {
+ const state = await loadState()
+
+ // Find the previous model change in history
+ const modelChanges = state.evolutionHistory
+ .filter(e => e.type === 'base-model-change')
+ .reverse()
+
+ if (modelChanges.length < 2) {
+ return { success: false, error: 'No previous model to rollback to' }
+ }
+
+ const previousChange = modelChanges[1]
+ const currentModel = state.currentBaseModel
+ const rollbackTo = previousChange.from
+
+ state.currentBaseModel = rollbackTo
+
+ state.evolutionHistory.push({
+ type: 'rollback',
+ from: currentModel,
+ to: rollbackTo,
+ reason,
+ executedAt: new Date().toISOString()
+ })
+
+ await saveState()
+
+ console.log(`โช Evolution: Rolled back to ${rollbackTo}`)
+ cosEvents.emit('evolution:rollback', {
+ from: currentModel,
+ to: rollbackTo,
+ reason
+ })
+
+ return {
+ success: true,
+ previousModel: currentModel,
+ restoredModel: rollbackTo
+ }
+}
+
+/**
+ * Get evolution history
+ * @param {Object} options - Filter options
+ * @returns {Promise} - Evolution history
+ */
+async function getHistory(options = {}) {
+ const state = await loadState()
+ let history = [...state.evolutionHistory]
+
+ if (options.type) {
+ history = history.filter(e => e.type === options.type)
+ }
+
+ // Sort by date descending
+ history.sort((a, b) =>
+ new Date(b.executedAt || b.requestedAt).getTime() -
+ new Date(a.executedAt || a.requestedAt).getTime()
+ )
+
+ const limit = options.limit || 50
+ return history.slice(0, limit)
+}
+
+/**
+ * Get pending proposals (for display, though auto-approved)
+ * @returns {Promise} - Pending proposals
+ */
+async function getPendingProposals() {
+ const state = await loadState()
+ return state.proposals.filter(p => p.status === 'pending')
+}
+
+/**
+ * Get evolution statistics
+ * @returns {Promise} - Evolution stats
+ */
+async function getStats() {
+ const state = await loadState()
+
+ const byType = {}
+ for (const entry of state.evolutionHistory) {
+ byType[entry.type] = (byType[entry.type] || 0) + 1
+ }
+
+ return {
+ currentBaseModel: state.currentBaseModel,
+ currentThresholds: state.currentThinkingThresholds,
+ performanceBaseline: state.performanceBaseline,
+ totalEvolutions: state.evolutionHistory.length,
+ byType,
+ pendingProposals: state.proposals.filter(p => p.status === 'pending').length,
+ settings: state.settings
+ }
+}
+
+/**
+ * Update evolution settings
+ * @param {Object} newSettings - Settings to update
+ * @returns {Promise} - Updated settings
+ */
+async function updateSettings(newSettings) {
+ const state = await loadState()
+
+ Object.assign(state.settings, newSettings)
+ await saveState()
+
+ return state.settings
+}
+
+/**
+ * Invalidate cache
+ */
+function invalidateCache() {
+ evolutionState = null
+}
+
+export {
+ getState,
+ proposeBaseModelChange,
+ requestModelDownload,
+ adjustThinkingThreshold,
+ recordPerformanceMetrics,
+ rollbackBaseModel,
+ getHistory,
+ getPendingProposals,
+ getStats,
+ updateSettings,
+ invalidateCache
+}
diff --git a/server/services/cosRunnerClient.js b/server/services/cosRunnerClient.js
index 24bc6c3..81714a6 100644
--- a/server/services/cosRunnerClient.js
+++ b/server/services/cosRunnerClient.js
@@ -110,6 +110,10 @@ export async function spawnAgentViaRunner(options) {
workspacePath,
model,
envVars,
+ // New: CLI-agnostic parameters
+ cliCommand,
+ cliArgs,
+ // Legacy (deprecated)
claudePath
} = options;
@@ -127,6 +131,8 @@ export async function spawnAgentViaRunner(options) {
workspacePath,
model,
envVars,
+ cliCommand,
+ cliArgs,
claudePath
}),
signal: controller.signal
diff --git a/server/services/digital-twin.js b/server/services/digital-twin.js
new file mode 100644
index 0000000..a6c584f
--- /dev/null
+++ b/server/services/digital-twin.js
@@ -0,0 +1,2620 @@
+/**
+ * Digital Twin Service
+ *
+ * Core business logic for the Digital Twin feature:
+ * - Document CRUD (manage digital twin markdown files)
+ * - Behavioral testing against LLMs
+ * - Enrichment questionnaire system
+ * - Export for external LLM use
+ * - CoS integration (digital twin context injection)
+ */
+
+import { readFile, writeFile, unlink, readdir, mkdir, stat } from 'fs/promises';
+import { existsSync } from 'fs';
+import { join, dirname, basename } from 'path';
+import { fileURLToPath } from 'url';
+import { v4 as uuidv4 } from 'uuid';
+import EventEmitter from 'events';
+import { getActiveProvider, getProviderById } from './providers.js';
+import { buildPrompt } from './promptService.js';
+import {
+ digitalTwinMetaSchema,
+ documentMetaSchema,
+ testHistoryEntrySchema
+} from '../lib/digitalTwinValidation.js';
+import { safeJSONParse } from '../lib/fileUtils.js';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+const DIGITAL_TWIN_DIR = join(__dirname, '../../data/digital-twin');
+const META_FILE = join(DIGITAL_TWIN_DIR, 'meta.json');
+
+// Event emitter for digital twin data changes
+export const digitalTwinEvents = new EventEmitter();
+export const soulEvents = digitalTwinEvents; // Alias for backwards compatibility
+
+// In-memory cache
+const cache = {
+ meta: { data: null, timestamp: 0 },
+ documents: { data: null, timestamp: 0 },
+ tests: { data: null, timestamp: 0 }
+};
+const CACHE_TTL_MS = 5000;
+
+// Default meta structure
+const DEFAULT_META = {
+ version: '1.0.0',
+ documents: [],
+ testHistory: [],
+ enrichment: { completedCategories: [], lastSession: null },
+ settings: { autoInjectToCoS: true, maxContextTokens: 4000 }
+};
+
+// Enrichment category configurations
+export const ENRICHMENT_CATEGORIES = {
+ core_memories: {
+ label: 'Core Memories',
+ description: 'Formative experiences that shaped your identity',
+ targetDoc: 'MEMORIES.md',
+ targetCategory: 'enrichment',
+ questions: [
+ 'What childhood memory still influences how you approach problems today?',
+ 'Describe a pivotal moment that changed your worldview.',
+ 'What failure taught you the most important lesson?'
+ ]
+ },
+ favorite_books: {
+ label: 'Favorite Books',
+ description: 'Books that shaped your thinking',
+ targetDoc: 'BOOKS.md',
+ targetCategory: 'entertainment',
+ listBased: true,
+ itemLabel: 'Book',
+ itemPlaceholder: 'e.g., Gรถdel, Escher, Bach by Douglas Hofstadter',
+ notePlaceholder: 'Why this book matters to you, what it taught you...',
+ analyzePrompt: 'Analyze these book choices to understand the reader\'s intellectual interests, values, and worldview.',
+ questions: [
+ 'What book fundamentally changed how you see the world?',
+ 'Which book do you find yourself re-reading or recommending most?',
+ 'What fiction shaped your values or aspirations?'
+ ]
+ },
+ favorite_movies: {
+ label: 'Favorite Movies',
+ description: 'Films that resonate with your aesthetic and values',
+ targetDoc: 'MOVIES.md',
+ targetCategory: 'entertainment',
+ listBased: true,
+ itemLabel: 'Movie',
+ itemPlaceholder: 'e.g., Blade Runner 2049',
+ notePlaceholder: 'What draws you to this film, memorable scenes or themes...',
+ analyzePrompt: 'Analyze these film choices to understand the person\'s aesthetic preferences, emotional resonance patterns, and values.',
+ questions: [
+ 'What film captures your aesthetic sensibility?',
+ 'Which movie do you quote or reference most often?',
+ 'What film made you think differently about a topic?'
+ ]
+ },
+ music_taste: {
+ label: 'Music Taste',
+ description: 'Music as cognitive infrastructure',
+ targetDoc: 'MUSIC.md',
+ targetCategory: 'audio',
+ listBased: true,
+ itemLabel: 'Album/Artist',
+ itemPlaceholder: 'e.g., OK Computer by Radiohead',
+ notePlaceholder: 'When you listen to this, how you use it (focus, energy, mood)...',
+ analyzePrompt: 'Analyze these music choices to understand how this person uses music for cognitive and emotional regulation.',
+ questions: [
+ 'What album do you use for deep focus work?',
+ 'What music captures your emotional baseline?',
+ 'Describe your relationship with music - is it background or active engagement?'
+ ]
+ },
+ communication: {
+ label: 'Communication Style',
+ description: 'How you prefer to give and receive information',
+ targetDoc: 'COMMUNICATION.md',
+ targetCategory: 'social',
+ questions: [
+ 'How do you prefer to receive critical feedback?',
+ 'Do you prefer direct confrontation or diplomatic approach in disagreements?',
+ 'What communication style irritates you most?'
+ ]
+ },
+ decision_making: {
+ label: 'Decision Making',
+ description: 'How you approach choices and uncertainty',
+ targetDoc: 'PREFERENCES.md',
+ targetCategory: 'core',
+ questions: [
+ 'Do you decide quickly with limited info, or deliberate extensively?',
+ 'How do you handle irreversible decisions differently from reversible ones?',
+ 'What role does intuition play in your decision-making?'
+ ]
+ },
+ values: {
+ label: 'Values',
+ description: 'Core principles that guide your actions',
+ targetDoc: 'VALUES.md',
+ targetCategory: 'core',
+ questions: [
+ 'What principle would you never compromise, even at personal cost?',
+ 'What value do you wish more people held?',
+ 'Where do you draw the line between pragmatism and principle?'
+ ]
+ },
+ aesthetics: {
+ label: 'Aesthetic Preferences',
+ description: 'Visual and design sensibilities',
+ targetDoc: 'AESTHETICS.md',
+ targetCategory: 'creative',
+ questions: [
+ 'Minimalist or maximalist - where do you fall?',
+ 'What visual style or design movement resonates with you?',
+ 'How important is aesthetic coherence in your work environment?'
+ ]
+ },
+ daily_routines: {
+ label: 'Daily Routines',
+ description: 'Habits and rhythms that structure your day',
+ targetDoc: 'ROUTINES.md',
+ targetCategory: 'lifestyle',
+ questions: [
+ 'Are you a morning person or night owl, and how does this affect your work?',
+ 'What daily ritual is non-negotiable for your productivity?',
+ 'How do you recharge - solitude, social, physical activity?'
+ ]
+ },
+ career_skills: {
+ label: 'Career & Skills',
+ description: 'Professional expertise and growth areas',
+ targetDoc: 'CAREER.md',
+ targetCategory: 'professional',
+ questions: [
+ 'What are you known for professionally?',
+ 'What skill are you actively trying to develop?',
+ 'What unique perspective does your background give you?'
+ ]
+ },
+ non_negotiables: {
+ label: 'Non-Negotiables',
+ description: 'Principles and boundaries that define your limits',
+ targetDoc: 'NON_NEGOTIABLES.md',
+ targetCategory: 'core',
+ questions: [
+ 'What principle would you never compromise, even at significant personal cost?',
+ 'What behavior in others immediately erodes your trust?',
+ 'What topic should your digital twin absolutely refuse to engage with?'
+ ]
+ },
+ decision_heuristics: {
+ label: 'Decision Heuristics',
+ description: 'Mental models and shortcuts for making choices',
+ targetDoc: 'DECISION_HEURISTICS.md',
+ targetCategory: 'core',
+ questions: [
+ 'When facing a decision with limited information, do you act quickly or wait for more data?',
+ 'How do you weigh reversible vs irreversible decisions differently?',
+ 'What role does optionality play in your decision-making?'
+ ]
+ },
+ error_intolerance: {
+ label: 'Error Intolerance',
+ description: 'What your digital twin should never do',
+ targetDoc: 'ERROR_INTOLERANCE.md',
+ targetCategory: 'core',
+ questions: [
+ 'What communication style or reasoning pattern irritates you most?',
+ 'What should your digital twin never do when responding to you?',
+ 'What type of "help" actually makes things worse for you?'
+ ]
+ },
+ personality_assessments: {
+ label: 'Personality Assessments',
+ description: 'Personality type results from assessments like Myers-Briggs, Big Five, DISC, Enneagram, etc.',
+ targetDoc: 'PERSONALITY.md',
+ targetCategory: 'core',
+ questions: [
+ 'What is your Myers-Briggs type (e.g., INTJ, ENFP)? If you test differently at different times, list all results.',
+ 'If you know your Big Five (OCEAN) scores, what are they? High/low on Openness, Conscientiousness, Extraversion, Agreeableness, Neuroticism?',
+ 'Have you taken other personality assessments (Enneagram, DISC, StrengthsFinder, etc.)? Share those results.'
+ ]
+ }
+};
+
+// =============================================================================
+// HELPERS
+// =============================================================================
+
+function generateId() {
+ return uuidv4();
+}
+
+function now() {
+ return new Date().toISOString();
+}
+
+async function ensureSoulDir() {
+ if (!existsSync(DIGITAL_TWIN_DIR)) {
+ await mkdir(DIGITAL_TWIN_DIR, { recursive: true });
+ console.log(`๐งฌ Created soul data directory: ${DIGITAL_TWIN_DIR}`);
+ }
+}
+
+// =============================================================================
+// META / SETTINGS
+// =============================================================================
+
+export async function loadMeta() {
+ if (cache.meta.data && (Date.now() - cache.meta.timestamp) < CACHE_TTL_MS) {
+ return cache.meta.data;
+ }
+
+ await ensureSoulDir();
+
+ if (!existsSync(META_FILE)) {
+ // Scan existing documents and build initial meta
+ const meta = await buildInitialMeta();
+ await saveMeta(meta);
+ return meta;
+ }
+
+ const content = await readFile(META_FILE, 'utf-8');
+ const parsed = safeJSONParse(content, DEFAULT_META);
+ const validated = digitalTwinMetaSchema.safeParse(parsed);
+
+ cache.meta.data = validated.success ? validated.data : { ...DEFAULT_META, ...parsed };
+ cache.meta.timestamp = Date.now();
+ return cache.meta.data;
+}
+
+async function buildInitialMeta() {
+ const meta = { ...DEFAULT_META };
+
+ const files = await readdir(DIGITAL_TWIN_DIR).catch(() => []);
+ const mdFiles = files.filter(f => f.endsWith('.md'));
+
+ for (const file of mdFiles) {
+ const content = await readFile(join(DIGITAL_TWIN_DIR, file), 'utf-8').catch(() => '');
+ const title = extractTitle(content) || file.replace('.md', '');
+ const category = inferCategory(file);
+ const version = extractVersion(content);
+
+ meta.documents.push({
+ id: generateId(),
+ filename: file,
+ title,
+ category,
+ version,
+ enabled: true,
+ priority: getPriorityForFile(file),
+ weight: 5 // Default weight
+ });
+ }
+
+ // Sort by priority
+ meta.documents.sort((a, b) => a.priority - b.priority);
+
+ return meta;
+}
+
+function extractTitle(content) {
+ const match = content.match(/^#\s+(.+)/m);
+ return match ? match[1].trim() : null;
+}
+
+function extractVersion(content) {
+ const match = content.match(/\*\*Version:\*\*\s*([\d.]+)/);
+ return match ? match[1] : null;
+}
+
+function inferCategory(filename) {
+ const upper = filename.toUpperCase();
+
+ // Audio/Music
+ if (upper.startsWith('AUDIO') || upper.includes('MUSIC')) return 'audio';
+
+ // Behavioral tests
+ if (upper.includes('BEHAVIORAL') || upper.includes('TEST_SUITE')) return 'behavioral';
+
+ // Entertainment (movies, books, TV, games)
+ if (upper.includes('MOVIE') || upper.includes('FILM') || upper.includes('BOOK') ||
+ upper.includes('TV') || upper.includes('GAME') || upper.includes('ENTERTAINMENT')) return 'entertainment';
+
+ // Professional
+ if (upper.includes('CAREER') || upper.includes('SKILL') || upper.includes('WORK') ||
+ upper.includes('PROFESSIONAL')) return 'professional';
+
+ // Lifestyle
+ if (upper.includes('ROUTINE') || upper.includes('HABIT') || upper.includes('HEALTH') ||
+ upper.includes('LIFESTYLE') || upper.includes('DAILY')) return 'lifestyle';
+
+ // Social
+ if (upper.includes('SOCIAL') || upper.includes('COMMUNICATION') ||
+ upper.includes('RELATIONSHIP')) return 'social';
+
+ // Creative
+ if (upper.includes('AESTHETIC') || upper.includes('CREATIVE') || upper.includes('ART') ||
+ upper.includes('DESIGN')) return 'creative';
+
+ // Enrichment (generic enrichment outputs)
+ if (['MEMORIES.md', 'FAVORITES.md', 'PREFERENCES.md'].includes(filename)) return 'enrichment';
+
+ // Default to core identity
+ return 'core';
+}
+
+function getPriorityForFile(filename) {
+ const priorities = {
+ 'SOUL.md': 1,
+ 'Expanded.md': 2,
+ 'BEHAVIORAL_TEST_SUITE.md': 100
+ };
+ return priorities[filename] || 50;
+}
+
+export async function saveMeta(meta) {
+ await ensureSoulDir();
+ await writeFile(META_FILE, JSON.stringify(meta, null, 2));
+ cache.meta.data = meta;
+ cache.meta.timestamp = Date.now();
+ soulEvents.emit('meta:changed', meta);
+}
+
+export async function updateMeta(updates) {
+ const meta = await loadMeta();
+ const updated = { ...meta, ...updates };
+ await saveMeta(updated);
+ return updated;
+}
+
+export async function updateSettings(settings) {
+ const meta = await loadMeta();
+ meta.settings = { ...meta.settings, ...settings };
+ await saveMeta(meta);
+ return meta.settings;
+}
+
+// =============================================================================
+// DOCUMENT OPERATIONS
+// =============================================================================
+
+export async function getDocuments() {
+ const meta = await loadMeta();
+ const documents = [];
+
+ for (const doc of meta.documents) {
+ const filePath = join(DIGITAL_TWIN_DIR, doc.filename);
+ const exists = existsSync(filePath);
+
+ if (exists) {
+ const stats = await stat(filePath);
+ documents.push({
+ ...doc,
+ lastModified: stats.mtime.toISOString(),
+ size: stats.size
+ });
+ }
+ }
+
+ return documents;
+}
+
+export async function getDocumentById(id) {
+ const meta = await loadMeta();
+ const docMeta = meta.documents.find(d => d.id === id);
+
+ if (!docMeta) return null;
+
+ const filePath = join(DIGITAL_TWIN_DIR, docMeta.filename);
+ if (!existsSync(filePath)) return null;
+
+ const content = await readFile(filePath, 'utf-8');
+ const stats = await stat(filePath);
+
+ return {
+ ...docMeta,
+ content,
+ lastModified: stats.mtime.toISOString(),
+ size: stats.size
+ };
+}
+
+export async function createDocument(data) {
+ await ensureSoulDir();
+
+ const meta = await loadMeta();
+ const filePath = join(DIGITAL_TWIN_DIR, data.filename);
+
+ // Check if file already exists
+ if (existsSync(filePath)) {
+ throw new Error(`Document ${data.filename} already exists`);
+ }
+
+ // Write the file
+ await writeFile(filePath, data.content);
+
+ // Add to meta
+ const docMeta = {
+ id: generateId(),
+ filename: data.filename,
+ title: data.title,
+ category: data.category,
+ version: extractVersion(data.content),
+ enabled: data.enabled !== false,
+ priority: data.priority || 50,
+ weight: data.weight || 5
+ };
+
+ meta.documents.push(docMeta);
+ meta.documents.sort((a, b) => a.priority - b.priority);
+ await saveMeta(meta);
+
+ console.log(`๐งฌ Created soul document: ${data.filename}`);
+ return { ...docMeta, content: data.content };
+}
+
+export async function updateDocument(id, updates) {
+ const meta = await loadMeta();
+ const docIndex = meta.documents.findIndex(d => d.id === id);
+
+ if (docIndex === -1) return null;
+
+ const docMeta = meta.documents[docIndex];
+ const filePath = join(DIGITAL_TWIN_DIR, docMeta.filename);
+
+ // Update file content if provided
+ if (updates.content) {
+ await writeFile(filePath, updates.content);
+ docMeta.version = extractVersion(updates.content);
+ }
+
+ // Update metadata
+ if (updates.title) docMeta.title = updates.title;
+ if (updates.enabled !== undefined) docMeta.enabled = updates.enabled;
+ if (updates.priority !== undefined) {
+ docMeta.priority = updates.priority;
+ meta.documents.sort((a, b) => a.priority - b.priority);
+ }
+ if (updates.weight !== undefined) docMeta.weight = updates.weight;
+
+ meta.documents[docIndex] = docMeta;
+ await saveMeta(meta);
+
+ console.log(`๐งฌ Updated soul document: ${docMeta.filename}`);
+ return await getDocumentById(id);
+}
+
+export async function deleteDocument(id) {
+ const meta = await loadMeta();
+ const docIndex = meta.documents.findIndex(d => d.id === id);
+
+ if (docIndex === -1) return false;
+
+ const docMeta = meta.documents[docIndex];
+ const filePath = join(DIGITAL_TWIN_DIR, docMeta.filename);
+
+ // Delete file
+ if (existsSync(filePath)) {
+ await unlink(filePath);
+ }
+
+ // Remove from meta
+ meta.documents.splice(docIndex, 1);
+ await saveMeta(meta);
+
+ console.log(`๐งฌ Deleted soul document: ${docMeta.filename}`);
+ return true;
+}
+
+// =============================================================================
+// BEHAVIORAL TESTING
+// =============================================================================
+
+export async function parseTestSuite() {
+ if (cache.tests.data && (Date.now() - cache.tests.timestamp) < CACHE_TTL_MS) {
+ return cache.tests.data;
+ }
+
+ const testFile = join(DIGITAL_TWIN_DIR, 'BEHAVIORAL_TEST_SUITE.md');
+ if (!existsSync(testFile)) {
+ return [];
+ }
+
+ const content = await readFile(testFile, 'utf-8');
+ const tests = [];
+
+ // Parse test blocks using regex
+ const testPattern = /### Test (\d+): (.+?)\n\n\*\*Prompt\*\*\s*\n([\s\S]*?)\n\n\*\*Expected Behavior\*\*\s*\n([\s\S]*?)\n\n\*\*Failure Signals\*\*\s*\n([\s\S]*?)(?=\n---|\n### Test|\n## |$)/g;
+
+ let match;
+ while ((match = testPattern.exec(content)) !== null) {
+ tests.push({
+ testId: parseInt(match[1]),
+ testName: match[2].trim(),
+ prompt: match[3].trim().replace(/^"|"$/g, ''),
+ expectedBehavior: match[4].trim(),
+ failureSignals: match[5].trim()
+ });
+ }
+
+ cache.tests.data = tests;
+ cache.tests.timestamp = Date.now();
+
+ return tests;
+}
+
+export async function runTests(providerId, model, testIds = null) {
+ const tests = await parseTestSuite();
+ const soulContext = await getSoulForPrompt();
+
+ const provider = await getProviderById(providerId);
+ if (!provider || !provider.enabled) {
+ throw new Error(`Provider ${providerId} not found or disabled`);
+ }
+
+ // Filter tests if specific IDs provided
+ const testsToRun = testIds
+ ? tests.filter(t => testIds.includes(t.testId))
+ : tests;
+
+ const results = [];
+ let passed = 0, failed = 0, partial = 0;
+
+ for (const test of testsToRun) {
+ const result = await runSingleTest(test, soulContext, providerId, model);
+ results.push(result);
+
+ if (result.result === 'passed') passed++;
+ else if (result.result === 'failed') failed++;
+ else if (result.result === 'partial') partial++;
+ }
+
+ // Save to history
+ const historyEntry = {
+ runId: generateId(),
+ providerId,
+ model,
+ score: testsToRun.length > 0 ? (passed + partial * 0.5) / testsToRun.length : 0,
+ passed,
+ failed,
+ partial,
+ total: testsToRun.length,
+ timestamp: now()
+ };
+
+ const meta = await loadMeta();
+ meta.testHistory.unshift(historyEntry);
+ meta.testHistory = meta.testHistory.slice(0, 50); // Keep last 50 runs
+ await saveMeta(meta);
+
+ console.log(`๐งฌ Test run complete: ${passed}/${testsToRun.length} passed`);
+
+ return {
+ ...historyEntry,
+ results
+ };
+}
+
+async function runSingleTest(test, soulContext, providerId, model) {
+ const provider = await getProviderById(providerId);
+
+ // Build the prompt with soul context as system prompt
+ const systemPrompt = `You are embodying the following identity. Respond as this person would, based on the soul document below:\n\n${soulContext}`;
+
+ let response = '';
+
+ if (provider.type === 'api') {
+ const headers = { 'Content-Type': 'application/json' };
+ if (provider.apiKey) {
+ headers['Authorization'] = `Bearer ${provider.apiKey}`;
+ }
+
+ const apiResponse = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model,
+ messages: [
+ { role: 'system', content: systemPrompt },
+ { role: 'user', content: test.prompt }
+ ],
+ temperature: 0.7,
+ max_tokens: 1000
+ })
+ });
+
+ if (!apiResponse.ok) {
+ throw new Error(`API error: ${apiResponse.status}`);
+ }
+
+ const data = await apiResponse.json();
+ response = data.choices?.[0]?.message?.content || '';
+ } else {
+ // For CLI providers, combine system prompt with user prompt
+ const { spawn } = await import('child_process');
+ const combinedPrompt = `${systemPrompt}\n\nUser: ${test.prompt}`;
+
+ response = await new Promise((resolve, reject) => {
+ const args = [...(provider.args || []), combinedPrompt];
+ let output = '';
+
+ const child = spawn(provider.command, args, {
+ env: { ...process.env, ...provider.envVars },
+ shell: false
+ });
+
+ child.stdout.on('data', (data) => { output += data.toString(); });
+ child.stderr.on('data', (data) => { output += data.toString(); });
+ child.on('close', () => resolve(output));
+ child.on('error', reject);
+
+ setTimeout(() => { child.kill(); reject(new Error('Timeout')); }, 60000);
+ });
+ }
+
+ // Score the response
+ const scoring = await scoreTestResponse(test, response, providerId, model);
+
+ return {
+ testId: test.testId,
+ testName: test.testName,
+ prompt: test.prompt,
+ expectedBehavior: test.expectedBehavior,
+ failureSignals: test.failureSignals,
+ response,
+ result: scoring.result,
+ reasoning: scoring.reasoning
+ };
+}
+
+async function scoreTestResponse(test, response, providerId, model) {
+ // Use AI to score the response
+ const prompt = await buildPrompt('soul-test-scorer', {
+ testName: test.testName,
+ prompt: test.prompt,
+ expectedBehavior: test.expectedBehavior,
+ failureSignals: test.failureSignals,
+ response: response.substring(0, 2000) // Truncate for scoring
+ }).catch(() => null);
+
+ if (!prompt) {
+ // Fallback: simple keyword matching
+ const hasFailureSignals = test.failureSignals.toLowerCase().split('\n')
+ .some(signal => response.toLowerCase().includes(signal.trim().slice(2)));
+
+ return {
+ result: hasFailureSignals ? 'failed' : 'passed',
+ reasoning: 'Automated keyword matching (prompt template unavailable)'
+ };
+ }
+
+ const provider = await getProviderById(providerId);
+
+ if (provider.type === 'api') {
+ const headers = { 'Content-Type': 'application/json' };
+ if (provider.apiKey) {
+ headers['Authorization'] = `Bearer ${provider.apiKey}`;
+ }
+
+ const apiResponse = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model,
+ messages: [{ role: 'user', content: prompt }],
+ temperature: 0.1,
+ max_tokens: 500
+ })
+ });
+
+ if (apiResponse.ok) {
+ const data = await apiResponse.json();
+ const scoringResponse = data.choices?.[0]?.message?.content || '';
+ return parseScoreResponse(scoringResponse);
+ }
+ }
+
+ // Default fallback
+ return { result: 'partial', reasoning: 'Unable to score - defaulting to partial' };
+}
+
+function parseScoreResponse(response) {
+ const lower = response.toLowerCase();
+
+ let result = 'partial';
+ if (lower.includes('"result": "passed"') || lower.includes('result: passed')) {
+ result = 'passed';
+ } else if (lower.includes('"result": "failed"') || lower.includes('result: failed')) {
+ result = 'failed';
+ }
+
+ // Extract reasoning
+ const reasoningMatch = response.match(/"reasoning":\s*"([^"]+)"/);
+ const reasoning = reasoningMatch ? reasoningMatch[1] : response.substring(0, 200);
+
+ return { result, reasoning };
+}
+
+export async function getTestHistory(limit = 10) {
+ const meta = await loadMeta();
+ return meta.testHistory.slice(0, limit);
+}
+
+// =============================================================================
+// ENRICHMENT
+// =============================================================================
+
+export function getEnrichmentCategories() {
+ return Object.entries(ENRICHMENT_CATEGORIES).map(([key, config]) => ({
+ id: key,
+ label: config.label,
+ description: config.description,
+ targetDoc: config.targetDoc,
+ sampleQuestions: config.questions.length,
+ // List-based category config
+ listBased: config.listBased || false,
+ itemLabel: config.itemLabel,
+ itemPlaceholder: config.itemPlaceholder,
+ notePlaceholder: config.notePlaceholder
+ }));
+}
+
+export async function generateEnrichmentQuestion(category, providerOverride, modelOverride) {
+ const config = ENRICHMENT_CATEGORIES[category];
+ if (!config) {
+ throw new Error(`Unknown enrichment category: ${category}`);
+ }
+
+ const meta = await loadMeta();
+ const questionsAnswered = meta.enrichment.questionsAnswered?.[category] || 0;
+
+ // Use predefined questions first
+ if (questionsAnswered < config.questions.length) {
+ return {
+ questionId: generateId(),
+ category,
+ question: config.questions[questionsAnswered],
+ isGenerated: false,
+ questionNumber: questionsAnswered + 1,
+ totalQuestions: config.questions.length
+ };
+ }
+
+ // Generate follow-up question using AI
+ const existingSoul = await getSoulForPrompt({ maxTokens: 2000 });
+
+ const prompt = await buildPrompt('soul-enrichment', {
+ category,
+ categoryLabel: config.label,
+ categoryDescription: config.description,
+ existingSoul,
+ questionsAnswered
+ }).catch(() => null);
+
+ if (!prompt) {
+ return {
+ questionId: generateId(),
+ category,
+ question: config.questions[0], // Fallback to first question
+ isGenerated: false,
+ questionNumber: questionsAnswered + 1,
+ totalQuestions: config.questions.length
+ };
+ }
+
+ const provider = providerOverride
+ ? await getProviderById(providerOverride)
+ : await getActiveProvider();
+
+ if (!provider) {
+ throw new Error('No AI provider available');
+ }
+
+ const model = modelOverride || provider.defaultModel;
+
+ let question = config.questions[0]; // Fallback
+
+ if (provider.type === 'api') {
+ const headers = { 'Content-Type': 'application/json' };
+ if (provider.apiKey) headers['Authorization'] = `Bearer ${provider.apiKey}`;
+
+ const response = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model,
+ messages: [{ role: 'user', content: prompt }],
+ temperature: 0.8,
+ max_tokens: 200
+ })
+ });
+
+ if (response.ok) {
+ const data = await response.json();
+ question = data.choices?.[0]?.message?.content?.trim() || question;
+ }
+ }
+
+ return {
+ questionId: generateId(),
+ category,
+ question,
+ isGenerated: true,
+ questionNumber: questionsAnswered + 1,
+ totalQuestions: null // Unlimited for generated questions
+ };
+}
+
+export async function processEnrichmentAnswer(data) {
+ const { category, question, answer, providerOverride, modelOverride } = data;
+ const config = ENRICHMENT_CATEGORIES[category];
+
+ if (!config) {
+ throw new Error(`Unknown enrichment category: ${category}`);
+ }
+
+ // Generate content to add to the target document
+ const provider = providerOverride
+ ? await getProviderById(providerOverride)
+ : await getActiveProvider();
+
+ let formattedContent = `### ${question}\n\n${answer}\n\n`;
+
+ if (provider) {
+ const prompt = await buildPrompt('soul-enrichment-process', {
+ category,
+ categoryLabel: config.label,
+ question,
+ answer
+ }).catch(() => null);
+
+ if (prompt && provider.type === 'api') {
+ const headers = { 'Content-Type': 'application/json' };
+ if (provider.apiKey) headers['Authorization'] = `Bearer ${provider.apiKey}`;
+
+ const response = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model: providerOverride || provider.defaultModel,
+ messages: [{ role: 'user', content: prompt }],
+ temperature: 0.3,
+ max_tokens: 500
+ })
+ });
+
+ if (response.ok) {
+ const respData = await response.json();
+ formattedContent = respData.choices?.[0]?.message?.content?.trim() || formattedContent;
+ }
+ }
+ }
+
+ // Append to target document
+ const targetPath = join(DIGITAL_TWIN_DIR, config.targetDoc);
+ let existingContent = '';
+
+ if (existsSync(targetPath)) {
+ existingContent = await readFile(targetPath, 'utf-8');
+ } else {
+ existingContent = `# ${config.label}\n\n`;
+ }
+
+ await writeFile(targetPath, existingContent + '\n' + formattedContent);
+
+ // Update meta
+ const meta = await loadMeta();
+ if (!meta.enrichment.questionsAnswered) {
+ meta.enrichment.questionsAnswered = {};
+ }
+ meta.enrichment.questionsAnswered[category] =
+ (meta.enrichment.questionsAnswered[category] || 0) + 1;
+ meta.enrichment.lastSession = now();
+
+ // Check if we've completed a category (3+ questions answered)
+ if (meta.enrichment.questionsAnswered[category] >= 3 &&
+ !meta.enrichment.completedCategories.includes(category)) {
+ meta.enrichment.completedCategories.push(category);
+ }
+
+ // Ensure document is in meta
+ const existingDoc = meta.documents.find(d => d.filename === config.targetDoc);
+ if (!existingDoc) {
+ meta.documents.push({
+ id: generateId(),
+ filename: config.targetDoc,
+ title: config.label,
+ category: config.targetCategory || 'enrichment',
+ enabled: true,
+ priority: 30
+ });
+ }
+
+ await saveMeta(meta);
+
+ console.log(`๐งฌ Enrichment answer processed for ${category}`);
+
+ return {
+ category,
+ targetDoc: config.targetDoc,
+ contentAdded: formattedContent
+ };
+}
+
+export async function getEnrichmentProgress() {
+ const meta = await loadMeta();
+ const categories = Object.keys(ENRICHMENT_CATEGORIES);
+
+ const progress = {};
+ for (const cat of categories) {
+ const answered = meta.enrichment.questionsAnswered?.[cat] || 0;
+ const baseQuestions = ENRICHMENT_CATEGORIES[cat].questions.length;
+ progress[cat] = {
+ answered,
+ baseQuestions,
+ completed: meta.enrichment.completedCategories.includes(cat),
+ percentage: Math.min(100, Math.round((answered / baseQuestions) * 100))
+ };
+ }
+
+ return {
+ categories: progress,
+ completedCount: meta.enrichment.completedCategories.length,
+ totalCategories: categories.length,
+ lastSession: meta.enrichment.lastSession
+ };
+}
+
+/**
+ * Analyze a list of items (books, movies, music) and generate document content
+ * @param {string} category - The enrichment category
+ * @param {Array} items - Array of { title, note } objects
+ * @param {string} providerId - Provider to use for analysis
+ * @param {string} model - Model to use
+ * @returns {Object} - { analysis, suggestedContent, items }
+ */
+export async function analyzeEnrichmentList(category, items, providerId, model) {
+ const config = ENRICHMENT_CATEGORIES[category];
+ if (!config) {
+ throw new Error(`Unknown enrichment category: ${category}`);
+ }
+
+ if (!config.listBased) {
+ throw new Error(`Category ${category} does not support list-based enrichment`);
+ }
+
+ if (!items || items.length === 0) {
+ throw new Error('No items provided');
+ }
+
+ const provider = await getProviderById(providerId);
+ if (!provider || !provider.enabled) {
+ throw new Error('Provider not found or disabled');
+ }
+
+ // Format items for the prompt
+ const itemsList = items.map((item, i) => {
+ let entry = `${i + 1}. ${item.title}`;
+ if (item.note) {
+ entry += `\n User's note: ${item.note}`;
+ }
+ return entry;
+ }).join('\n\n');
+
+ // Build the analysis prompt
+ const prompt = `You are analyzing someone's ${config.label.toLowerCase()} to understand their personality, values, and preferences.
+
+${config.analyzePrompt}
+
+## Items provided:
+
+${itemsList}
+
+## Your task:
+
+1. **Analysis**: For each item, briefly note what it might reveal about the person (themes, values, intellectual interests, emotional patterns).
+
+2. **Patterns**: Identify 3-5 overarching patterns or themes across all choices.
+
+3. **Personality Insights**: What does this collection suggest about the person's:
+ - Intellectual interests and curiosities
+ - Values and worldview
+ - Aesthetic preferences
+ - Emotional landscape
+
+4. **Generate Document**: Create a markdown document for ${config.targetDoc} that captures these insights in a format useful for an AI digital twin.
+
+Respond in JSON format:
+\`\`\`json
+{
+ "itemAnalysis": [
+ { "title": "...", "insights": "..." }
+ ],
+ "patterns": ["pattern 1", "pattern 2", ...],
+ "personalityInsights": {
+ "intellectualInterests": "...",
+ "valuesWorldview": "...",
+ "aestheticPreferences": "...",
+ "emotionalLandscape": "..."
+ },
+ "suggestedDocument": "# ${config.label}\\n\\n..."
+}
+\`\`\``;
+
+ if (provider.type !== 'api') {
+ throw new Error('List analysis requires an API provider');
+ }
+
+ const headers = { 'Content-Type': 'application/json' };
+ if (provider.apiKey) {
+ headers['Authorization'] = `Bearer ${provider.apiKey}`;
+ }
+
+ const response = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model,
+ messages: [{ role: 'user', content: prompt }],
+ temperature: 0.7,
+ max_tokens: 3000
+ })
+ });
+
+ if (!response.ok) {
+ throw new Error(`API error: ${response.status}`);
+ }
+
+ const data = await response.json();
+ const responseText = data.choices?.[0]?.message?.content || '';
+
+ // Parse the JSON response
+ const jsonMatch = responseText.match(/```json\s*([\s\S]*?)\s*```/);
+ if (jsonMatch) {
+ const parsed = JSON.parse(jsonMatch[1]);
+ return {
+ category,
+ items,
+ itemAnalysis: parsed.itemAnalysis || [],
+ patterns: parsed.patterns || [],
+ personalityInsights: parsed.personalityInsights || {},
+ suggestedDocument: parsed.suggestedDocument || '',
+ targetDoc: config.targetDoc,
+ targetCategory: config.targetCategory
+ };
+ }
+
+ // Fallback if JSON parsing fails
+ return {
+ category,
+ items,
+ rawResponse: responseText,
+ suggestedDocument: responseText,
+ targetDoc: config.targetDoc,
+ targetCategory: config.targetCategory
+ };
+}
+
+/**
+ * Save analyzed list content to document
+ */
+export async function saveEnrichmentListDocument(category, content, items) {
+ const config = ENRICHMENT_CATEGORIES[category];
+ if (!config) {
+ throw new Error(`Unknown enrichment category: ${category}`);
+ }
+
+ await ensureSoulDir();
+
+ const targetPath = join(DIGITAL_TWIN_DIR, config.targetDoc);
+ await writeFile(targetPath, content);
+
+ // Update meta
+ const meta = await loadMeta();
+
+ // Mark as completed since they provided a full list
+ if (!meta.enrichment.completedCategories.includes(category)) {
+ meta.enrichment.completedCategories.push(category);
+ }
+
+ // Store the list items for future reference/editing
+ if (!meta.enrichment.listItems) {
+ meta.enrichment.listItems = {};
+ }
+ meta.enrichment.listItems[category] = items;
+ meta.enrichment.lastSession = now();
+
+ // Ensure document is in meta
+ const existingDoc = meta.documents.find(d => d.filename === config.targetDoc);
+ if (!existingDoc) {
+ meta.documents.push({
+ id: generateId(),
+ filename: config.targetDoc,
+ title: config.label,
+ category: config.targetCategory || 'enrichment',
+ enabled: true,
+ priority: 30
+ });
+ }
+
+ await saveMeta(meta);
+
+ console.log(`๐งฌ Saved list-based enrichment for ${category} (${items.length} items)`);
+
+ return {
+ category,
+ targetDoc: config.targetDoc,
+ itemCount: items.length
+ };
+}
+
+/**
+ * Get previously saved list items for a category
+ */
+export async function getEnrichmentListItems(category) {
+ const meta = await loadMeta();
+ return meta.enrichment.listItems?.[category] || [];
+}
+
+// =============================================================================
+// EXPORT
+// =============================================================================
+
+export function getExportFormats() {
+ return [
+ { id: 'system_prompt', label: 'System Prompt', description: 'Combined markdown for direct injection' },
+ { id: 'claude_md', label: 'CLAUDE.md', description: 'Format for Claude Code integration' },
+ { id: 'json', label: 'JSON', description: 'Structured JSON for API integration' },
+ { id: 'individual', label: 'Individual Files', description: 'Separate files for each document' }
+ ];
+}
+
+export async function exportDigitalTwin(format, documentIds = null, includeDisabled = false) {
+ const meta = await loadMeta();
+ let docs = meta.documents;
+
+ // Filter by IDs if provided
+ if (documentIds) {
+ docs = docs.filter(d => documentIds.includes(d.id));
+ }
+
+ // Filter disabled unless explicitly included
+ if (!includeDisabled) {
+ docs = docs.filter(d => d.enabled);
+ }
+
+ // Exclude behavioral test suite from exports
+ docs = docs.filter(d => d.category !== 'behavioral');
+
+ // Sort by priority
+ docs.sort((a, b) => a.priority - b.priority);
+
+ // Load content for each document
+ const documentsWithContent = [];
+ for (const doc of docs) {
+ const filePath = join(DIGITAL_TWIN_DIR, doc.filename);
+ if (existsSync(filePath)) {
+ const content = await readFile(filePath, 'utf-8');
+ documentsWithContent.push({ ...doc, content });
+ }
+ }
+
+ switch (format) {
+ case 'system_prompt':
+ return exportAsSystemPrompt(documentsWithContent);
+ case 'claude_md':
+ return exportAsClaudeMd(documentsWithContent);
+ case 'json':
+ return exportAsJson(documentsWithContent);
+ case 'individual':
+ return exportAsIndividual(documentsWithContent);
+ default:
+ throw new Error(`Unknown export format: ${format}`);
+ }
+}
+
+function exportAsSystemPrompt(docs) {
+ let output = '# User Identity & Persona (Soul)\n\n';
+ output += 'The following describes the identity, values, and preferences of the user you are assisting. ';
+ output += 'Use this context to align your responses with their communication style, values, and goals.\n\n';
+ output += '---\n\n';
+
+ for (const doc of docs) {
+ output += doc.content + '\n\n---\n\n';
+ }
+
+ return {
+ format: 'system_prompt',
+ content: output.trim(),
+ documentCount: docs.length,
+ tokenEstimate: Math.ceil(output.length / 4)
+ };
+}
+
+function exportAsClaudeMd(docs) {
+ let output = '# Soul - User Identity\n\n';
+ output += '> This section defines the identity, values, and preferences of the user.\n\n';
+
+ for (const doc of docs) {
+ // Remove the main header from each doc to avoid duplication
+ const content = doc.content.replace(/^#\s+.+\n+/, '');
+ output += `## ${doc.title}\n\n${content}\n\n`;
+ }
+
+ return {
+ format: 'claude_md',
+ content: output.trim(),
+ documentCount: docs.length,
+ tokenEstimate: Math.ceil(output.length / 4)
+ };
+}
+
+function exportAsJson(docs) {
+ const structured = {
+ version: '1.0.0',
+ exportedAt: now(),
+ documents: docs.map(doc => ({
+ id: doc.id,
+ title: doc.title,
+ category: doc.category,
+ content: doc.content
+ })),
+ metadata: {
+ totalDocuments: docs.length,
+ categories: [...new Set(docs.map(d => d.category))]
+ }
+ };
+
+ const jsonString = JSON.stringify(structured, null, 2);
+
+ return {
+ format: 'json',
+ content: jsonString,
+ documentCount: docs.length,
+ tokenEstimate: Math.ceil(jsonString.length / 4)
+ };
+}
+export const exportSoul = exportDigitalTwin; // Alias for backwards compatibility
+
+function exportAsIndividual(docs) {
+ return {
+ format: 'individual',
+ files: docs.map(doc => ({
+ filename: doc.filename,
+ title: doc.title,
+ category: doc.category,
+ content: doc.content
+ })),
+ documentCount: docs.length,
+ tokenEstimate: docs.reduce((sum, d) => sum + Math.ceil(d.content.length / 4), 0)
+ };
+}
+
+// =============================================================================
+// COS INTEGRATION
+// =============================================================================
+
+export async function getDigitalTwinForPrompt(options = {}) {
+ const { maxTokens = 4000 } = options;
+ const meta = await loadMeta();
+
+ if (!meta.settings.autoInjectToCoS) {
+ return '';
+ }
+
+ // Get enabled documents sorted by weight (desc) then priority (asc)
+ // Higher weight = more important = included first
+ const docs = meta.documents
+ .filter(d => d.enabled && d.category !== 'behavioral')
+ .sort((a, b) => {
+ const weightA = a.weight || 5;
+ const weightB = b.weight || 5;
+ if (weightB !== weightA) return weightB - weightA; // Higher weight first
+ return a.priority - b.priority; // Then by priority
+ });
+
+ let output = '';
+ let tokenCount = 0;
+ const maxChars = maxTokens * 4; // Rough char-to-token estimate
+
+ for (const doc of docs) {
+ const filePath = join(DIGITAL_TWIN_DIR, doc.filename);
+ if (!existsSync(filePath)) continue;
+
+ const content = await readFile(filePath, 'utf-8');
+
+ if (tokenCount + content.length > maxChars) {
+ // Truncate if we're over budget
+ const remaining = maxChars - tokenCount;
+ if (remaining > 500) {
+ output += content.substring(0, remaining) + '\n\n[Truncated due to token limit]\n';
+ }
+ break;
+ }
+
+ output += content + '\n\n---\n\n';
+ tokenCount += content.length;
+ }
+
+ return output.trim();
+}
+export const getSoulForPrompt = getDigitalTwinForPrompt; // Alias for backwards compatibility
+
+// =============================================================================
+// STATUS & SUMMARY
+// =============================================================================
+
+export async function getDigitalTwinStatus() {
+ const meta = await loadMeta();
+ const documents = await getDocuments();
+ const testHistory = meta.testHistory.slice(0, 5);
+ const enrichmentProgress = await getEnrichmentProgress();
+
+ // Calculate health score
+ const docScore = Math.min(1, documents.filter(d => d.enabled).length / 5);
+ const testScore = testHistory.length > 0 ? testHistory[0].score : 0;
+ const enrichScore = enrichmentProgress.completedCount / enrichmentProgress.totalCategories;
+
+ const healthScore = Math.round(((docScore + testScore + enrichScore) / 3) * 100);
+
+ return {
+ healthScore,
+ documentCount: documents.length,
+ enabledDocuments: documents.filter(d => d.enabled).length,
+ documentsByCategory: {
+ core: documents.filter(d => d.category === 'core').length,
+ audio: documents.filter(d => d.category === 'audio').length,
+ behavioral: documents.filter(d => d.category === 'behavioral').length,
+ enrichment: documents.filter(d => d.category === 'enrichment').length
+ },
+ lastTestRun: testHistory[0] || null,
+ enrichmentProgress: {
+ completedCategories: enrichmentProgress.completedCount,
+ totalCategories: enrichmentProgress.totalCategories
+ },
+ settings: meta.settings
+ };
+}
+export const getSoulStatus = getDigitalTwinStatus; // Alias for backwards compatibility
+
+// =============================================================================
+// VALIDATION & ANALYSIS
+// =============================================================================
+
+// Required sections for a complete digital twin
+const REQUIRED_SECTIONS = [
+ {
+ id: 'identity',
+ label: 'Identity Basics',
+ description: 'Name, role, and one-liner description',
+ keywords: ['name', 'role', 'who i am', 'identity', 'about me'],
+ suggestedEnrichment: null,
+ suggestedDoc: 'SOUL.md'
+ },
+ {
+ id: 'values',
+ label: 'Core Values',
+ description: 'At least 3 clearly defined principles',
+ keywords: ['values', 'principles', 'believe', 'important to me'],
+ suggestedEnrichment: 'values',
+ suggestedDoc: 'VALUES.md'
+ },
+ {
+ id: 'communication',
+ label: 'Communication Style',
+ description: 'How you prefer to give and receive information',
+ keywords: ['communication', 'prefer', 'feedback', 'style', 'tone'],
+ suggestedEnrichment: 'communication',
+ suggestedDoc: 'COMMUNICATION.md'
+ },
+ {
+ id: 'decision_making',
+ label: 'Decision Making',
+ description: 'How you approach choices and uncertainty',
+ keywords: ['decision', 'choose', 'uncertainty', 'risk', 'intuition'],
+ suggestedEnrichment: 'decision_heuristics',
+ suggestedDoc: 'DECISION_HEURISTICS.md'
+ },
+ {
+ id: 'non_negotiables',
+ label: 'Non-Negotiables',
+ description: 'Principles and boundaries you never compromise',
+ keywords: ['non-negotiable', 'never', 'boundary', 'refuse', 'limit'],
+ suggestedEnrichment: 'non_negotiables',
+ suggestedDoc: 'NON_NEGOTIABLES.md'
+ },
+ {
+ id: 'error_intolerance',
+ label: 'Error Intolerance',
+ description: 'What your digital twin should never do',
+ keywords: ['never do', 'irritate', 'annoy', 'hate', 'worst'],
+ suggestedEnrichment: 'error_intolerance',
+ suggestedDoc: 'ERROR_INTOLERANCE.md'
+ }
+];
+
+export async function validateCompleteness() {
+ const documents = await getDocuments();
+ const enabledDocs = documents.filter(d => d.enabled && d.category !== 'behavioral');
+
+ // Load content for all enabled documents
+ const contents = [];
+ for (const doc of enabledDocs) {
+ const filePath = join(DIGITAL_TWIN_DIR, doc.filename);
+ if (existsSync(filePath)) {
+ const content = await readFile(filePath, 'utf-8');
+ contents.push({ doc, content: content.toLowerCase() });
+ }
+ }
+
+ const allContent = contents.map(c => c.content).join('\n');
+ const found = [];
+ const missing = [];
+
+ for (const section of REQUIRED_SECTIONS) {
+ const hasKeywords = section.keywords.some(kw => allContent.includes(kw.toLowerCase()));
+ const hasDoc = enabledDocs.some(d =>
+ d.filename.toLowerCase().includes(section.id.replace('_', '')) ||
+ d.title.toLowerCase().includes(section.label.toLowerCase())
+ );
+
+ if (hasKeywords || hasDoc) {
+ found.push(section.id);
+ } else {
+ missing.push({
+ id: section.id,
+ label: section.label,
+ description: section.description,
+ suggestion: section.suggestedEnrichment
+ ? `Answer questions in the "${ENRICHMENT_CATEGORIES[section.suggestedEnrichment]?.label}" enrichment category`
+ : `Create a ${section.suggestedDoc} document`,
+ enrichmentCategory: section.suggestedEnrichment
+ });
+ }
+ }
+
+ const score = Math.round((found.length / REQUIRED_SECTIONS.length) * 100);
+
+ return {
+ score,
+ total: REQUIRED_SECTIONS.length,
+ found: found.length,
+ missing,
+ suggestions: missing.map(m => m.suggestion)
+ };
+}
+
+export async function detectContradictions(providerId, model) {
+ const documents = await getDocuments();
+ const enabledDocs = documents.filter(d => d.enabled && d.category !== 'behavioral');
+
+ if (enabledDocs.length < 2) {
+ return { issues: [], message: 'Need at least 2 documents to detect contradictions' };
+ }
+
+ // Load all document contents
+ let combinedContent = '';
+ for (const doc of enabledDocs) {
+ const filePath = join(DIGITAL_TWIN_DIR, doc.filename);
+ if (existsSync(filePath)) {
+ const content = await readFile(filePath, 'utf-8');
+ combinedContent += `\n\n## Document: ${doc.filename}\n\n${content}`;
+ }
+ }
+
+ // Build the prompt
+ const prompt = await buildPrompt('soul-contradiction-detector', {
+ soulContent: combinedContent.substring(0, 15000) // Limit to avoid token limits
+ }).catch(() => null);
+
+ if (!prompt) {
+ return { issues: [], error: 'Contradiction detector prompt template not found' };
+ }
+
+ const provider = await getProviderById(providerId);
+ if (!provider || !provider.enabled) {
+ return { issues: [], error: 'Provider not found or disabled' };
+ }
+
+ if (provider.type === 'api') {
+ const headers = { 'Content-Type': 'application/json' };
+ if (provider.apiKey) {
+ headers['Authorization'] = `Bearer ${provider.apiKey}`;
+ }
+
+ const response = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model,
+ messages: [{ role: 'user', content: prompt }],
+ temperature: 0.3,
+ max_tokens: 2000
+ })
+ });
+
+ if (response.ok) {
+ const data = await response.json();
+ const responseText = data.choices?.[0]?.message?.content || '';
+ return parseContradictionResponse(responseText);
+ }
+ }
+
+ return { issues: [], error: 'Failed to analyze contradictions' };
+}
+
+function parseContradictionResponse(response) {
+ // Try to extract JSON from the response
+ const jsonMatch = response.match(/```json\s*([\s\S]*?)\s*```/);
+ if (jsonMatch) {
+ const parsed = JSON.parse(jsonMatch[1]);
+ return { issues: parsed.issues || [], summary: parsed.summary };
+ }
+
+ // Fallback: try direct JSON parse
+ if (response.trim().startsWith('{') || response.trim().startsWith('[')) {
+ const parsed = JSON.parse(response);
+ return { issues: parsed.issues || parsed || [], summary: parsed.summary };
+ }
+
+ return { issues: [], rawResponse: response };
+}
+
+export async function generateDynamicTests(providerId, model) {
+ const soulContent = await getSoulForPrompt({ maxTokens: 8000 });
+
+ if (!soulContent || soulContent.length < 100) {
+ return { tests: [], error: 'Insufficient soul content to generate tests' };
+ }
+
+ const prompt = await buildPrompt('soul-test-generator', {
+ soulContent
+ }).catch(() => null);
+
+ if (!prompt) {
+ return { tests: [], error: 'Test generator prompt template not found' };
+ }
+
+ const provider = await getProviderById(providerId);
+ if (!provider || !provider.enabled) {
+ return { tests: [], error: 'Provider not found or disabled' };
+ }
+
+ if (provider.type === 'api') {
+ const headers = { 'Content-Type': 'application/json' };
+ if (provider.apiKey) {
+ headers['Authorization'] = `Bearer ${provider.apiKey}`;
+ }
+
+ const response = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model,
+ messages: [{ role: 'user', content: prompt }],
+ temperature: 0.7,
+ max_tokens: 3000
+ })
+ });
+
+ if (response.ok) {
+ const data = await response.json();
+ const responseText = data.choices?.[0]?.message?.content || '';
+ return parseGeneratedTests(responseText);
+ }
+ }
+
+ return { tests: [], error: 'Failed to generate tests' };
+}
+
+function parseGeneratedTests(response) {
+ // Try to extract JSON from the response
+ const jsonMatch = response.match(/```json\s*([\s\S]*?)\s*```/);
+ if (jsonMatch) {
+ const parsed = JSON.parse(jsonMatch[1]);
+ return { tests: parsed.tests || parsed || [] };
+ }
+
+ // Fallback: try direct JSON parse
+ if (response.trim().startsWith('{') || response.trim().startsWith('[')) {
+ const parsed = JSON.parse(response);
+ return { tests: parsed.tests || parsed || [] };
+ }
+
+ return { tests: [], rawResponse: response };
+}
+
+export async function analyzeWritingSamples(samples, providerId, model) {
+ if (!samples || samples.length === 0) {
+ return { error: 'No writing samples provided' };
+ }
+
+ const combinedSamples = samples.map((s, i) => `--- Sample ${i + 1} ---\n${s}`).join('\n\n');
+
+ const prompt = await buildPrompt('soul-writing-analyzer', {
+ samples: combinedSamples
+ }).catch(() => null);
+
+ if (!prompt) {
+ return { error: 'Writing analyzer prompt template not found' };
+ }
+
+ const provider = await getProviderById(providerId);
+ if (!provider || !provider.enabled) {
+ return { error: 'Provider not found or disabled' };
+ }
+
+ if (provider.type === 'api') {
+ const headers = { 'Content-Type': 'application/json' };
+ if (provider.apiKey) {
+ headers['Authorization'] = `Bearer ${provider.apiKey}`;
+ }
+
+ const response = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model,
+ messages: [{ role: 'user', content: prompt }],
+ temperature: 0.5,
+ max_tokens: 2000
+ })
+ });
+
+ if (response.ok) {
+ const data = await response.json();
+ const responseText = data.choices?.[0]?.message?.content || '';
+ return parseWritingAnalysis(responseText);
+ }
+ }
+
+ return { error: 'Failed to analyze writing samples' };
+}
+
+function parseWritingAnalysis(response) {
+ // Try to extract JSON
+ const jsonMatch = response.match(/```json\s*([\s\S]*?)\s*```/);
+ if (jsonMatch) {
+ const parsed = JSON.parse(jsonMatch[1]);
+ return {
+ analysis: parsed.analysis || parsed,
+ suggestedContent: parsed.suggestedContent || parsed.document || ''
+ };
+ }
+
+ // Extract markdown content for document if present
+ const mdMatch = response.match(/```markdown\s*([\s\S]*?)\s*```/);
+ const suggestedContent = mdMatch ? mdMatch[1] : '';
+
+ return {
+ analysis: { rawResponse: response },
+ suggestedContent
+ };
+}
+
+// =============================================================================
+// TRAIT ANALYSIS & CONFIDENCE SCORING (Phase 1 & 2)
+// =============================================================================
+
+/**
+ * Get all twin content for analysis (excludes behavioral tests)
+ */
+async function getAllTwinContent() {
+ const meta = await loadMeta();
+ const enabledDocs = meta.documents.filter(d => d.enabled && d.category !== 'behavioral');
+
+ const contents = [];
+ for (const doc of enabledDocs) {
+ const filePath = join(DIGITAL_TWIN_DIR, doc.filename);
+ if (existsSync(filePath)) {
+ const content = await readFile(filePath, 'utf-8');
+ contents.push(`## ${doc.title} (${doc.filename})\n\n${content}`);
+ }
+ }
+
+ return contents.join('\n\n---\n\n');
+}
+
+/**
+ * Get current traits from meta
+ */
+export async function getTraits() {
+ const meta = await loadMeta();
+ return meta.traits || null;
+}
+
+/**
+ * Update traits manually (partial update)
+ */
+export async function updateTraits(updates) {
+ const meta = await loadMeta();
+ const currentTraits = meta.traits || {};
+
+ const newTraits = {
+ ...currentTraits,
+ lastAnalyzed: new Date().toISOString(),
+ analysisVersion: 'manual'
+ };
+
+ // Merge Big Five if provided
+ if (updates.bigFive) {
+ newTraits.bigFive = { ...currentTraits.bigFive, ...updates.bigFive };
+ }
+
+ // Replace values hierarchy if provided
+ if (updates.valuesHierarchy) {
+ newTraits.valuesHierarchy = updates.valuesHierarchy;
+ }
+
+ // Merge communication profile if provided
+ if (updates.communicationProfile) {
+ newTraits.communicationProfile = {
+ ...currentTraits.communicationProfile,
+ ...updates.communicationProfile
+ };
+ }
+
+ meta.traits = newTraits;
+ await saveMeta(meta);
+ digitalTwinEvents.emit('traits:updated', newTraits);
+
+ return newTraits;
+}
+
+/**
+ * Analyze digital twin documents to extract personality traits
+ */
+export async function analyzeTraits(providerId, model, forceReanalyze = false) {
+ const meta = await loadMeta();
+
+ // Check if we have recent analysis and don't need to reanalyze
+ if (!forceReanalyze && meta.traits?.lastAnalyzed) {
+ const lastAnalyzed = new Date(meta.traits.lastAnalyzed);
+ const hoursSince = (Date.now() - lastAnalyzed.getTime()) / (1000 * 60 * 60);
+ if (hoursSince < 24) {
+ return { traits: meta.traits, cached: true };
+ }
+ }
+
+ const twinContent = await getAllTwinContent();
+ if (!twinContent || twinContent.length < 100) {
+ return { error: 'Not enough digital twin content to analyze. Add more documents first.' };
+ }
+
+ const prompt = await buildPrompt('twin-trait-extractor', {
+ twinContent
+ }).catch(() => null);
+
+ if (!prompt) {
+ return { error: 'Trait extractor prompt template not found' };
+ }
+
+ const provider = await getProviderById(providerId);
+ if (!provider || !provider.enabled) {
+ return { error: 'Provider not found or disabled' };
+ }
+
+ if (provider.type === 'api') {
+ const headers = { 'Content-Type': 'application/json' };
+ if (provider.apiKey) {
+ headers['Authorization'] = `Bearer ${provider.apiKey}`;
+ }
+
+ const response = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model,
+ messages: [{ role: 'user', content: prompt }],
+ temperature: 0.3,
+ max_tokens: 3000
+ })
+ });
+
+ if (response.ok) {
+ const data = await response.json();
+ const responseText = data.choices?.[0]?.message?.content || '';
+ const parsedTraits = parseTraitsResponse(responseText);
+
+ if (parsedTraits.error) {
+ return parsedTraits;
+ }
+
+ // Save to meta
+ const traits = {
+ bigFive: parsedTraits.bigFive,
+ valuesHierarchy: parsedTraits.valuesHierarchy,
+ communicationProfile: parsedTraits.communicationProfile,
+ lastAnalyzed: new Date().toISOString(),
+ analysisVersion: '1.0'
+ };
+
+ meta.traits = traits;
+ await saveMeta(meta);
+ digitalTwinEvents.emit('traits:analyzed', traits);
+
+ return { traits, analysisNotes: parsedTraits.analysisNotes };
+ }
+
+ return { error: `Provider request failed: ${response.status}` };
+ }
+
+ return { error: 'Provider type not supported for trait analysis' };
+}
+
+function parseTraitsResponse(response) {
+ const jsonMatch = response.match(/```json\s*([\s\S]*?)\s*```/);
+ if (jsonMatch) {
+ const parsed = JSON.parse(jsonMatch[1]);
+ return parsed;
+ }
+
+ if (response.trim().startsWith('{')) {
+ return JSON.parse(response);
+ }
+
+ return { error: 'Failed to parse traits response', rawResponse: response };
+}
+
+/**
+ * Get current confidence scores from meta
+ */
+export async function getConfidence() {
+ const meta = await loadMeta();
+ return meta.confidence || null;
+}
+
+/**
+ * Calculate confidence scores for all personality dimensions
+ */
+export async function calculateConfidence(providerId, model) {
+ const twinContent = await getAllTwinContent();
+ const meta = await loadMeta();
+ const currentTraits = meta.traits || {};
+
+ // If no provider specified, do local calculation
+ if (!providerId || !model) {
+ return calculateLocalConfidence(twinContent, currentTraits, meta);
+ }
+
+ const prompt = await buildPrompt('twin-confidence-analyzer', {
+ twinContent,
+ currentTraits: JSON.stringify(currentTraits, null, 2)
+ }).catch(() => null);
+
+ if (!prompt) {
+ // Fall back to local calculation
+ return calculateLocalConfidence(twinContent, currentTraits, meta);
+ }
+
+ const provider = await getProviderById(providerId);
+ if (!provider || !provider.enabled) {
+ return calculateLocalConfidence(twinContent, currentTraits, meta);
+ }
+
+ if (provider.type === 'api') {
+ const headers = { 'Content-Type': 'application/json' };
+ if (provider.apiKey) {
+ headers['Authorization'] = `Bearer ${provider.apiKey}`;
+ }
+
+ const response = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model,
+ messages: [{ role: 'user', content: prompt }],
+ temperature: 0.3,
+ max_tokens: 2000
+ })
+ });
+
+ if (response.ok) {
+ const data = await response.json();
+ const responseText = data.choices?.[0]?.message?.content || '';
+ const parsed = parseConfidenceResponse(responseText);
+
+ if (!parsed.error) {
+ const confidence = {
+ ...parsed,
+ lastCalculated: new Date().toISOString()
+ };
+
+ meta.confidence = confidence;
+ await saveMeta(meta);
+ digitalTwinEvents.emit('confidence:calculated', confidence);
+
+ return { confidence };
+ }
+ }
+ }
+
+ // Fall back to local calculation
+ return calculateLocalConfidence(twinContent, currentTraits, meta);
+}
+
+function parseConfidenceResponse(response) {
+ const jsonMatch = response.match(/```json\s*([\s\S]*?)\s*```/);
+ if (jsonMatch) {
+ return JSON.parse(jsonMatch[1]);
+ }
+
+ if (response.trim().startsWith('{')) {
+ return JSON.parse(response);
+ }
+
+ return { error: 'Failed to parse confidence response' };
+}
+
+/**
+ * Calculate confidence locally without LLM (simpler heuristic-based)
+ */
+async function calculateLocalConfidence(twinContent, traits, meta) {
+ const contentLower = twinContent.toLowerCase();
+ const documents = await getDocuments();
+ const enabledDocs = documents.filter(d => d.enabled && d.category !== 'behavioral');
+
+ // Evidence counts based on keyword presence and document existence
+ const dimensions = {
+ openness: calculateDimensionConfidence(contentLower, ['curious', 'creative', 'explore', 'novel', 'experiment', 'learn'], traits?.bigFive?.O),
+ conscientiousness: calculateDimensionConfidence(contentLower, ['organize', 'plan', 'discipline', 'routine', 'structure', 'systematic'], traits?.bigFive?.C),
+ extraversion: calculateDimensionConfidence(contentLower, ['social', 'energy', 'people', 'outgoing', 'network', 'collaborate'], traits?.bigFive?.E),
+ agreeableness: calculateDimensionConfidence(contentLower, ['empathy', 'cooperate', 'trust', 'kind', 'help', 'support'], traits?.bigFive?.A),
+ neuroticism: calculateDimensionConfidence(contentLower, ['stress', 'anxiety', 'emotion', 'worry', 'calm', 'stable'], traits?.bigFive?.N),
+ values: calculateDimensionConfidence(contentLower, ['value', 'principle', 'believe', 'important', 'priority', 'matter'], null, enabledDocs.some(d => d.filename.toLowerCase().includes('value'))),
+ communication: calculateDimensionConfidence(contentLower, ['communicate', 'prefer', 'feedback', 'tone', 'style', 'write'], null, enabledDocs.some(d => d.filename.toLowerCase().includes('communi') || d.filename.toLowerCase().includes('writing'))),
+ decision_making: calculateDimensionConfidence(contentLower, ['decision', 'choose', 'heuristic', 'rule', 'approach', 'consider'], null, enabledDocs.some(d => d.filename.toLowerCase().includes('decision'))),
+ boundaries: calculateDimensionConfidence(contentLower, ['never', 'boundary', 'non-negotiable', 'refuse', 'limit', 'error'], null, enabledDocs.some(d => d.filename.toLowerCase().includes('non_negot') || d.filename.toLowerCase().includes('error'))),
+ identity: calculateDimensionConfidence(contentLower, ['name', 'who i am', 'identity', 'role', 'purpose', 'mission'], null, enabledDocs.some(d => d.filename.toLowerCase().includes('soul') || d.category === 'core'))
+ };
+
+ // Calculate overall
+ const scores = Object.values(dimensions);
+ const overall = scores.reduce((a, b) => a + b, 0) / scores.length;
+
+ // Generate gaps for low-confidence dimensions
+ const gaps = generateGapRecommendations(dimensions);
+
+ const confidence = {
+ overall: Math.round(overall * 100) / 100,
+ dimensions,
+ gaps,
+ lastCalculated: new Date().toISOString()
+ };
+
+ meta.confidence = confidence;
+ await saveMeta(meta);
+ digitalTwinEvents.emit('confidence:calculated', confidence);
+
+ return { confidence, method: 'local' };
+}
+
+function calculateDimensionConfidence(content, keywords, existingScore, hasDocument = false) {
+ let score = 0;
+
+ // Keyword evidence (up to 0.5)
+ const keywordHits = keywords.filter(k => content.includes(k)).length;
+ score += Math.min(0.5, keywordHits * 0.1);
+
+ // Document existence bonus (0.2)
+ if (hasDocument) score += 0.2;
+
+ // Existing trait score bonus (0.3)
+ if (existingScore !== undefined && existingScore !== null) score += 0.3;
+
+ return Math.min(1, Math.round(score * 100) / 100);
+}
+
+function generateGapRecommendations(dimensions) {
+ const gaps = [];
+ const threshold = 0.6;
+
+ const dimensionConfig = {
+ openness: {
+ suggestedCategory: 'personality_assessments',
+ questions: [
+ 'How do you typically react to new ideas or unconventional approaches?',
+ 'What topics or subjects consistently spark your curiosity?',
+ 'How comfortable are you with ambiguity and uncertainty?'
+ ]
+ },
+ conscientiousness: {
+ suggestedCategory: 'daily_routines',
+ questions: [
+ 'Describe your typical approach to planning and organization.',
+ 'How do you handle deadlines and commitments?',
+ 'What systems or routines keep you productive?'
+ ]
+ },
+ extraversion: {
+ suggestedCategory: 'communication',
+ questions: [
+ 'How do you prefer to spend your free time - with others or alone?',
+ 'In group settings, do you tend to lead conversations or observe?',
+ 'Where do you get your energy from - social interaction or solitude?'
+ ]
+ },
+ agreeableness: {
+ suggestedCategory: 'values',
+ questions: [
+ 'How do you typically handle disagreements with others?',
+ 'What role does empathy play in your decision-making?',
+ 'How do you balance your needs with the needs of others?'
+ ]
+ },
+ neuroticism: {
+ suggestedCategory: 'personality_assessments',
+ questions: [
+ 'How do you typically respond to unexpected setbacks or failures?',
+ 'What situations tend to make you feel anxious or stressed?',
+ 'How would others describe your emotional stability?'
+ ]
+ },
+ values: {
+ suggestedCategory: 'values',
+ questions: [
+ 'What principles guide your most important decisions?',
+ 'Which values would you never compromise, even under pressure?',
+ 'What do you want to be known for?'
+ ]
+ },
+ communication: {
+ suggestedCategory: 'communication',
+ questions: [
+ 'How do you prefer to receive feedback - direct or diplomatic?',
+ 'What communication styles do you find most effective?',
+ 'How would you describe your writing voice?'
+ ]
+ },
+ decision_making: {
+ suggestedCategory: 'decision_heuristics',
+ questions: [
+ 'What mental shortcuts or rules of thumb guide your choices?',
+ 'How do you balance intuition vs. analysis in decisions?',
+ 'What factors do you prioritize when making important choices?'
+ ]
+ },
+ boundaries: {
+ suggestedCategory: 'non_negotiables',
+ questions: [
+ 'What behaviors or requests would you always refuse?',
+ 'What principles are absolutely non-negotiable for you?',
+ 'What should your digital twin never do or say?'
+ ]
+ },
+ identity: {
+ suggestedCategory: 'core_memories',
+ questions: [
+ 'How would you introduce yourself in one sentence?',
+ 'What makes you uniquely you?',
+ 'What is your core purpose or mission?'
+ ]
+ }
+ };
+
+ for (const [dimension, score] of Object.entries(dimensions)) {
+ if (score < threshold) {
+ const config = dimensionConfig[dimension];
+ gaps.push({
+ dimension,
+ confidence: score,
+ evidenceCount: Math.round(score * 5),
+ requiredEvidence: 5,
+ suggestedQuestions: config?.questions || [],
+ suggestedCategory: config?.suggestedCategory || 'core_memories'
+ });
+ }
+ }
+
+ // Sort by confidence (lowest first)
+ gaps.sort((a, b) => a.confidence - b.confidence);
+
+ return gaps;
+}
+
+/**
+ * Get gap recommendations (prioritized list of what to enrich)
+ */
+export async function getGapRecommendations() {
+ const meta = await loadMeta();
+
+ // If no confidence data, calculate it first
+ if (!meta.confidence) {
+ const result = await calculateConfidence();
+ return result.confidence?.gaps || [];
+ }
+
+ return meta.confidence.gaps || [];
+}
+
+// =============================================================================
+// EXTERNAL DATA IMPORT (Phase 4)
+// =============================================================================
+
+/**
+ * Parse Goodreads CSV export
+ * CSV columns: Book Id, Title, Author, Author l-f, Additional Authors, ISBN, ISBN13,
+ * My Rating, Average Rating, Publisher, Binding, Number of Pages, Year Published,
+ * Original Publication Year, Date Read, Date Added, Bookshelves, Bookshelves with positions,
+ * Exclusive Shelf, My Review, Spoiler, Private Notes, Read Count, Owned Copies
+ */
+function parseGoodreadsCSV(csvData) {
+ const lines = csvData.split('\n');
+ if (lines.length < 2) return [];
+
+ // Parse header to find column indices
+ const header = parseCSVLine(lines[0]);
+ const titleIdx = header.findIndex(h => h.toLowerCase() === 'title');
+ const authorIdx = header.findIndex(h => h.toLowerCase() === 'author');
+ const ratingIdx = header.findIndex(h => h.toLowerCase() === 'my rating');
+ const dateReadIdx = header.findIndex(h => h.toLowerCase() === 'date read');
+ const shelvesIdx = header.findIndex(h => h.toLowerCase() === 'bookshelves');
+ const reviewIdx = header.findIndex(h => h.toLowerCase() === 'my review');
+
+ const books = [];
+ for (let i = 1; i < lines.length; i++) {
+ if (!lines[i].trim()) continue;
+ const cols = parseCSVLine(lines[i]);
+
+ const rating = ratingIdx >= 0 ? parseInt(cols[ratingIdx]) : 0;
+ // Only include books that were actually read (have a rating > 0 or date read)
+ if (rating > 0 || (dateReadIdx >= 0 && cols[dateReadIdx])) {
+ books.push({
+ title: cols[titleIdx] || '',
+ author: cols[authorIdx] || '',
+ rating: rating || undefined,
+ dateRead: dateReadIdx >= 0 ? cols[dateReadIdx] : undefined,
+ shelves: shelvesIdx >= 0 && cols[shelvesIdx] ? cols[shelvesIdx].split(',').map(s => s.trim()) : [],
+ review: reviewIdx >= 0 ? cols[reviewIdx] : undefined
+ });
+ }
+ }
+
+ return books;
+}
+
+/**
+ * Parse a CSV line handling quoted fields
+ */
+function parseCSVLine(line) {
+ const result = [];
+ let current = '';
+ let inQuotes = false;
+
+ for (let i = 0; i < line.length; i++) {
+ const char = line[i];
+ if (char === '"') {
+ if (inQuotes && line[i + 1] === '"') {
+ current += '"';
+ i++;
+ } else {
+ inQuotes = !inQuotes;
+ }
+ } else if (char === ',' && !inQuotes) {
+ result.push(current.trim());
+ current = '';
+ } else {
+ current += char;
+ }
+ }
+ result.push(current.trim());
+
+ return result;
+}
+
+/**
+ * Parse Spotify extended streaming history JSON
+ * Spotify exports: endTime, artistName, trackName, msPlayed
+ */
+function parseSpotifyJSON(jsonData) {
+ const data = JSON.parse(jsonData);
+
+ // Handle both array format and object with streams array
+ const streams = Array.isArray(data) ? data : (data.streams || data);
+ if (!Array.isArray(streams)) return [];
+
+ // Aggregate by artist
+ const artistCounts = new Map();
+ const trackCounts = new Map();
+
+ for (const entry of streams) {
+ const artist = entry.artistName || entry.master_metadata_album_artist_name;
+ const track = entry.trackName || entry.master_metadata_track_name;
+ const msPlayed = entry.msPlayed || entry.ms_played || 0;
+
+ if (artist) {
+ const existing = artistCounts.get(artist) || { playCount: 0, msPlayed: 0 };
+ artistCounts.set(artist, {
+ playCount: existing.playCount + 1,
+ msPlayed: existing.msPlayed + msPlayed
+ });
+ }
+
+ if (track && artist) {
+ const key = `${track}|||${artist}`;
+ const existing = trackCounts.get(key) || { playCount: 0, msPlayed: 0 };
+ trackCounts.set(key, {
+ trackName: track,
+ artistName: artist,
+ playCount: existing.playCount + 1,
+ msPlayed: existing.msPlayed + msPlayed
+ });
+ }
+ }
+
+ // Return top artists and tracks
+ const topArtists = Array.from(artistCounts.entries())
+ .map(([name, data]) => ({ artistName: name, ...data }))
+ .sort((a, b) => b.msPlayed - a.msPlayed)
+ .slice(0, 50);
+
+ const topTracks = Array.from(trackCounts.values())
+ .sort((a, b) => b.playCount - a.playCount)
+ .slice(0, 50);
+
+ return { artists: topArtists, tracks: topTracks };
+}
+
+/**
+ * Parse Letterboxd CSV export
+ */
+function parseLetterboxdCSV(csvData) {
+ const lines = csvData.split('\n');
+ if (lines.length < 2) return [];
+
+ const header = parseCSVLine(lines[0]);
+ const nameIdx = header.findIndex(h => h.toLowerCase().includes('name') || h.toLowerCase() === 'title');
+ const yearIdx = header.findIndex(h => h.toLowerCase() === 'year');
+ const ratingIdx = header.findIndex(h => h.toLowerCase() === 'rating');
+ const dateIdx = header.findIndex(h => h.toLowerCase().includes('watched'));
+ const reviewIdx = header.findIndex(h => h.toLowerCase() === 'review');
+ const tagsIdx = header.findIndex(h => h.toLowerCase() === 'tags');
+
+ const films = [];
+ for (let i = 1; i < lines.length; i++) {
+ if (!lines[i].trim()) continue;
+ const cols = parseCSVLine(lines[i]);
+
+ films.push({
+ title: cols[nameIdx] || cols[0] || '',
+ year: yearIdx >= 0 && cols[yearIdx] ? parseInt(cols[yearIdx]) : undefined,
+ rating: ratingIdx >= 0 && cols[ratingIdx] ? parseFloat(cols[ratingIdx]) : undefined,
+ watchedDate: dateIdx >= 0 ? cols[dateIdx] : undefined,
+ review: reviewIdx >= 0 ? cols[reviewIdx] : undefined,
+ tags: tagsIdx >= 0 && cols[tagsIdx] ? cols[tagsIdx].split(',').map(t => t.trim()) : []
+ });
+ }
+
+ return films.filter(f => f.title);
+}
+
+/**
+ * Parse iCal/ICS calendar file
+ */
+function parseICalData(icsData) {
+ const events = [];
+ const eventBlocks = icsData.split('BEGIN:VEVENT');
+
+ for (let i = 1; i < eventBlocks.length; i++) {
+ const block = eventBlocks[i].split('END:VEVENT')[0];
+ const event = {};
+
+ const summaryMatch = block.match(/SUMMARY[^:]*:(.+?)(?:\r?\n(?![^\r\n])|\r?\n[A-Z])/s);
+ if (summaryMatch) event.summary = summaryMatch[1].replace(/\r?\n\s/g, '').trim();
+
+ const startMatch = block.match(/DTSTART[^:]*:(\d{8}T?\d{0,6})/);
+ if (startMatch) event.start = startMatch[1];
+
+ const endMatch = block.match(/DTEND[^:]*:(\d{8}T?\d{0,6})/);
+ if (endMatch) event.end = endMatch[1];
+
+ const rruleMatch = block.match(/RRULE:/);
+ event.recurring = !!rruleMatch;
+
+ const categoriesMatch = block.match(/CATEGORIES[^:]*:(.+?)(?:\r?\n[A-Z])/s);
+ if (categoriesMatch) {
+ event.categories = categoriesMatch[1].split(',').map(c => c.trim());
+ }
+
+ if (event.summary) {
+ events.push(event);
+ }
+ }
+
+ return events;
+}
+
+/**
+ * Analyze imported data and extract personality insights
+ */
+export async function analyzeImportedData(source, rawData, providerId, model) {
+ let parsedData;
+ let dataDescription;
+
+ // Parse based on source
+ switch (source) {
+ case 'goodreads': {
+ parsedData = parseGoodreadsCSV(rawData);
+ if (parsedData.length === 0) {
+ return { error: 'No books found in Goodreads export. Make sure you exported your library.' };
+ }
+ const topRated = parsedData.filter(b => b.rating >= 4).slice(0, 20);
+ const authors = [...new Set(parsedData.map(b => b.author).filter(Boolean))].slice(0, 20);
+ const shelves = [...new Set(parsedData.flatMap(b => b.shelves || []))].slice(0, 15);
+ dataDescription = `Reading History (${parsedData.length} books):\n` +
+ `Top-rated books: ${topRated.map(b => `"${b.title}" by ${b.author} (${b.rating}/5)`).join(', ')}\n` +
+ `Favorite authors: ${authors.join(', ')}\n` +
+ `Shelves/genres: ${shelves.join(', ')}\n` +
+ `Sample reviews: ${parsedData.filter(b => b.review).slice(0, 3).map(b => `"${b.title}": ${b.review?.substring(0, 200)}...`).join('\n')}`;
+ break;
+ }
+
+ case 'spotify': {
+ parsedData = parseSpotifyJSON(rawData);
+ if (!parsedData.artists || parsedData.artists.length === 0) {
+ return { error: 'No listening data found in Spotify export.' };
+ }
+ const topArtists = parsedData.artists.slice(0, 15);
+ const topTracks = parsedData.tracks?.slice(0, 15) || [];
+ const totalHours = Math.round(topArtists.reduce((sum, a) => sum + a.msPlayed, 0) / 3600000);
+ dataDescription = `Listening History (${totalHours} hours tracked):\n` +
+ `Top artists: ${topArtists.map(a => `${a.artistName} (${Math.round(a.msPlayed / 60000)} min)`).join(', ')}\n` +
+ `Top tracks: ${topTracks.map(t => `"${t.trackName}" by ${t.artistName}`).join(', ')}`;
+ break;
+ }
+
+ case 'letterboxd': {
+ parsedData = parseLetterboxdCSV(rawData);
+ if (parsedData.length === 0) {
+ return { error: 'No films found in Letterboxd export.' };
+ }
+ const topRated = parsedData.filter(f => f.rating >= 4).slice(0, 20);
+ const tags = [...new Set(parsedData.flatMap(f => f.tags || []))].slice(0, 15);
+ dataDescription = `Film History (${parsedData.length} films):\n` +
+ `Top-rated films: ${topRated.map(f => `"${f.title}" (${f.year}) - ${f.rating}/5`).join(', ')}\n` +
+ `Tags/themes: ${tags.join(', ')}\n` +
+ `Sample reviews: ${parsedData.filter(f => f.review).slice(0, 3).map(f => `"${f.title}": ${f.review?.substring(0, 200)}...`).join('\n')}`;
+ break;
+ }
+
+ case 'ical': {
+ parsedData = parseICalData(rawData);
+ if (parsedData.length === 0) {
+ return { error: 'No events found in calendar export.' };
+ }
+ const recurring = parsedData.filter(e => e.recurring);
+ const categories = [...new Set(parsedData.flatMap(e => e.categories || []))];
+ const eventTypes = {};
+ parsedData.forEach(e => {
+ const type = categorizeEvent(e.summary);
+ eventTypes[type] = (eventTypes[type] || 0) + 1;
+ });
+ dataDescription = `Calendar Analysis (${parsedData.length} events, ${recurring.length} recurring):\n` +
+ `Event types: ${Object.entries(eventTypes).map(([k, v]) => `${k}: ${v}`).join(', ')}\n` +
+ `Categories: ${categories.join(', ')}\n` +
+ `Recurring commitments: ${recurring.slice(0, 10).map(e => e.summary).join(', ')}`;
+ break;
+ }
+
+ default:
+ return { error: `Unknown import source: ${source}` };
+ }
+
+ // Build analysis prompt
+ const prompt = await buildPrompt('twin-import-analyzer', {
+ source,
+ dataDescription,
+ itemCount: Array.isArray(parsedData) ? parsedData.length : (parsedData.artists?.length || 0)
+ }).catch(() => null);
+
+ if (!prompt) {
+ // Fallback to inline prompt
+ const fallbackPrompt = buildImportAnalyzerPrompt(source, dataDescription);
+ return analyzeWithPrompt(fallbackPrompt, providerId, model, source, parsedData);
+ }
+
+ return analyzeWithPrompt(prompt, providerId, model, source, parsedData);
+}
+
+/**
+ * Categorize calendar event by its summary
+ */
+function categorizeEvent(summary) {
+ const lower = (summary || '').toLowerCase();
+ if (lower.includes('meeting') || lower.includes('call') || lower.includes('sync')) return 'work';
+ if (lower.includes('gym') || lower.includes('workout') || lower.includes('run') || lower.includes('yoga')) return 'fitness';
+ if (lower.includes('doctor') || lower.includes('dentist') || lower.includes('appointment')) return 'health';
+ if (lower.includes('dinner') || lower.includes('lunch') || lower.includes('coffee')) return 'social';
+ if (lower.includes('class') || lower.includes('lesson') || lower.includes('course')) return 'learning';
+ if (lower.includes('travel') || lower.includes('flight') || lower.includes('trip')) return 'travel';
+ return 'other';
+}
+
+/**
+ * Build fallback prompt for import analysis
+ */
+function buildImportAnalyzerPrompt(source, dataDescription) {
+ const sourceLabels = {
+ goodreads: 'reading history',
+ spotify: 'music listening history',
+ letterboxd: 'film watching history',
+ ical: 'calendar/schedule patterns'
+ };
+
+ return `Analyze this ${sourceLabels[source] || source} data to understand the person's personality, values, and interests.
+
+## Data
+${dataDescription}
+
+## Analysis Instructions
+Based on this data, infer:
+
+1. **Personality Traits (Big Five)**: What does their ${sourceLabels[source]} suggest about their Openness, Conscientiousness, Extraversion, Agreeableness, and Neuroticism? Provide estimates from 0.0 to 1.0.
+
+2. **Values**: What values seem important to this person based on their choices?
+
+3. **Interests & Themes**: What topics, genres, or themes do they gravitate toward?
+
+4. **Patterns**: Any notable patterns in their behavior (e.g., variety vs. consistency, niche vs. mainstream)?
+
+5. **Suggested Document Content**: Write a short markdown document summarizing key insights about their ${sourceLabels[source]} preferences.
+
+## Output Format
+Respond with JSON only:
+
+\`\`\`json
+{
+ "insights": {
+ "patterns": ["pattern 1", "pattern 2"],
+ "preferences": ["preference 1", "preference 2"],
+ "personalityInferences": {
+ "bigFive": { "O": 0.7, "C": 0.6, "E": 0.5, "A": 0.6, "N": 0.4 },
+ "values": ["value1", "value2"],
+ "interests": ["interest1", "interest2"]
+ }
+ },
+ "suggestedDocuments": [
+ {
+ "filename": "READING_PROFILE.md",
+ "title": "Reading Profile",
+ "category": "entertainment",
+ "content": "# Reading Profile\\n\\nMarkdown content here..."
+ }
+ ],
+ "rawSummary": "2-3 sentence summary of what this data reveals about the person"
+}
+\`\`\``;
+}
+
+/**
+ * Send prompt to AI and parse response
+ */
+async function analyzeWithPrompt(prompt, providerId, model, source, parsedData) {
+ const provider = await getProviderById(providerId);
+ if (!provider || !provider.enabled) {
+ return { error: 'Provider not found or disabled' };
+ }
+
+ if (provider.type === 'api') {
+ const headers = { 'Content-Type': 'application/json' };
+ if (provider.apiKey) {
+ headers['Authorization'] = `Bearer ${provider.apiKey}`;
+ }
+
+ const response = await fetch(`${provider.endpoint}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model,
+ messages: [{ role: 'user', content: prompt }],
+ temperature: 0.4,
+ max_tokens: 3000
+ })
+ });
+
+ if (response.ok) {
+ const data = await response.json();
+ const responseText = data.choices?.[0]?.message?.content || '';
+ return parseImportAnalysisResponse(responseText, source, parsedData);
+ }
+
+ return { error: `Provider request failed: ${response.status}` };
+ }
+
+ return { error: 'Provider type not supported' };
+}
+
+/**
+ * Parse AI response for import analysis
+ */
+function parseImportAnalysisResponse(response, source, parsedData) {
+ const jsonMatch = response.match(/```json\s*([\s\S]*?)\s*```/);
+ if (jsonMatch) {
+ const parsed = JSON.parse(jsonMatch[1]);
+ return {
+ source,
+ itemCount: Array.isArray(parsedData) ? parsedData.length : (parsedData.artists?.length || 0),
+ ...parsed
+ };
+ }
+
+ if (response.trim().startsWith('{')) {
+ const parsed = JSON.parse(response);
+ return {
+ source,
+ itemCount: Array.isArray(parsedData) ? parsedData.length : (parsedData.artists?.length || 0),
+ ...parsed
+ };
+ }
+
+ return {
+ source,
+ itemCount: Array.isArray(parsedData) ? parsedData.length : (parsedData.artists?.length || 0),
+ insights: { patterns: [], preferences: [] },
+ rawSummary: response
+ };
+}
+
+/**
+ * Save imported analysis as a document
+ */
+export async function saveImportAsDocument(source, suggestedDoc) {
+ const { filename, title, category, content } = suggestedDoc;
+
+ // Check if document already exists
+ const meta = await loadMeta();
+ const existingDoc = meta.documents.find(d => d.filename === filename);
+
+ if (existingDoc) {
+ // Update existing document
+ return updateDocument(existingDoc.id, { content, title });
+ }
+
+ // Create new document
+ return createDocument({
+ filename,
+ title,
+ category,
+ content,
+ enabled: true,
+ priority: 5
+ });
+}
+
+/**
+ * Get list of supported import sources
+ */
+export function getImportSources() {
+ return [
+ {
+ id: 'goodreads',
+ name: 'Goodreads',
+ description: 'Import your reading history to analyze literary preferences and themes',
+ format: 'CSV',
+ instructions: 'Go to My Books > Import/Export > Export Library. Download the CSV file.'
+ },
+ {
+ id: 'spotify',
+ name: 'Spotify',
+ description: 'Import listening history to analyze music preferences and emotional patterns',
+ format: 'JSON',
+ instructions: 'Go to Account > Privacy Settings > Download your data. Request "Extended streaming history". Extract the JSON files.'
+ },
+ {
+ id: 'letterboxd',
+ name: 'Letterboxd',
+ description: 'Import film diary to analyze viewing preferences and aesthetic tastes',
+ format: 'CSV',
+ instructions: 'Go to Settings > Import & Export > Export Your Data. Download the diary.csv or films.csv.'
+ },
+ {
+ id: 'ical',
+ name: 'Calendar (iCal)',
+ description: 'Import calendar to analyze routine patterns and time allocation',
+ format: 'ICS',
+ instructions: 'Export your calendar as .ics file from Google Calendar, Apple Calendar, or Outlook.'
+ }
+ ];
+}
diff --git a/server/services/errorRecovery.js b/server/services/errorRecovery.js
new file mode 100644
index 0000000..cf4f48a
--- /dev/null
+++ b/server/services/errorRecovery.js
@@ -0,0 +1,515 @@
+/**
+ * Error Recovery Service
+ *
+ * Analyzes errors and selects appropriate recovery strategies.
+ * Provides structured error handling for agent operations.
+ */
+
+import { cosEvents } from './cosEvents.js'
+
+// Recovery strategies
+const STRATEGIES = {
+ RETRY: 'retry', // Simple retry with backoff
+ ESCALATE: 'escalate', // Use a more powerful model
+ FALLBACK: 'fallback', // Use fallback provider
+ DECOMPOSE: 'decompose', // Break task into smaller parts
+ DEFER: 'defer', // Reschedule for later
+ INVESTIGATE: 'investigate', // Create investigation task
+ SKIP: 'skip', // Skip and move on
+ MANUAL: 'manual' // Require human intervention
+}
+
+// Error categories for pattern matching
+const ERROR_PATTERNS = {
+ // Rate limiting
+ rateLimit: {
+ patterns: [
+ /rate.?limit/i,
+ /too many requests/i,
+ /429/,
+ /quota exceeded/i,
+ /throttl/i
+ ],
+ strategies: [STRATEGIES.DEFER, STRATEGIES.FALLBACK],
+ cooldownMs: 60000
+ },
+
+ // Authentication
+ auth: {
+ patterns: [
+ /unauthorized/i,
+ /authentication/i,
+ /invalid.*key/i,
+ /403/,
+ /401/,
+ /api.?key/i
+ ],
+ strategies: [STRATEGIES.FALLBACK, STRATEGIES.MANUAL],
+ cooldownMs: 0
+ },
+
+ // Model unavailable
+ modelUnavailable: {
+ patterns: [
+ /model.*not.*found/i,
+ /model.*unavailable/i,
+ /model.*overloaded/i,
+ /503/,
+ /capacity/i
+ ],
+ strategies: [STRATEGIES.FALLBACK, STRATEGIES.DEFER],
+ cooldownMs: 30000
+ },
+
+ // Context too long
+ contextLength: {
+ patterns: [
+ /context.*length/i,
+ /token.*limit/i,
+ /maximum.*tokens/i,
+ /too.*long/i,
+ /input.*too.*large/i
+ ],
+ strategies: [STRATEGIES.DECOMPOSE],
+ cooldownMs: 0
+ },
+
+ // Network issues
+ network: {
+ patterns: [
+ /network/i,
+ /timeout/i,
+ /ECONNREFUSED/,
+ /ETIMEDOUT/,
+ /ENOTFOUND/,
+ /connection.*reset/i,
+ /socket.*hang.*up/i
+ ],
+ strategies: [STRATEGIES.RETRY, STRATEGIES.DEFER],
+ cooldownMs: 5000
+ },
+
+ // Content filtering
+ contentFilter: {
+ patterns: [
+ /content.*filter/i,
+ /safety/i,
+ /refus/i,
+ /cannot.*help/i,
+ /inappropriate/i
+ ],
+ strategies: [STRATEGIES.INVESTIGATE, STRATEGIES.SKIP],
+ cooldownMs: 0
+ },
+
+ // Resource exhaustion
+ resource: {
+ patterns: [
+ /out of memory/i,
+ /memory.*limit/i,
+ /disk.*space/i,
+ /no.*space/i
+ ],
+ strategies: [STRATEGIES.DEFER, STRATEGIES.MANUAL],
+ cooldownMs: 300000
+ },
+
+ // Process errors
+ process: {
+ patterns: [
+ /process.*exit/i,
+ /killed/i,
+ /signal/i,
+ /zombie/i
+ ],
+ strategies: [STRATEGIES.RETRY, STRATEGIES.INVESTIGATE],
+ cooldownMs: 10000
+ }
+}
+
+// Recovery attempt tracking
+const recoveryAttempts = new Map()
+const MAX_RECOVERY_ATTEMPTS = 3
+const ATTEMPT_RESET_MS = 3600000 // 1 hour
+
+// Recovery history
+const recoveryHistory = []
+const MAX_HISTORY = 200
+
+/**
+ * Analyze an error to determine its category and recommended recovery
+ * @param {Error|Object} error - Error to analyze
+ * @param {Object} context - Additional context
+ * @returns {Object} - Error analysis result
+ */
+function analyzeError(error, context = {}) {
+ const errorMessage = error?.message || error?.error || String(error)
+ const errorCode = error?.code || error?.status
+
+ let category = 'unknown'
+ let patterns = []
+ let suggestedStrategies = [STRATEGIES.RETRY]
+ let cooldownMs = 0
+
+ // Match against known patterns
+ for (const [cat, config] of Object.entries(ERROR_PATTERNS)) {
+ for (const pattern of config.patterns) {
+ if (pattern.test(errorMessage) || (errorCode && pattern.test(String(errorCode)))) {
+ category = cat
+ patterns = config.patterns.map(p => p.source)
+ suggestedStrategies = config.strategies
+ cooldownMs = config.cooldownMs
+ break
+ }
+ }
+ if (category !== 'unknown') break
+ }
+
+ return {
+ category,
+ message: errorMessage.substring(0, 500),
+ code: errorCode,
+ matchedPatterns: patterns,
+ suggestedStrategies,
+ cooldownMs,
+ severity: getSeverity(category),
+ recoverable: suggestedStrategies[0] !== STRATEGIES.MANUAL,
+ context: {
+ taskId: context.taskId,
+ agentId: context.agentId,
+ provider: context.provider,
+ model: context.model
+ }
+ }
+}
+
+/**
+ * Get severity level for error category
+ * @param {string} category - Error category
+ * @returns {string} - Severity level
+ */
+function getSeverity(category) {
+ const severities = {
+ rateLimit: 'medium',
+ auth: 'high',
+ modelUnavailable: 'medium',
+ contextLength: 'low',
+ network: 'medium',
+ contentFilter: 'low',
+ resource: 'high',
+ process: 'medium',
+ unknown: 'medium'
+ }
+ return severities[category] || 'medium'
+}
+
+/**
+ * Select the best recovery strategy based on analysis and history
+ * @param {Object} analysis - Error analysis from analyzeError()
+ * @param {Object} options - Recovery options
+ * @returns {Object} - Selected strategy with parameters
+ */
+function selectRecoveryStrategy(analysis, options = {}) {
+ const { taskId, agentId, attemptNumber = 1 } = options
+
+ // Check if max attempts reached
+ const attemptKey = `${taskId || agentId || 'global'}`
+ const attempts = getAttemptCount(attemptKey)
+
+ if (attempts >= MAX_RECOVERY_ATTEMPTS) {
+ return {
+ strategy: STRATEGIES.MANUAL,
+ reason: 'Maximum recovery attempts exceeded',
+ params: { requiresApproval: true }
+ }
+ }
+
+ // Get first viable strategy
+ const strategy = analysis.suggestedStrategies[0] || STRATEGIES.RETRY
+
+ // Calculate backoff for retries
+ let params = {}
+ if (strategy === STRATEGIES.RETRY || strategy === STRATEGIES.DEFER) {
+ const baseDelay = analysis.cooldownMs || 5000
+ const backoffDelay = baseDelay * Math.pow(2, attempts)
+ params.delayMs = Math.min(backoffDelay, 300000) // Max 5 minutes
+ }
+
+ if (strategy === STRATEGIES.ESCALATE) {
+ params.suggestHeavyModel = true
+ }
+
+ if (strategy === STRATEGIES.DECOMPOSE) {
+ params.suggestSmallerContext = true
+ params.maxChunkSize = 2000
+ }
+
+ if (strategy === STRATEGIES.FALLBACK) {
+ params.useFallbackProvider = true
+ }
+
+ return {
+ strategy,
+ reason: `Error category: ${analysis.category}`,
+ params,
+ attemptNumber: attempts + 1,
+ maxAttempts: MAX_RECOVERY_ATTEMPTS
+ }
+}
+
+/**
+ * Get recovery attempt count for a key
+ * @param {string} key - Attempt tracking key
+ * @returns {number} - Current attempt count
+ */
+function getAttemptCount(key) {
+ const record = recoveryAttempts.get(key)
+ if (!record) return 0
+
+ // Reset if too old
+ if (Date.now() - record.lastAttempt > ATTEMPT_RESET_MS) {
+ recoveryAttempts.delete(key)
+ return 0
+ }
+
+ return record.count
+}
+
+/**
+ * Record a recovery attempt
+ * @param {string} key - Attempt tracking key
+ * @param {Object} data - Attempt data
+ */
+function recordAttempt(key, data = {}) {
+ const record = recoveryAttempts.get(key) || { count: 0, history: [] }
+
+ record.count++
+ record.lastAttempt = Date.now()
+ record.history.push({
+ timestamp: Date.now(),
+ strategy: data.strategy,
+ success: data.success
+ })
+
+ // Keep only last 5 attempts in history
+ if (record.history.length > 5) {
+ record.history.shift()
+ }
+
+ recoveryAttempts.set(key, record)
+}
+
+/**
+ * Execute a recovery strategy
+ * @param {string} strategy - Strategy name
+ * @param {Object} task - Original task
+ * @param {Object} error - Original error
+ * @param {Object} params - Strategy parameters
+ * @returns {Promise} - Recovery result
+ */
+async function executeRecovery(strategy, task, error, params = {}) {
+ const startTime = Date.now()
+ const attemptKey = task?.id || 'global'
+
+ recordAttempt(attemptKey, { strategy, success: null })
+
+ let result = { success: false, strategy, action: null }
+
+ switch (strategy) {
+ case STRATEGIES.RETRY:
+ if (params.delayMs) {
+ await new Promise(resolve => setTimeout(resolve, params.delayMs))
+ }
+ result = {
+ success: true,
+ strategy,
+ action: 'retry_now',
+ message: `Retry after ${params.delayMs}ms delay`
+ }
+ break
+
+ case STRATEGIES.DEFER:
+ result = {
+ success: true,
+ strategy,
+ action: 'reschedule',
+ rescheduleAfterMs: params.delayMs || 60000,
+ message: `Task rescheduled for ${params.delayMs}ms later`
+ }
+ break
+
+ case STRATEGIES.FALLBACK:
+ result = {
+ success: true,
+ strategy,
+ action: 'use_fallback',
+ useFallback: true,
+ message: 'Switching to fallback provider'
+ }
+ break
+
+ case STRATEGIES.ESCALATE:
+ result = {
+ success: true,
+ strategy,
+ action: 'escalate_model',
+ useHeavyModel: true,
+ message: 'Escalating to heavy model'
+ }
+ break
+
+ case STRATEGIES.DECOMPOSE:
+ result = {
+ success: true,
+ strategy,
+ action: 'decompose_task',
+ maxChunkSize: params.maxChunkSize || 2000,
+ message: 'Breaking task into smaller chunks'
+ }
+ break
+
+ case STRATEGIES.INVESTIGATE:
+ result = {
+ success: true,
+ strategy,
+ action: 'create_investigation',
+ createInvestigationTask: true,
+ originalError: error?.message,
+ message: 'Creating investigation task'
+ }
+ break
+
+ case STRATEGIES.SKIP:
+ result = {
+ success: true,
+ strategy,
+ action: 'skip_task',
+ skipped: true,
+ message: 'Task skipped due to unrecoverable error'
+ }
+ break
+
+ case STRATEGIES.MANUAL:
+ result = {
+ success: false,
+ strategy,
+ action: 'require_manual',
+ requiresManualIntervention: true,
+ message: 'Manual intervention required'
+ }
+ break
+
+ default:
+ result = {
+ success: false,
+ strategy,
+ action: 'unknown_strategy',
+ message: `Unknown strategy: ${strategy}`
+ }
+ }
+
+ // Update attempt record with result
+ const record = recoveryAttempts.get(attemptKey)
+ if (record && record.history.length > 0) {
+ record.history[record.history.length - 1].success = result.success
+ }
+
+ // Add to history
+ recoveryHistory.unshift({
+ timestamp: Date.now(),
+ taskId: task?.id,
+ errorCategory: error?.category || 'unknown',
+ strategy,
+ success: result.success,
+ durationMs: Date.now() - startTime
+ })
+
+ while (recoveryHistory.length > MAX_HISTORY) {
+ recoveryHistory.pop()
+ }
+
+ // Emit event
+ cosEvents.emit('recovery:executed', {
+ taskId: task?.id,
+ strategy,
+ success: result.success,
+ action: result.action
+ })
+
+ return result
+}
+
+/**
+ * Get recovery statistics
+ * @returns {Object} - Recovery stats
+ */
+function getStats() {
+ const recent = recoveryHistory.slice(0, 100)
+ const successCount = recent.filter(r => r.success).length
+
+ const byStrategy = {}
+ const byCategory = {}
+
+ for (const record of recent) {
+ byStrategy[record.strategy] = (byStrategy[record.strategy] || 0) + 1
+ byCategory[record.errorCategory] = (byCategory[record.errorCategory] || 0) + 1
+ }
+
+ return {
+ totalAttempts: recoveryHistory.length,
+ recentAttempts: recent.length,
+ successRate: recent.length > 0 ? ((successCount / recent.length) * 100).toFixed(1) + '%' : '0%',
+ byStrategy,
+ byCategory,
+ activeAttemptKeys: recoveryAttempts.size
+ }
+}
+
+/**
+ * Get recovery history
+ * @param {Object} options - Filter options
+ * @returns {Array} - Recovery history
+ */
+function getHistory(options = {}) {
+ let history = [...recoveryHistory]
+
+ if (options.strategy) {
+ history = history.filter(r => r.strategy === options.strategy)
+ }
+
+ if (options.success !== undefined) {
+ history = history.filter(r => r.success === options.success)
+ }
+
+ const limit = options.limit || 50
+ return history.slice(0, limit)
+}
+
+/**
+ * Reset attempt counter for a key
+ * @param {string} key - Attempt tracking key
+ */
+function resetAttempts(key) {
+ recoveryAttempts.delete(key)
+}
+
+/**
+ * Clear all attempt counters
+ */
+function clearAllAttempts() {
+ recoveryAttempts.clear()
+}
+
+export {
+ STRATEGIES,
+ ERROR_PATTERNS,
+ analyzeError,
+ selectRecoveryStrategy,
+ executeRecovery,
+ recordAttempt,
+ getAttemptCount,
+ getStats,
+ getHistory,
+ resetAttempts,
+ clearAllAttempts
+}
diff --git a/server/services/errorRecovery.test.js b/server/services/errorRecovery.test.js
new file mode 100644
index 0000000..2d726c8
--- /dev/null
+++ b/server/services/errorRecovery.test.js
@@ -0,0 +1,389 @@
+import { describe, it, expect, beforeEach, vi } from 'vitest';
+import {
+ STRATEGIES,
+ ERROR_PATTERNS,
+ analyzeError,
+ selectRecoveryStrategy,
+ executeRecovery,
+ recordAttempt,
+ getAttemptCount,
+ getStats,
+ getHistory,
+ resetAttempts,
+ clearAllAttempts
+} from './errorRecovery.js';
+
+// Mock the cosEvents
+vi.mock('./cos.js', () => ({
+ cosEvents: {
+ emit: vi.fn()
+ }
+}));
+
+describe('Error Recovery Service', () => {
+ beforeEach(() => {
+ clearAllAttempts();
+ });
+
+ describe('STRATEGIES', () => {
+ it('should have all required strategies', () => {
+ expect(STRATEGIES.RETRY).toBe('retry');
+ expect(STRATEGIES.ESCALATE).toBe('escalate');
+ expect(STRATEGIES.FALLBACK).toBe('fallback');
+ expect(STRATEGIES.DECOMPOSE).toBe('decompose');
+ expect(STRATEGIES.DEFER).toBe('defer');
+ expect(STRATEGIES.INVESTIGATE).toBe('investigate');
+ expect(STRATEGIES.SKIP).toBe('skip');
+ expect(STRATEGIES.MANUAL).toBe('manual');
+ });
+ });
+
+ describe('ERROR_PATTERNS', () => {
+ it('should have patterns for rate limiting', () => {
+ expect(ERROR_PATTERNS.rateLimit).toBeDefined();
+ expect(ERROR_PATTERNS.rateLimit.patterns.length).toBeGreaterThan(0);
+ });
+
+ it('should have patterns for auth errors', () => {
+ expect(ERROR_PATTERNS.auth).toBeDefined();
+ expect(ERROR_PATTERNS.auth.strategies).toContain(STRATEGIES.FALLBACK);
+ });
+
+ it('should have patterns for network errors', () => {
+ expect(ERROR_PATTERNS.network).toBeDefined();
+ expect(ERROR_PATTERNS.network.cooldownMs).toBe(5000);
+ });
+ });
+
+ describe('analyzeError', () => {
+ it('should categorize rate limit errors', () => {
+ const result = analyzeError({ message: 'Rate limit exceeded' });
+
+ expect(result.category).toBe('rateLimit');
+ expect(result.suggestedStrategies).toContain(STRATEGIES.DEFER);
+ });
+
+ it('should categorize auth errors', () => {
+ const result = analyzeError({ message: 'Unauthorized access' });
+
+ expect(result.category).toBe('auth');
+ expect(result.severity).toBe('high');
+ });
+
+ it('should categorize model unavailable errors', () => {
+ const result = analyzeError({ message: 'Model not found', code: 503 });
+
+ expect(result.category).toBe('modelUnavailable');
+ expect(result.suggestedStrategies).toContain(STRATEGIES.FALLBACK);
+ });
+
+ it('should categorize context length errors', () => {
+ const result = analyzeError({ message: 'Token limit exceeded' });
+
+ expect(result.category).toBe('contextLength');
+ expect(result.suggestedStrategies).toContain(STRATEGIES.DECOMPOSE);
+ });
+
+ it('should categorize network errors', () => {
+ const result = analyzeError({ message: 'ECONNREFUSED' });
+
+ expect(result.category).toBe('network');
+ expect(result.suggestedStrategies).toContain(STRATEGIES.RETRY);
+ });
+
+ it('should return unknown for unrecognized errors', () => {
+ const result = analyzeError({ message: 'Some random error' });
+
+ expect(result.category).toBe('unknown');
+ expect(result.suggestedStrategies).toContain(STRATEGIES.RETRY);
+ });
+
+ it('should include context when provided', () => {
+ const result = analyzeError(
+ { message: 'Error' },
+ { taskId: 'task-1', agentId: 'agent-1' }
+ );
+
+ expect(result.context.taskId).toBe('task-1');
+ expect(result.context.agentId).toBe('agent-1');
+ });
+
+ it('should truncate long error messages', () => {
+ const longMessage = 'x'.repeat(1000);
+ const result = analyzeError({ message: longMessage });
+
+ expect(result.message.length).toBeLessThanOrEqual(500);
+ });
+
+ it('should mark manual strategies as not recoverable', () => {
+ const result = analyzeError({ message: 'Unknown critical failure' });
+ // Most errors should be recoverable by default
+ expect(result.recoverable).toBe(true);
+ });
+ });
+
+ describe('selectRecoveryStrategy', () => {
+ it('should select first suggested strategy', () => {
+ const analysis = {
+ suggestedStrategies: [STRATEGIES.FALLBACK, STRATEGIES.DEFER],
+ cooldownMs: 5000
+ };
+
+ const result = selectRecoveryStrategy(analysis);
+
+ expect(result.strategy).toBe(STRATEGIES.FALLBACK);
+ });
+
+ it('should calculate backoff delay for retry', () => {
+ const analysis = {
+ suggestedStrategies: [STRATEGIES.RETRY],
+ cooldownMs: 1000
+ };
+
+ const result = selectRecoveryStrategy(analysis);
+
+ expect(result.params.delayMs).toBeDefined();
+ expect(result.params.delayMs).toBeGreaterThanOrEqual(1000);
+ });
+
+ it('should increase delay for subsequent attempts', () => {
+ const analysis = {
+ suggestedStrategies: [STRATEGIES.RETRY],
+ cooldownMs: 1000
+ };
+
+ recordAttempt('test-task');
+ recordAttempt('test-task');
+
+ const result = selectRecoveryStrategy(analysis, { taskId: 'test-task' });
+
+ // Delay should be exponentially higher
+ expect(result.params.delayMs).toBeGreaterThan(1000);
+ });
+
+ it('should return MANUAL when max attempts exceeded', () => {
+ // Exhaust attempts
+ for (let i = 0; i < 3; i++) {
+ recordAttempt('exhausted-task');
+ }
+
+ const analysis = { suggestedStrategies: [STRATEGIES.RETRY] };
+ const result = selectRecoveryStrategy(analysis, { taskId: 'exhausted-task' });
+
+ expect(result.strategy).toBe(STRATEGIES.MANUAL);
+ expect(result.reason).toContain('Maximum recovery attempts');
+ });
+
+ it('should set params for escalate strategy', () => {
+ const analysis = { suggestedStrategies: [STRATEGIES.ESCALATE] };
+ const result = selectRecoveryStrategy(analysis);
+
+ expect(result.params.suggestHeavyModel).toBe(true);
+ });
+
+ it('should set params for decompose strategy', () => {
+ const analysis = { suggestedStrategies: [STRATEGIES.DECOMPOSE] };
+ const result = selectRecoveryStrategy(analysis);
+
+ expect(result.params.suggestSmallerContext).toBe(true);
+ expect(result.params.maxChunkSize).toBeDefined();
+ });
+ });
+
+ describe('executeRecovery', () => {
+ it('should execute retry strategy', async () => {
+ const result = await executeRecovery(
+ STRATEGIES.RETRY,
+ { id: 'task-1' },
+ { message: 'Error' },
+ { delayMs: 10 }
+ );
+
+ expect(result.success).toBe(true);
+ expect(result.action).toBe('retry_now');
+ });
+
+ it('should execute defer strategy', async () => {
+ const result = await executeRecovery(
+ STRATEGIES.DEFER,
+ { id: 'task-1' },
+ { message: 'Error' },
+ { delayMs: 1000 }
+ );
+
+ expect(result.success).toBe(true);
+ expect(result.action).toBe('reschedule');
+ expect(result.rescheduleAfterMs).toBe(1000);
+ });
+
+ it('should execute fallback strategy', async () => {
+ const result = await executeRecovery(
+ STRATEGIES.FALLBACK,
+ { id: 'task-1' },
+ { message: 'Error' }
+ );
+
+ expect(result.success).toBe(true);
+ expect(result.action).toBe('use_fallback');
+ expect(result.useFallback).toBe(true);
+ });
+
+ it('should execute escalate strategy', async () => {
+ const result = await executeRecovery(
+ STRATEGIES.ESCALATE,
+ { id: 'task-1' },
+ { message: 'Error' }
+ );
+
+ expect(result.success).toBe(true);
+ expect(result.action).toBe('escalate_model');
+ expect(result.useHeavyModel).toBe(true);
+ });
+
+ it('should execute decompose strategy', async () => {
+ const result = await executeRecovery(
+ STRATEGIES.DECOMPOSE,
+ { id: 'task-1' },
+ { message: 'Error' },
+ { maxChunkSize: 1000 }
+ );
+
+ expect(result.success).toBe(true);
+ expect(result.action).toBe('decompose_task');
+ expect(result.maxChunkSize).toBe(1000);
+ });
+
+ it('should execute investigate strategy', async () => {
+ const result = await executeRecovery(
+ STRATEGIES.INVESTIGATE,
+ { id: 'task-1' },
+ { message: 'Weird error' }
+ );
+
+ expect(result.success).toBe(true);
+ expect(result.action).toBe('create_investigation');
+ expect(result.createInvestigationTask).toBe(true);
+ });
+
+ it('should execute skip strategy', async () => {
+ const result = await executeRecovery(
+ STRATEGIES.SKIP,
+ { id: 'task-1' },
+ { message: 'Error' }
+ );
+
+ expect(result.success).toBe(true);
+ expect(result.action).toBe('skip_task');
+ expect(result.skipped).toBe(true);
+ });
+
+ it('should execute manual strategy as unsuccessful', async () => {
+ const result = await executeRecovery(
+ STRATEGIES.MANUAL,
+ { id: 'task-1' },
+ { message: 'Error' }
+ );
+
+ expect(result.success).toBe(false);
+ expect(result.action).toBe('require_manual');
+ expect(result.requiresManualIntervention).toBe(true);
+ });
+
+ it('should handle unknown strategy', async () => {
+ const result = await executeRecovery(
+ 'unknown',
+ { id: 'task-1' },
+ { message: 'Error' }
+ );
+
+ expect(result.success).toBe(false);
+ expect(result.action).toBe('unknown_strategy');
+ });
+ });
+
+ describe('recordAttempt', () => {
+ it('should increment attempt count', () => {
+ expect(getAttemptCount('test-key')).toBe(0);
+
+ recordAttempt('test-key');
+ expect(getAttemptCount('test-key')).toBe(1);
+
+ recordAttempt('test-key');
+ expect(getAttemptCount('test-key')).toBe(2);
+ });
+
+ it('should track different keys separately', () => {
+ recordAttempt('key-1');
+ recordAttempt('key-2');
+ recordAttempt('key-2');
+
+ expect(getAttemptCount('key-1')).toBe(1);
+ expect(getAttemptCount('key-2')).toBe(2);
+ });
+ });
+
+ describe('getStats', () => {
+ it('should return recovery statistics', async () => {
+ await executeRecovery(STRATEGIES.RETRY, { id: 'task-1' }, {}, { delayMs: 1 });
+ await executeRecovery(STRATEGIES.FALLBACK, { id: 'task-2' }, {});
+
+ const stats = getStats();
+
+ expect(stats.totalAttempts).toBeGreaterThan(0);
+ expect(stats.byStrategy[STRATEGIES.RETRY]).toBeDefined();
+ expect(stats.successRate).toBeDefined();
+ });
+ });
+
+ describe('getHistory', () => {
+ it('should return recovery history', async () => {
+ await executeRecovery(STRATEGIES.RETRY, { id: 'task-1' }, {}, { delayMs: 1 });
+
+ const history = getHistory();
+
+ expect(history.length).toBeGreaterThan(0);
+ expect(history[0].strategy).toBe(STRATEGIES.RETRY);
+ });
+
+ it('should filter by strategy', async () => {
+ await executeRecovery(STRATEGIES.RETRY, { id: 'task-1' }, {}, { delayMs: 1 });
+ await executeRecovery(STRATEGIES.FALLBACK, { id: 'task-2' }, {});
+
+ const retryOnly = getHistory({ strategy: STRATEGIES.RETRY });
+
+ expect(retryOnly.every(r => r.strategy === STRATEGIES.RETRY)).toBe(true);
+ });
+
+ it('should respect limit', async () => {
+ for (let i = 0; i < 5; i++) {
+ await executeRecovery(STRATEGIES.RETRY, { id: `task-${i}` }, {}, { delayMs: 1 });
+ }
+
+ const limited = getHistory({ limit: 2 });
+ expect(limited.length).toBe(2);
+ });
+ });
+
+ describe('resetAttempts', () => {
+ it('should reset attempt counter for specific key', () => {
+ recordAttempt('test-key');
+ recordAttempt('test-key');
+ expect(getAttemptCount('test-key')).toBe(2);
+
+ resetAttempts('test-key');
+ expect(getAttemptCount('test-key')).toBe(0);
+ });
+ });
+
+ describe('clearAllAttempts', () => {
+ it('should clear all attempt counters', () => {
+ recordAttempt('key-1');
+ recordAttempt('key-2');
+
+ clearAllAttempts();
+
+ expect(getAttemptCount('key-1')).toBe(0);
+ expect(getAttemptCount('key-2')).toBe(0);
+ });
+ });
+});
diff --git a/server/services/eventScheduler.js b/server/services/eventScheduler.js
new file mode 100644
index 0000000..54f34d4
--- /dev/null
+++ b/server/services/eventScheduler.js
@@ -0,0 +1,482 @@
+/**
+ * Event Scheduler Service
+ *
+ * Event-driven scheduling with cron expressions and timeout-safe timers.
+ * Replaces setInterval with more robust scheduling.
+ */
+
+import { cosEvents } from './cosEvents.js'
+
+// Maximum safe setTimeout value (2^31 - 1 ms, ~24.8 days)
+const MAX_TIMEOUT = 2147483647
+
+// Scheduled events storage
+const scheduledEvents = new Map()
+
+// Active timers
+const activeTimers = new Map()
+
+// Event history
+const eventHistory = []
+const MAX_HISTORY = 500
+
+/**
+ * Parse cron expression to next execution time
+ * Supports: minute hour dayOfMonth month dayOfWeek
+ *
+ * Special values:
+ * - '*' = any value
+ * - 'number' = specific value
+ * - 'start/step' = every step starting at start
+ *
+ * @param {string} cronExpr - Cron expression
+ * @param {Date} from - Starting point (default: now)
+ * @returns {Date} - Next execution time
+ */
+function parseCronToNextRun(cronExpr, from = new Date()) {
+ const parts = cronExpr.trim().split(/\s+/)
+ if (parts.length !== 5) {
+ throw new Error(`Invalid cron expression: ${cronExpr}`)
+ }
+
+ const [minuteExpr, hourExpr, dayOfMonthExpr, monthExpr, dayOfWeekExpr] = parts
+
+ // Simple implementation - find next matching time
+ const next = new Date(from)
+ next.setSeconds(0, 0)
+ next.setMinutes(next.getMinutes() + 1) // Start from next minute
+
+ // Maximum search: 2 years
+ const maxDate = new Date(from)
+ maxDate.setFullYear(maxDate.getFullYear() + 2)
+
+ while (next < maxDate) {
+ if (matchesCronField(next.getMonth() + 1, monthExpr) &&
+ matchesCronField(next.getDate(), dayOfMonthExpr) &&
+ matchesCronField(next.getDay(), dayOfWeekExpr) &&
+ matchesCronField(next.getHours(), hourExpr) &&
+ matchesCronField(next.getMinutes(), minuteExpr)) {
+ return next
+ }
+ next.setMinutes(next.getMinutes() + 1)
+ }
+
+ return null // No match found within 2 years
+}
+
+/**
+ * Check if a value matches a cron field expression
+ * @param {number} value - Current value
+ * @param {string} expr - Cron field expression
+ * @returns {boolean} - True if matches
+ */
+function matchesCronField(value, expr) {
+ if (expr === '*') return true
+
+ // Handle comma-separated values
+ if (expr.includes(',')) {
+ return expr.split(',').some(part => matchesCronField(value, part.trim()))
+ }
+
+ // Handle ranges (e.g., 1-5)
+ if (expr.includes('-')) {
+ const [start, end] = expr.split('-').map(Number)
+ return value >= start && value <= end
+ }
+
+ // Handle step values (e.g., */5 or 0/10)
+ if (expr.includes('/')) {
+ const [start, step] = expr.split('/')
+ const startNum = start === '*' ? 0 : Number(start)
+ const stepNum = Number(step)
+ return (value - startNum) % stepNum === 0 && value >= startNum
+ }
+
+ // Direct value match
+ return Number(expr) === value
+}
+
+/**
+ * Create a timeout-safe timer
+ * Handles values larger than MAX_TIMEOUT by chaining
+ *
+ * @param {Function} callback - Function to call
+ * @param {number} delayMs - Delay in milliseconds
+ * @param {string} eventId - Event identifier for tracking
+ * @returns {Object} - Timer handle
+ */
+function createSafeTimer(callback, delayMs, eventId) {
+ const clampedDelay = Math.min(delayMs, MAX_TIMEOUT)
+
+ if (delayMs <= MAX_TIMEOUT) {
+ // Simple case - use regular setTimeout
+ const timerId = setTimeout(() => {
+ activeTimers.delete(eventId)
+ callback()
+ }, clampedDelay)
+
+ return { timerId, type: 'simple' }
+ }
+
+ // Chain timeouts for longer delays
+ const remaining = delayMs - MAX_TIMEOUT
+ const timerId = setTimeout(() => {
+ // Schedule the next chunk
+ const nextTimer = createSafeTimer(callback, remaining, eventId)
+ activeTimers.set(eventId, nextTimer)
+ }, MAX_TIMEOUT)
+
+ return { timerId, type: 'chained', remaining }
+}
+
+/**
+ * Schedule an event
+ *
+ * @param {Object} config - Event configuration
+ * @param {string} config.id - Unique event identifier
+ * @param {string} config.type - Event type (cron, interval, once)
+ * @param {string} config.cron - Cron expression (for type: cron)
+ * @param {number} config.intervalMs - Interval in ms (for type: interval)
+ * @param {number} config.delayMs - Delay in ms (for type: once)
+ * @param {Function} config.handler - Event handler function
+ * @param {Object} config.metadata - Additional metadata
+ * @returns {Object} - Scheduled event
+ */
+function schedule(config) {
+ const { id, type, cron, intervalMs, delayMs, handler, metadata = {} } = config
+
+ if (!id || !type || !handler) {
+ throw new Error('Event requires id, type, and handler')
+ }
+
+ // Cancel existing event with same ID
+ if (scheduledEvents.has(id)) {
+ cancel(id)
+ }
+
+ const event = {
+ id,
+ type,
+ cron,
+ intervalMs,
+ delayMs,
+ handler,
+ metadata,
+ createdAt: Date.now(),
+ nextRunAt: null,
+ lastRunAt: null,
+ runCount: 0,
+ active: true
+ }
+
+ // Calculate next run time
+ switch (type) {
+ case 'cron':
+ if (!cron) throw new Error('Cron type requires cron expression')
+ event.nextRunAt = parseCronToNextRun(cron)?.getTime() || null
+ break
+
+ case 'interval':
+ if (!intervalMs) throw new Error('Interval type requires intervalMs')
+ event.nextRunAt = Date.now() + intervalMs
+ break
+
+ case 'once':
+ if (!delayMs) throw new Error('Once type requires delayMs')
+ event.nextRunAt = Date.now() + delayMs
+ break
+
+ default:
+ throw new Error(`Unknown event type: ${type}`)
+ }
+
+ scheduledEvents.set(id, event)
+ scheduleNextRun(event)
+
+ console.log(`๐
Event scheduled: ${id} (${type}) - next run: ${event.nextRunAt ? new Date(event.nextRunAt).toISOString() : 'never'}`)
+ cosEvents.emit('scheduler:scheduled', { id, type, nextRunAt: event.nextRunAt })
+
+ return event
+}
+
+/**
+ * Schedule the next run of an event
+ * @param {Object} event - Event object
+ */
+function scheduleNextRun(event) {
+ if (!event.active || !event.nextRunAt) return
+
+ const delay = event.nextRunAt - Date.now()
+ if (delay < 0) {
+ // Already past - run immediately for non-recurring, or calculate next for recurring
+ if (event.type === 'once') {
+ runEvent(event)
+ return
+ }
+ // Calculate next occurrence
+ updateNextRunTime(event)
+ scheduleNextRun(event)
+ return
+ }
+
+ const timer = createSafeTimer(() => runEvent(event), delay, event.id)
+ activeTimers.set(event.id, timer)
+}
+
+/**
+ * Run an event
+ * @param {Object} event - Event object
+ */
+async function runEvent(event) {
+ const startTime = Date.now()
+
+ event.lastRunAt = startTime
+ event.runCount++
+
+ let success = true
+ let error = null
+
+ try {
+ await event.handler(event)
+ } catch (err) {
+ success = false
+ error = err.message
+ console.error(`โ ๏ธ Event ${event.id} failed: ${err.message}`)
+ }
+
+ // Record in history
+ eventHistory.unshift({
+ eventId: event.id,
+ type: event.type,
+ runAt: startTime,
+ duration: Date.now() - startTime,
+ success,
+ error
+ })
+
+ while (eventHistory.length > MAX_HISTORY) {
+ eventHistory.pop()
+ }
+
+ // Emit completion
+ cosEvents.emit('scheduler:ran', {
+ id: event.id,
+ success,
+ runCount: event.runCount
+ })
+
+ // Schedule next run for recurring events
+ if (event.active && event.type !== 'once') {
+ updateNextRunTime(event)
+ scheduleNextRun(event)
+ } else if (event.type === 'once') {
+ event.active = false
+ activeTimers.delete(event.id)
+ }
+}
+
+/**
+ * Update the next run time for a recurring event
+ * @param {Object} event - Event object
+ */
+function updateNextRunTime(event) {
+ switch (event.type) {
+ case 'cron':
+ const nextDate = parseCronToNextRun(event.cron, new Date())
+ event.nextRunAt = nextDate?.getTime() || null
+ break
+
+ case 'interval':
+ event.nextRunAt = Date.now() + event.intervalMs
+ break
+
+ case 'once':
+ event.nextRunAt = null
+ break
+ }
+}
+
+/**
+ * Cancel a scheduled event
+ * @param {string} id - Event identifier
+ * @returns {boolean} - True if event was found and cancelled
+ */
+function cancel(id) {
+ const event = scheduledEvents.get(id)
+ if (!event) return false
+
+ event.active = false
+
+ const timer = activeTimers.get(id)
+ if (timer) {
+ clearTimeout(timer.timerId)
+ activeTimers.delete(id)
+ }
+
+ scheduledEvents.delete(id)
+ console.log(`๐
Event cancelled: ${id}`)
+ cosEvents.emit('scheduler:cancelled', { id })
+
+ return true
+}
+
+/**
+ * Pause a scheduled event
+ * @param {string} id - Event identifier
+ * @returns {boolean} - True if event was found and paused
+ */
+function pause(id) {
+ const event = scheduledEvents.get(id)
+ if (!event) return false
+
+ event.active = false
+
+ const timer = activeTimers.get(id)
+ if (timer) {
+ clearTimeout(timer.timerId)
+ activeTimers.delete(id)
+ }
+
+ console.log(`โธ๏ธ Event paused: ${id}`)
+ return true
+}
+
+/**
+ * Resume a paused event
+ * @param {string} id - Event identifier
+ * @returns {boolean} - True if event was found and resumed
+ */
+function resume(id) {
+ const event = scheduledEvents.get(id)
+ if (!event) return false
+
+ event.active = true
+ updateNextRunTime(event)
+ scheduleNextRun(event)
+
+ console.log(`โถ๏ธ Event resumed: ${id}`)
+ return true
+}
+
+/**
+ * Get all scheduled events
+ * @returns {Array} - All scheduled events
+ */
+function getScheduledEvents() {
+ return Array.from(scheduledEvents.values()).map(e => ({
+ id: e.id,
+ type: e.type,
+ active: e.active,
+ nextRunAt: e.nextRunAt,
+ lastRunAt: e.lastRunAt,
+ runCount: e.runCount,
+ metadata: e.metadata
+ }))
+}
+
+/**
+ * Get event by ID
+ * @param {string} id - Event identifier
+ * @returns {Object|null} - Event or null
+ */
+function getEvent(id) {
+ const event = scheduledEvents.get(id)
+ if (!event) return null
+
+ return {
+ id: event.id,
+ type: event.type,
+ active: event.active,
+ cron: event.cron,
+ intervalMs: event.intervalMs,
+ nextRunAt: event.nextRunAt,
+ lastRunAt: event.lastRunAt,
+ runCount: event.runCount,
+ metadata: event.metadata
+ }
+}
+
+/**
+ * Get event history
+ * @param {Object} options - Filter options
+ * @returns {Array} - Event history
+ */
+function getHistory(options = {}) {
+ let history = [...eventHistory]
+
+ if (options.eventId) {
+ history = history.filter(h => h.eventId === options.eventId)
+ }
+
+ if (options.success !== undefined) {
+ history = history.filter(h => h.success === options.success)
+ }
+
+ const limit = options.limit || 50
+ return history.slice(0, limit)
+}
+
+/**
+ * Get scheduler statistics
+ * @returns {Object} - Scheduler stats
+ */
+function getStats() {
+ const events = Array.from(scheduledEvents.values())
+ const recent = eventHistory.slice(0, 100)
+
+ return {
+ totalEvents: events.length,
+ activeEvents: events.filter(e => e.active).length,
+ activeTimers: activeTimers.size,
+ byType: events.reduce((acc, e) => {
+ acc[e.type] = (acc[e.type] || 0) + 1
+ return acc
+ }, {}),
+ totalRuns: eventHistory.length,
+ recentSuccessRate: recent.length > 0
+ ? ((recent.filter(h => h.success).length / recent.length) * 100).toFixed(1) + '%'
+ : '100%'
+ }
+}
+
+/**
+ * Cancel all scheduled events
+ * @returns {number} - Number of events cancelled
+ */
+function cancelAll() {
+ const count = scheduledEvents.size
+
+ for (const id of scheduledEvents.keys()) {
+ cancel(id)
+ }
+
+ return count
+}
+
+/**
+ * Trigger an event immediately (for testing or manual runs)
+ * @param {string} id - Event identifier
+ * @returns {Promise} - True if event was found and triggered
+ */
+async function triggerNow(id) {
+ const event = scheduledEvents.get(id)
+ if (!event) return false
+
+ await runEvent(event)
+ return true
+}
+
+export {
+ schedule,
+ cancel,
+ pause,
+ resume,
+ getScheduledEvents,
+ getEvent,
+ getHistory,
+ getStats,
+ cancelAll,
+ triggerNow,
+ parseCronToNextRun,
+ MAX_TIMEOUT
+}
diff --git a/server/services/executionLanes.js b/server/services/executionLanes.js
new file mode 100644
index 0000000..aab9525
--- /dev/null
+++ b/server/services/executionLanes.js
@@ -0,0 +1,440 @@
+/**
+ * Execution Lanes Service
+ *
+ * Lane-based concurrency control for agent execution.
+ * Lanes: critical (1), standard (2), background (3)
+ */
+
+import { cosEvents } from './cosEvents.js'
+
+// Lane configuration
+const LANES = {
+ critical: {
+ name: 'critical',
+ maxConcurrent: 1,
+ priority: 1,
+ description: 'High-priority user tasks, blocking operations'
+ },
+ standard: {
+ name: 'standard',
+ maxConcurrent: 2,
+ priority: 2,
+ description: 'Normal task execution'
+ },
+ background: {
+ name: 'background',
+ maxConcurrent: 3,
+ priority: 3,
+ description: 'Self-improvement, idle work, non-urgent tasks'
+ }
+}
+
+// Lane occupancy tracking
+const laneOccupancy = {
+ critical: new Map(), // agentId -> { taskId, startedAt, metadata }
+ standard: new Map(),
+ background: new Map()
+}
+
+// Queue for tasks waiting for lane availability
+const waitingQueue = {
+ critical: [],
+ standard: [],
+ background: []
+}
+
+// Statistics
+const stats = {
+ acquired: 0,
+ released: 0,
+ queued: 0,
+ timeouts: 0,
+ promotions: 0
+}
+
+/**
+ * Get lane by name or determine from task priority
+ * @param {string|Object} laneOrTask - Lane name or task object
+ * @returns {string} - Lane name
+ */
+function determineLane(laneOrTask) {
+ if (typeof laneOrTask === 'string') {
+ return LANES[laneOrTask] ? laneOrTask : 'standard'
+ }
+
+ const task = laneOrTask
+ const priority = task?.priority?.toUpperCase()
+
+ switch (priority) {
+ case 'URGENT':
+ case 'CRITICAL':
+ return 'critical'
+ case 'HIGH':
+ case 'MEDIUM':
+ return 'standard'
+ case 'LOW':
+ case 'IDLE':
+ return 'background'
+ default:
+ return task?.metadata?.isUserTask ? 'standard' : 'background'
+ }
+}
+
+/**
+ * Check if a lane has available capacity
+ * @param {string} laneName - Lane name
+ * @returns {boolean} - True if capacity available
+ */
+function hasCapacity(laneName) {
+ const lane = LANES[laneName]
+ if (!lane) return false
+
+ const occupancy = laneOccupancy[laneName]
+ return occupancy.size < lane.maxConcurrent
+}
+
+/**
+ * Get current lane status
+ * @param {string} laneName - Lane name
+ * @returns {Object} - Lane status
+ */
+function getLaneStatus(laneName) {
+ const lane = LANES[laneName]
+ if (!lane) return null
+
+ const occupancy = laneOccupancy[laneName]
+ const queue = waitingQueue[laneName]
+
+ return {
+ name: lane.name,
+ maxConcurrent: lane.maxConcurrent,
+ currentOccupancy: occupancy.size,
+ available: lane.maxConcurrent - occupancy.size,
+ queueLength: queue.length,
+ occupants: Array.from(occupancy.entries()).map(([agentId, data]) => ({
+ agentId,
+ taskId: data.taskId,
+ startedAt: data.startedAt,
+ runningMs: Date.now() - data.startedAt
+ }))
+ }
+}
+
+/**
+ * Acquire a slot in a lane
+ * @param {string} laneName - Lane name
+ * @param {string} agentId - Agent identifier
+ * @param {Object} metadata - Additional metadata (taskId, etc.)
+ * @returns {Object} - Acquisition result
+ */
+function acquire(laneName, agentId, metadata = {}) {
+ const lane = LANES[laneName]
+ if (!lane) {
+ return { success: false, error: `Unknown lane: ${laneName}` }
+ }
+
+ const occupancy = laneOccupancy[laneName]
+
+ // Check if already in this lane
+ if (occupancy.has(agentId)) {
+ return { success: true, alreadyAcquired: true, lane: laneName }
+ }
+
+ // Check capacity
+ if (occupancy.size >= lane.maxConcurrent) {
+ return {
+ success: false,
+ error: 'Lane at capacity',
+ lane: laneName,
+ currentOccupancy: occupancy.size,
+ maxConcurrent: lane.maxConcurrent
+ }
+ }
+
+ // Acquire slot
+ occupancy.set(agentId, {
+ taskId: metadata.taskId,
+ startedAt: Date.now(),
+ metadata
+ })
+
+ stats.acquired++
+
+ cosEvents.emit('lane:acquired', {
+ lane: laneName,
+ agentId,
+ taskId: metadata.taskId,
+ occupancy: occupancy.size
+ })
+
+ console.log(`๐ค๏ธ Lane acquired: ${agentId} โ ${laneName} (${occupancy.size}/${lane.maxConcurrent})`)
+
+ return {
+ success: true,
+ lane: laneName,
+ position: occupancy.size
+ }
+}
+
+/**
+ * Release a lane slot
+ * @param {string} agentId - Agent identifier
+ * @returns {Object} - Release result
+ */
+function release(agentId) {
+ for (const [laneName, occupancy] of Object.entries(laneOccupancy)) {
+ if (occupancy.has(agentId)) {
+ const data = occupancy.get(agentId)
+ occupancy.delete(agentId)
+
+ stats.released++
+
+ const runningMs = Date.now() - data.startedAt
+
+ cosEvents.emit('lane:released', {
+ lane: laneName,
+ agentId,
+ taskId: data.taskId,
+ runningMs,
+ occupancy: occupancy.size
+ })
+
+ console.log(`๐ค๏ธ Lane released: ${agentId} โ ${laneName} (ran ${runningMs}ms)`)
+
+ // Process waiting queue for this lane
+ processWaitingQueue(laneName)
+
+ return {
+ success: true,
+ lane: laneName,
+ runningMs
+ }
+ }
+ }
+
+ return { success: false, error: 'Agent not in any lane' }
+}
+
+/**
+ * Wait for lane availability (with timeout)
+ * @param {string} laneName - Lane name
+ * @param {string} agentId - Agent identifier
+ * @param {Object} options - Wait options
+ * @returns {Promise} - Acquisition result
+ */
+async function waitForLane(laneName, agentId, options = {}) {
+ const { timeoutMs = 60000, metadata = {} } = options
+
+ // Try immediate acquisition
+ const immediate = acquire(laneName, agentId, metadata)
+ if (immediate.success) return immediate
+
+ // Add to waiting queue
+ return new Promise((resolve) => {
+ const queueEntry = {
+ agentId,
+ metadata,
+ resolve,
+ enqueuedAt: Date.now()
+ }
+
+ waitingQueue[laneName].push(queueEntry)
+ stats.queued++
+
+ console.log(`โณ Queued for lane: ${agentId} โ ${laneName} (position ${waitingQueue[laneName].length})`)
+
+ // Set timeout
+ const timeoutId = setTimeout(() => {
+ const idx = waitingQueue[laneName].indexOf(queueEntry)
+ if (idx !== -1) {
+ waitingQueue[laneName].splice(idx, 1)
+ stats.timeouts++
+ resolve({
+ success: false,
+ error: 'Lane wait timeout',
+ lane: laneName,
+ waitedMs: timeoutMs
+ })
+ }
+ }, timeoutMs)
+
+ queueEntry.timeoutId = timeoutId
+ })
+}
+
+/**
+ * Process waiting queue when a lane slot becomes available
+ * @param {string} laneName - Lane name
+ */
+function processWaitingQueue(laneName) {
+ const queue = waitingQueue[laneName]
+ if (queue.length === 0) return
+
+ const lane = LANES[laneName]
+ const occupancy = laneOccupancy[laneName]
+
+ while (queue.length > 0 && occupancy.size < lane.maxConcurrent) {
+ const entry = queue.shift()
+ clearTimeout(entry.timeoutId)
+
+ const result = acquire(laneName, entry.agentId, entry.metadata)
+ if (result.success) {
+ result.waitedMs = Date.now() - entry.enqueuedAt
+ entry.resolve(result)
+ } else {
+ // Failed to acquire despite capacity - shouldn't happen
+ entry.resolve(result)
+ }
+ }
+}
+
+/**
+ * Promote a task to a higher priority lane
+ * @param {string} agentId - Agent identifier
+ * @param {string} targetLane - Target lane (must be higher priority)
+ * @returns {Object} - Promotion result
+ */
+function promote(agentId, targetLane) {
+ const targetLaneConfig = LANES[targetLane]
+ if (!targetLaneConfig) {
+ return { success: false, error: `Unknown lane: ${targetLane}` }
+ }
+
+ // Find current lane
+ let currentLane = null
+ for (const [laneName, occupancy] of Object.entries(laneOccupancy)) {
+ if (occupancy.has(agentId)) {
+ currentLane = laneName
+ break
+ }
+ }
+
+ if (!currentLane) {
+ return { success: false, error: 'Agent not in any lane' }
+ }
+
+ // Check if promotion makes sense
+ if (LANES[currentLane].priority <= targetLaneConfig.priority) {
+ return { success: false, error: 'Target lane is not higher priority' }
+ }
+
+ // Check target capacity
+ if (!hasCapacity(targetLane)) {
+ return { success: false, error: 'Target lane at capacity' }
+ }
+
+ // Move agent
+ const data = laneOccupancy[currentLane].get(agentId)
+ laneOccupancy[currentLane].delete(agentId)
+ laneOccupancy[targetLane].set(agentId, data)
+
+ stats.promotions++
+
+ console.log(`โฌ๏ธ Lane promotion: ${agentId} ${currentLane} โ ${targetLane}`)
+
+ // Process queue for old lane
+ processWaitingQueue(currentLane)
+
+ return {
+ success: true,
+ fromLane: currentLane,
+ toLane: targetLane
+ }
+}
+
+/**
+ * Get overall lane statistics
+ * @returns {Object} - Lane statistics
+ */
+function getStats() {
+ const laneStats = {}
+
+ for (const laneName of Object.keys(LANES)) {
+ laneStats[laneName] = getLaneStatus(laneName)
+ }
+
+ const totalOccupancy = Object.values(laneOccupancy)
+ .reduce((sum, map) => sum + map.size, 0)
+ const totalCapacity = Object.values(LANES)
+ .reduce((sum, lane) => sum + lane.maxConcurrent, 0)
+ const totalQueued = Object.values(waitingQueue)
+ .reduce((sum, queue) => sum + queue.length, 0)
+
+ return {
+ lanes: laneStats,
+ totalOccupancy,
+ totalCapacity,
+ utilizationPercent: ((totalOccupancy / totalCapacity) * 100).toFixed(1) + '%',
+ totalQueued,
+ ...stats
+ }
+}
+
+/**
+ * Get agent's current lane
+ * @param {string} agentId - Agent identifier
+ * @returns {string|null} - Lane name or null
+ */
+function getAgentLane(agentId) {
+ for (const [laneName, occupancy] of Object.entries(laneOccupancy)) {
+ if (occupancy.has(agentId)) {
+ return laneName
+ }
+ }
+ return null
+}
+
+/**
+ * Force release all agents from a lane (for emergencies)
+ * @param {string} laneName - Lane name
+ * @returns {number} - Number of agents released
+ */
+function clearLane(laneName) {
+ const occupancy = laneOccupancy[laneName]
+ if (!occupancy) return 0
+
+ const count = occupancy.size
+ const agents = Array.from(occupancy.keys())
+
+ for (const agentId of agents) {
+ release(agentId)
+ }
+
+ console.log(`๐งน Cleared lane ${laneName}: ${count} agents`)
+ return count
+}
+
+/**
+ * Update lane configuration dynamically
+ * @param {string} laneName - Lane name
+ * @param {Object} config - New configuration
+ * @returns {Object} - Updated lane config
+ */
+function updateLaneConfig(laneName, config) {
+ const lane = LANES[laneName]
+ if (!lane) return null
+
+ if (config.maxConcurrent !== undefined) {
+ lane.maxConcurrent = config.maxConcurrent
+
+ // Process queue if we increased capacity
+ processWaitingQueue(laneName)
+ }
+
+ return { ...lane }
+}
+
+export {
+ LANES,
+ determineLane,
+ hasCapacity,
+ getLaneStatus,
+ acquire,
+ release,
+ waitForLane,
+ promote,
+ getStats,
+ getAgentLane,
+ clearLane,
+ updateLaneConfig
+}
diff --git a/server/services/executionLanes.test.js b/server/services/executionLanes.test.js
new file mode 100644
index 0000000..8a1638d
--- /dev/null
+++ b/server/services/executionLanes.test.js
@@ -0,0 +1,333 @@
+import { describe, it, expect, beforeEach, vi } from 'vitest';
+import {
+ LANES,
+ determineLane,
+ hasCapacity,
+ getLaneStatus,
+ acquire,
+ release,
+ waitForLane,
+ promote,
+ getStats,
+ getAgentLane,
+ clearLane,
+ updateLaneConfig
+} from './executionLanes.js';
+
+// Mock the cosEvents
+vi.mock('./cos.js', () => ({
+ cosEvents: {
+ emit: vi.fn()
+ }
+}));
+
+describe('Execution Lanes Service', () => {
+ beforeEach(() => {
+ // Clear all lanes before each test
+ clearLane('critical');
+ clearLane('standard');
+ clearLane('background');
+ });
+
+ describe('LANES', () => {
+ it('should have all required lanes', () => {
+ expect(LANES.critical).toBeDefined();
+ expect(LANES.standard).toBeDefined();
+ expect(LANES.background).toBeDefined();
+ });
+
+ it('should have correct max concurrent values', () => {
+ expect(LANES.critical.maxConcurrent).toBe(1);
+ expect(LANES.standard.maxConcurrent).toBe(2);
+ expect(LANES.background.maxConcurrent).toBe(3);
+ });
+
+ it('should have priority ordering', () => {
+ expect(LANES.critical.priority).toBeLessThan(LANES.standard.priority);
+ expect(LANES.standard.priority).toBeLessThan(LANES.background.priority);
+ });
+ });
+
+ describe('determineLane', () => {
+ it('should return lane name when given string', () => {
+ expect(determineLane('critical')).toBe('critical');
+ expect(determineLane('standard')).toBe('standard');
+ expect(determineLane('background')).toBe('background');
+ });
+
+ it('should default to standard for invalid lane name', () => {
+ expect(determineLane('invalid')).toBe('standard');
+ });
+
+ it('should determine critical for URGENT/CRITICAL priority', () => {
+ expect(determineLane({ priority: 'URGENT' })).toBe('critical');
+ expect(determineLane({ priority: 'CRITICAL' })).toBe('critical');
+ });
+
+ it('should determine standard for HIGH/MEDIUM priority', () => {
+ expect(determineLane({ priority: 'HIGH' })).toBe('standard');
+ expect(determineLane({ priority: 'MEDIUM' })).toBe('standard');
+ });
+
+ it('should determine background for LOW/IDLE priority', () => {
+ expect(determineLane({ priority: 'LOW' })).toBe('background');
+ expect(determineLane({ priority: 'IDLE' })).toBe('background');
+ });
+
+ it('should use isUserTask for default priority', () => {
+ expect(determineLane({ metadata: { isUserTask: true } })).toBe('standard');
+ expect(determineLane({ metadata: { isUserTask: false } })).toBe('background');
+ expect(determineLane({})).toBe('background');
+ });
+ });
+
+ describe('hasCapacity', () => {
+ it('should return true for empty lanes', () => {
+ expect(hasCapacity('critical')).toBe(true);
+ expect(hasCapacity('standard')).toBe(true);
+ expect(hasCapacity('background')).toBe(true);
+ });
+
+ it('should return false for unknown lane', () => {
+ expect(hasCapacity('unknown')).toBe(false);
+ });
+
+ it('should return false when lane is at capacity', () => {
+ acquire('critical', 'agent-1');
+ expect(hasCapacity('critical')).toBe(false);
+ });
+
+ it('should return true when lane has capacity', () => {
+ acquire('standard', 'agent-1');
+ expect(hasCapacity('standard')).toBe(true); // Can hold 2
+ });
+ });
+
+ describe('getLaneStatus', () => {
+ it('should return lane status', () => {
+ const status = getLaneStatus('standard');
+
+ expect(status.name).toBe('standard');
+ expect(status.maxConcurrent).toBe(2);
+ expect(status.currentOccupancy).toBe(0);
+ expect(status.available).toBe(2);
+ });
+
+ it('should return null for unknown lane', () => {
+ expect(getLaneStatus('unknown')).toBeNull();
+ });
+
+ it('should include occupant details', () => {
+ acquire('standard', 'agent-1', { taskId: 'task-1' });
+ const status = getLaneStatus('standard');
+
+ expect(status.occupants.length).toBe(1);
+ expect(status.occupants[0].agentId).toBe('agent-1');
+ expect(status.occupants[0].taskId).toBe('task-1');
+ });
+ });
+
+ describe('acquire', () => {
+ it('should acquire a lane slot', () => {
+ const result = acquire('standard', 'agent-1', { taskId: 'task-1' });
+
+ expect(result.success).toBe(true);
+ expect(result.lane).toBe('standard');
+ });
+
+ it('should fail for unknown lane', () => {
+ const result = acquire('unknown', 'agent-1');
+
+ expect(result.success).toBe(false);
+ expect(result.error).toContain('Unknown lane');
+ });
+
+ it('should fail when lane is at capacity', () => {
+ acquire('critical', 'agent-1');
+ const result = acquire('critical', 'agent-2');
+
+ expect(result.success).toBe(false);
+ expect(result.error).toBe('Lane at capacity');
+ });
+
+ it('should return success if already acquired', () => {
+ acquire('standard', 'agent-1');
+ const result = acquire('standard', 'agent-1');
+
+ expect(result.success).toBe(true);
+ expect(result.alreadyAcquired).toBe(true);
+ });
+ });
+
+ describe('release', () => {
+ it('should release a lane slot', () => {
+ acquire('standard', 'agent-1');
+ const result = release('agent-1');
+
+ expect(result.success).toBe(true);
+ expect(result.lane).toBe('standard');
+ expect(result.runningMs).toBeGreaterThanOrEqual(0);
+ });
+
+ it('should fail for agent not in any lane', () => {
+ const result = release('nonexistent-agent');
+
+ expect(result.success).toBe(false);
+ expect(result.error).toBe('Agent not in any lane');
+ });
+
+ it('should free up capacity', () => {
+ acquire('critical', 'agent-1');
+ expect(hasCapacity('critical')).toBe(false);
+
+ release('agent-1');
+ expect(hasCapacity('critical')).toBe(true);
+ });
+ });
+
+ describe('waitForLane', () => {
+ it('should acquire immediately if capacity available', async () => {
+ const result = await waitForLane('standard', 'agent-1', {
+ metadata: { taskId: 'task-1' }
+ });
+
+ expect(result.success).toBe(true);
+ });
+
+ it('should timeout when lane stays at capacity', async () => {
+ acquire('critical', 'agent-1');
+
+ const result = await waitForLane('critical', 'agent-2', {
+ timeoutMs: 100
+ });
+
+ expect(result.success).toBe(false);
+ expect(result.error).toBe('Lane wait timeout');
+ });
+
+ it('should acquire when slot becomes available', async () => {
+ acquire('critical', 'agent-1');
+
+ // Start waiting
+ const waitPromise = waitForLane('critical', 'agent-2', {
+ timeoutMs: 1000
+ });
+
+ // Release after short delay
+ setTimeout(() => release('agent-1'), 50);
+
+ const result = await waitPromise;
+ expect(result.success).toBe(true);
+ expect(result.waitedMs).toBeGreaterThan(0);
+ });
+ });
+
+ describe('promote', () => {
+ it('should promote agent to higher priority lane', () => {
+ acquire('background', 'agent-1', { taskId: 'task-1' });
+ const result = promote('agent-1', 'standard');
+
+ expect(result.success).toBe(true);
+ expect(result.fromLane).toBe('background');
+ expect(result.toLane).toBe('standard');
+ });
+
+ it('should fail for unknown target lane', () => {
+ acquire('standard', 'agent-1');
+ const result = promote('agent-1', 'unknown');
+
+ expect(result.success).toBe(false);
+ });
+
+ it('should fail if agent not in any lane', () => {
+ const result = promote('nonexistent', 'critical');
+
+ expect(result.success).toBe(false);
+ expect(result.error).toBe('Agent not in any lane');
+ });
+
+ it('should fail if target is lower priority', () => {
+ acquire('critical', 'agent-1');
+ const result = promote('agent-1', 'background');
+
+ expect(result.success).toBe(false);
+ expect(result.error).toBe('Target lane is not higher priority');
+ });
+
+ it('should fail if target lane at capacity', () => {
+ acquire('critical', 'agent-1');
+ acquire('standard', 'agent-2');
+ const result = promote('agent-2', 'critical');
+
+ expect(result.success).toBe(false);
+ expect(result.error).toBe('Target lane at capacity');
+ });
+ });
+
+ describe('getStats', () => {
+ it('should return overall statistics', () => {
+ acquire('standard', 'agent-1');
+ acquire('background', 'agent-2');
+
+ const stats = getStats();
+
+ expect(stats.totalOccupancy).toBe(2);
+ expect(stats.totalCapacity).toBe(6); // 1 + 2 + 3
+ expect(stats.lanes.standard.currentOccupancy).toBe(1);
+ expect(stats.lanes.background.currentOccupancy).toBe(1);
+ });
+
+ it('should track acquired/released counts', () => {
+ acquire('standard', 'agent-1');
+ release('agent-1');
+
+ const stats = getStats();
+ expect(stats.acquired).toBeGreaterThan(0);
+ expect(stats.released).toBeGreaterThan(0);
+ });
+ });
+
+ describe('getAgentLane', () => {
+ it('should return lane for agent', () => {
+ acquire('standard', 'agent-1');
+ expect(getAgentLane('agent-1')).toBe('standard');
+ });
+
+ it('should return null for unknown agent', () => {
+ expect(getAgentLane('nonexistent')).toBeNull();
+ });
+ });
+
+ describe('clearLane', () => {
+ it('should clear all agents from lane', () => {
+ acquire('standard', 'agent-1');
+ acquire('standard', 'agent-2');
+
+ const count = clearLane('standard');
+
+ expect(count).toBe(2);
+ expect(hasCapacity('standard')).toBe(true);
+ expect(getLaneStatus('standard').currentOccupancy).toBe(0);
+ });
+
+ it('should return 0 for unknown lane', () => {
+ expect(clearLane('unknown')).toBe(0);
+ });
+ });
+
+ describe('updateLaneConfig', () => {
+ it('should update lane max concurrent', () => {
+ const originalMax = LANES.standard.maxConcurrent;
+ updateLaneConfig('standard', { maxConcurrent: 5 });
+
+ expect(LANES.standard.maxConcurrent).toBe(5);
+
+ // Restore
+ updateLaneConfig('standard', { maxConcurrent: originalMax });
+ });
+
+ it('should return null for unknown lane', () => {
+ expect(updateLaneConfig('unknown', {})).toBeNull();
+ });
+ });
+});
diff --git a/server/services/history.js b/server/services/history.js
index b3547a1..323dd11 100644
--- a/server/services/history.js
+++ b/server/services/history.js
@@ -1,12 +1,9 @@
-import { readFile, writeFile, mkdir } from 'fs/promises';
-import { existsSync } from 'fs';
-import { join, dirname } from 'path';
-import { fileURLToPath } from 'url';
+import { writeFile } from 'fs/promises';
+import { join } from 'path';
import { v4 as uuidv4 } from 'uuid';
+import { ensureDir, PATHS, readJSONFile } from '../lib/fileUtils.js';
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = dirname(__filename);
-const DATA_DIR = join(__dirname, '../../data');
+const DATA_DIR = PATHS.data;
const HISTORY_FILE = join(DATA_DIR, 'history.json');
const MAX_ENTRIES = 500;
@@ -16,9 +13,7 @@ let cacheTimestamp = 0;
const CACHE_TTL_MS = 2000; // 2 second cache TTL
async function ensureDataDir() {
- if (!existsSync(DATA_DIR)) {
- await mkdir(DATA_DIR, { recursive: true });
- }
+ await ensureDir(DATA_DIR);
}
async function loadHistory() {
@@ -30,14 +25,7 @@ async function loadHistory() {
await ensureDataDir();
- if (!existsSync(HISTORY_FILE)) {
- historyCache = { entries: [] };
- cacheTimestamp = now;
- return historyCache;
- }
-
- const content = await readFile(HISTORY_FILE, 'utf-8');
- historyCache = JSON.parse(content);
+ historyCache = await readJSONFile(HISTORY_FILE, { entries: [] });
cacheTimestamp = now;
return historyCache;
}
diff --git a/server/services/lmStudioManager.js b/server/services/lmStudioManager.js
new file mode 100644
index 0000000..6e25f50
--- /dev/null
+++ b/server/services/lmStudioManager.js
@@ -0,0 +1,406 @@
+/**
+ * LM Studio Manager Service
+ *
+ * Manages local LM Studio models for free local thinking.
+ * Provides model discovery, loading, unloading, and downloading.
+ */
+
+import { cosEvents } from './cosEvents.js'
+
+// Default LM Studio configuration
+const DEFAULT_CONFIG = {
+ baseUrl: 'http://localhost:1234',
+ timeout: 30000,
+ defaultThinkingModel: 'gpt-oss-20b'
+}
+
+// Cached state
+let config = { ...DEFAULT_CONFIG }
+let isAvailable = null
+let loadedModels = []
+let availableModels = []
+let lastCheckAt = null
+
+// Status tracking
+const status = {
+ lastError: null,
+ lastSuccessAt: null,
+ consecutiveErrors: 0
+}
+
+/**
+ * Make a request to LM Studio API
+ * @param {string} endpoint - API endpoint
+ * @param {Object} options - Fetch options
+ * @returns {Promise<*>} - Response data
+ */
+async function lmStudioRequest(endpoint, options = {}) {
+ const url = `${config.baseUrl}${endpoint}`
+ const controller = new AbortController()
+ const timeoutId = setTimeout(() => controller.abort(), options.timeout || config.timeout)
+
+ const response = await fetch(url, {
+ ...options,
+ signal: controller.signal,
+ headers: {
+ 'Content-Type': 'application/json',
+ ...options.headers
+ }
+ }).finally(() => clearTimeout(timeoutId))
+
+ if (!response.ok) {
+ throw new Error(`LM Studio API error: ${response.status} ${response.statusText}`)
+ }
+
+ return response.json()
+}
+
+/**
+ * Check if LM Studio is available
+ * @returns {Promise} - True if available
+ */
+async function checkLMStudioAvailable() {
+ const now = Date.now()
+
+ // Use cached result if recent (within 30 seconds)
+ if (lastCheckAt && now - lastCheckAt < 30000 && isAvailable !== null) {
+ return isAvailable
+ }
+
+ try {
+ await lmStudioRequest('/v1/models', { timeout: 5000 })
+ isAvailable = true
+ status.lastSuccessAt = now
+ status.consecutiveErrors = 0
+ status.lastError = null
+ lastCheckAt = now
+ return true
+ } catch (err) {
+ isAvailable = false
+ status.lastError = err.message
+ status.consecutiveErrors++
+ lastCheckAt = now
+ return false
+ }
+}
+
+/**
+ * Get currently loaded models
+ * @param {boolean} forceRefresh - Force refresh from API
+ * @returns {Promise} - Loaded models
+ */
+async function getLoadedModels(forceRefresh = false) {
+ if (!forceRefresh && loadedModels.length > 0) {
+ return loadedModels
+ }
+
+ const available = await checkLMStudioAvailable()
+ if (!available) {
+ return []
+ }
+
+ try {
+ const response = await lmStudioRequest('/v1/models')
+ loadedModels = (response.data || []).map(model => ({
+ id: model.id,
+ object: model.object,
+ created: model.created,
+ ownedBy: model.owned_by
+ }))
+ return loadedModels
+ } catch (err) {
+ console.error(`โ ๏ธ Failed to get LM Studio models: ${err.message}`)
+ return []
+ }
+}
+
+/**
+ * Get available models in LM Studio catalog
+ * Note: This requires LM Studio's /lmstudio.* endpoints if available
+ * @returns {Promise} - Available models
+ */
+async function getAvailableModels() {
+ const available = await checkLMStudioAvailable()
+ if (!available) {
+ return []
+ }
+
+ // LM Studio doesn't have a public catalog API by default
+ // Return loaded models as available for now
+ return getLoadedModels(true)
+}
+
+/**
+ * Download a model from LM Studio catalog
+ * Note: Requires LM Studio API support for downloads
+ *
+ * @param {string} modelId - Model identifier to download
+ * @returns {Promise} - Download result
+ */
+async function downloadModel(modelId) {
+ const available = await checkLMStudioAvailable()
+ if (!available) {
+ return {
+ success: false,
+ error: 'LM Studio not available'
+ }
+ }
+
+ // LM Studio doesn't have a public download API
+ // This would need to be implemented via LM Studio's developer API
+ console.log(`๐ฅ Model download requested: ${modelId}`)
+ cosEvents.emit('lmstudio:downloadRequested', { modelId })
+
+ return {
+ success: false,
+ error: 'Model downloading not yet supported via API',
+ modelId,
+ instruction: 'Please download the model manually via LM Studio UI'
+ }
+}
+
+/**
+ * Load a model into LM Studio memory
+ * @param {string} modelId - Model identifier to load
+ * @returns {Promise} - Load result
+ */
+async function loadModel(modelId) {
+ const available = await checkLMStudioAvailable()
+ if (!available) {
+ return { success: false, error: 'LM Studio not available' }
+ }
+
+ try {
+ // Try to make a test completion to trigger model loading
+ await lmStudioRequest('/v1/chat/completions', {
+ method: 'POST',
+ body: JSON.stringify({
+ model: modelId,
+ messages: [{ role: 'user', content: 'test' }],
+ max_tokens: 1
+ }),
+ timeout: 60000 // Loading can take a while
+ })
+
+ // Refresh loaded models
+ await getLoadedModels(true)
+
+ console.log(`๐ฆ Model loaded: ${modelId}`)
+ cosEvents.emit('lmstudio:modelLoaded', { modelId })
+
+ return { success: true, modelId }
+ } catch (err) {
+ console.error(`โ ๏ธ Failed to load model ${modelId}: ${err.message}`)
+ return { success: false, error: err.message, modelId }
+ }
+}
+
+/**
+ * Unload a model from LM Studio memory
+ * Note: Requires LM Studio API support for unloading
+ *
+ * @param {string} modelId - Model identifier to unload
+ * @returns {Promise} - Unload result
+ */
+async function unloadModel(modelId) {
+ const available = await checkLMStudioAvailable()
+ if (!available) {
+ return { success: false, error: 'LM Studio not available' }
+ }
+
+ // LM Studio doesn't have a public unload API
+ console.log(`๐ค Model unload requested: ${modelId}`)
+ cosEvents.emit('lmstudio:unloadRequested', { modelId })
+
+ return {
+ success: false,
+ error: 'Model unloading not yet supported via API',
+ modelId,
+ instruction: 'Please unload the model manually via LM Studio UI'
+ }
+}
+
+/**
+ * Get the recommended thinking model
+ * @returns {Promise} - Model ID or null if none available
+ */
+async function getRecommendedThinkingModel() {
+ const models = await getLoadedModels()
+
+ if (models.length === 0) {
+ return null
+ }
+
+ // Prefer specific thinking-optimized models
+ const preferredModels = [
+ 'gpt-oss-20b',
+ 'deepseek-r1',
+ 'qwen2.5-coder',
+ 'codellama',
+ 'mistral',
+ 'llama'
+ ]
+
+ for (const preferred of preferredModels) {
+ const match = models.find(m =>
+ m.id.toLowerCase().includes(preferred.toLowerCase())
+ )
+ if (match) return match.id
+ }
+
+ // Return first available model
+ return models[0]?.id || null
+}
+
+/**
+ * Make a quick completion request for local thinking
+ * @param {string} prompt - Prompt text
+ * @param {Object} options - Completion options
+ * @returns {Promise} - Completion result
+ */
+async function quickCompletion(prompt, options = {}) {
+ const available = await checkLMStudioAvailable()
+ if (!available) {
+ return { success: false, error: 'LM Studio not available' }
+ }
+
+ const model = options.model || await getRecommendedThinkingModel()
+ if (!model) {
+ return { success: false, error: 'No model available' }
+ }
+
+ try {
+ const response = await lmStudioRequest('/v1/chat/completions', {
+ method: 'POST',
+ body: JSON.stringify({
+ model,
+ messages: [
+ ...(options.systemPrompt ? [{ role: 'system', content: options.systemPrompt }] : []),
+ { role: 'user', content: prompt }
+ ],
+ max_tokens: options.maxTokens || 512,
+ temperature: options.temperature ?? 0.7,
+ stream: false
+ }),
+ timeout: options.timeout || 30000
+ })
+
+ const content = response.choices?.[0]?.message?.content || ''
+
+ return {
+ success: true,
+ content,
+ model,
+ usage: response.usage
+ }
+ } catch (err) {
+ return { success: false, error: err.message, model }
+ }
+}
+
+/**
+ * Get embeddings from local model
+ * @param {string} text - Text to embed
+ * @param {Object} options - Embedding options
+ * @returns {Promise} - Embedding result
+ */
+async function getEmbeddings(text, options = {}) {
+ const available = await checkLMStudioAvailable()
+ if (!available) {
+ return { success: false, error: 'LM Studio not available' }
+ }
+
+ const model = options.model || 'text-embedding-nomic-embed-text-v2-moe'
+
+ try {
+ const response = await lmStudioRequest('/v1/embeddings', {
+ method: 'POST',
+ body: JSON.stringify({
+ model,
+ input: text
+ }),
+ timeout: options.timeout || 10000
+ })
+
+ const embedding = response.data?.[0]?.embedding || []
+
+ return {
+ success: true,
+ embedding,
+ model,
+ dimensions: embedding.length
+ }
+ } catch (err) {
+ return { success: false, error: err.message, model }
+ }
+}
+
+/**
+ * Get LM Studio status
+ * @returns {Promise} - Status information
+ */
+async function getStatus() {
+ const available = await checkLMStudioAvailable()
+ const models = available ? await getLoadedModels() : []
+
+ return {
+ available,
+ baseUrl: config.baseUrl,
+ loadedModels: models.length,
+ models: models.map(m => m.id),
+ recommendedThinkingModel: available ? await getRecommendedThinkingModel() : null,
+ lastCheckAt: lastCheckAt ? new Date(lastCheckAt).toISOString() : null,
+ lastSuccessAt: status.lastSuccessAt ? new Date(status.lastSuccessAt).toISOString() : null,
+ lastError: status.lastError,
+ consecutiveErrors: status.consecutiveErrors
+ }
+}
+
+/**
+ * Update configuration
+ * @param {Object} newConfig - New configuration
+ * @returns {Object} - Updated configuration
+ */
+function updateConfig(newConfig) {
+ if (newConfig.baseUrl) {
+ config.baseUrl = newConfig.baseUrl
+ isAvailable = null // Force recheck
+ lastCheckAt = null
+ }
+
+ if (newConfig.timeout) {
+ config.timeout = newConfig.timeout
+ }
+
+ if (newConfig.defaultThinkingModel) {
+ config.defaultThinkingModel = newConfig.defaultThinkingModel
+ }
+
+ return { ...config }
+}
+
+/**
+ * Reset cached state
+ */
+function resetCache() {
+ isAvailable = null
+ loadedModels = []
+ availableModels = []
+ lastCheckAt = null
+}
+
+export {
+ checkLMStudioAvailable,
+ getLoadedModels,
+ getAvailableModels,
+ downloadModel,
+ loadModel,
+ unloadModel,
+ getRecommendedThinkingModel,
+ quickCompletion,
+ getEmbeddings,
+ getStatus,
+ updateConfig,
+ resetCache,
+ DEFAULT_CONFIG
+}
diff --git a/server/services/localThinking.js b/server/services/localThinking.js
new file mode 100644
index 0000000..300e03a
--- /dev/null
+++ b/server/services/localThinking.js
@@ -0,0 +1,339 @@
+/**
+ * Local Thinking Service
+ *
+ * Uses LM Studio for free local analysis and thinking.
+ * Decides when to escalate to cloud providers.
+ */
+
+import * as lmStudio from './lmStudioManager.js'
+import { cosEvents } from './cosEvents.js'
+
+// Task complexity thresholds for escalation
+const COMPLEXITY_THRESHOLDS = {
+ simple: 0.3, // Local model can handle
+ medium: 0.6, // Local might work, cloud preferred
+ complex: 0.8, // Cloud recommended
+ advanced: 1.0 // Cloud required
+}
+
+// Keywords that suggest higher complexity
+const COMPLEXITY_KEYWORDS = {
+ high: [
+ 'refactor', 'architect', 'design', 'security', 'audit',
+ 'optimize', 'performance', 'migration', 'integrate',
+ 'test coverage', 'comprehensive', 'entire codebase'
+ ],
+ medium: [
+ 'implement', 'feature', 'fix bug', 'update', 'modify',
+ 'add functionality', 'improve', 'enhance'
+ ],
+ low: [
+ 'format', 'rename', 'typo', 'comment', 'simple',
+ 'minor', 'quick', 'small', 'straightforward'
+ ]
+}
+
+// Usage tracking
+const stats = {
+ localAnalyses: 0,
+ cloudEscalations: 0,
+ localSuccesses: 0,
+ localFailures: 0
+}
+
+/**
+ * Analyze a task to determine complexity and requirements
+ * @param {Object} task - Task to analyze
+ * @returns {Promise} - Analysis result
+ */
+async function analyzeTask(task) {
+ const description = task.description || ''
+
+ // Quick keyword-based complexity estimate
+ const keywordComplexity = estimateComplexityFromKeywords(description)
+
+ // Check if LM Studio is available for deeper analysis
+ const lmAvailable = await lmStudio.checkLMStudioAvailable()
+
+ if (!lmAvailable) {
+ return {
+ complexity: keywordComplexity,
+ escalateToCloud: keywordComplexity > COMPLEXITY_THRESHOLDS.medium,
+ reason: 'LM Studio unavailable, using keyword analysis',
+ localAnalysis: false,
+ suggestions: []
+ }
+ }
+
+ // Use local model for deeper analysis
+ stats.localAnalyses++
+
+ const analysisPrompt = `Analyze this task and respond with JSON only:
+Task: ${description.substring(0, 500)}
+
+Respond with:
+{
+ "complexity": 0.0-1.0 (how complex is this task),
+ "requiresCodeUnderstanding": true/false,
+ "requiresMultiFileChanges": true/false,
+ "requiresArchitecturalDecisions": true/false,
+ "suggestedApproach": "brief approach",
+ "potentialRisks": ["risk1", "risk2"]
+}`
+
+ const result = await lmStudio.quickCompletion(analysisPrompt, {
+ maxTokens: 256,
+ temperature: 0.3,
+ systemPrompt: 'You are a code analysis assistant. Respond only with valid JSON.'
+ })
+
+ if (!result.success) {
+ stats.localFailures++
+ return {
+ complexity: keywordComplexity,
+ escalateToCloud: keywordComplexity > COMPLEXITY_THRESHOLDS.medium,
+ reason: `Local analysis failed: ${result.error}`,
+ localAnalysis: false,
+ suggestions: []
+ }
+ }
+
+ stats.localSuccesses++
+
+ // Parse local model response
+ let analysis
+ try {
+ // Extract JSON from response
+ const jsonMatch = result.content.match(/\{[\s\S]*\}/)
+ analysis = jsonMatch ? JSON.parse(jsonMatch[0]) : null
+ } catch (err) {
+ analysis = null
+ }
+
+ if (!analysis) {
+ return {
+ complexity: keywordComplexity,
+ escalateToCloud: keywordComplexity > COMPLEXITY_THRESHOLDS.medium,
+ reason: 'Could not parse local analysis',
+ localAnalysis: true,
+ rawResponse: result.content,
+ suggestions: []
+ }
+ }
+
+ const complexity = analysis.complexity || keywordComplexity
+
+ // Determine if cloud escalation is needed
+ const needsCloud = shouldEscalateToCloud({
+ complexity,
+ requiresCodeUnderstanding: analysis.requiresCodeUnderstanding,
+ requiresMultiFileChanges: analysis.requiresMultiFileChanges,
+ requiresArchitecturalDecisions: analysis.requiresArchitecturalDecisions
+ })
+
+ if (needsCloud) {
+ stats.cloudEscalations++
+ }
+
+ return {
+ complexity,
+ escalateToCloud: needsCloud,
+ reason: needsCloud
+ ? 'Task requires advanced reasoning'
+ : 'Task suitable for local execution',
+ localAnalysis: true,
+ suggestedApproach: analysis.suggestedApproach,
+ potentialRisks: analysis.potentialRisks || [],
+ suggestions: analysis.potentialRisks || []
+ }
+}
+
+/**
+ * Estimate complexity from keywords
+ * @param {string} text - Text to analyze
+ * @returns {number} - Complexity score 0-1
+ */
+function estimateComplexityFromKeywords(text) {
+ const lower = text.toLowerCase()
+
+ let score = 0.5 // Default medium
+
+ // Check high complexity keywords
+ for (const keyword of COMPLEXITY_KEYWORDS.high) {
+ if (lower.includes(keyword)) {
+ score = Math.max(score, 0.8)
+ }
+ }
+
+ // Check medium complexity keywords
+ for (const keyword of COMPLEXITY_KEYWORDS.medium) {
+ if (lower.includes(keyword)) {
+ score = Math.max(score, 0.5)
+ }
+ }
+
+ // Check low complexity keywords
+ for (const keyword of COMPLEXITY_KEYWORDS.low) {
+ if (lower.includes(keyword)) {
+ score = Math.min(score, 0.3)
+ }
+ }
+
+ // Length-based adjustment
+ if (text.length > 500) score = Math.min(score + 0.1, 1)
+ if (text.length > 1000) score = Math.min(score + 0.1, 1)
+
+ return score
+}
+
+/**
+ * Determine if a task should escalate to cloud
+ * @param {Object} analysis - Task analysis
+ * @returns {boolean} - True if should escalate
+ */
+function shouldEscalateToCloud(analysis) {
+ // Always escalate very complex tasks
+ if (analysis.complexity > COMPLEXITY_THRESHOLDS.complex) {
+ return true
+ }
+
+ // Escalate if requires architectural decisions
+ if (analysis.requiresArchitecturalDecisions) {
+ return true
+ }
+
+ // Escalate if requires multi-file changes and is moderately complex
+ if (analysis.requiresMultiFileChanges && analysis.complexity > COMPLEXITY_THRESHOLDS.medium) {
+ return true
+ }
+
+ return false
+}
+
+/**
+ * Classify a memory using local model
+ * @param {string} content - Memory content
+ * @returns {Promise} - Classification result
+ */
+async function classifyMemory(content) {
+ const lmAvailable = await lmStudio.checkLMStudioAvailable()
+
+ if (!lmAvailable) {
+ return {
+ success: false,
+ error: 'LM Studio unavailable'
+ }
+ }
+
+ const classifyPrompt = `Classify this memory into one of: fact, learning, preference, observation, decision, context.
+Also extract relevant tags.
+
+Memory: ${content.substring(0, 300)}
+
+Respond with JSON only:
+{
+ "type": "fact|learning|preference|observation|decision|context",
+ "category": "codebase|patterns|bugs|performance|other",
+ "tags": ["tag1", "tag2"],
+ "importance": 0.0-1.0
+}`
+
+ const result = await lmStudio.quickCompletion(classifyPrompt, {
+ maxTokens: 128,
+ temperature: 0.3,
+ systemPrompt: 'You are a memory classification assistant. Respond only with valid JSON.'
+ })
+
+ if (!result.success) {
+ return { success: false, error: result.error }
+ }
+
+ try {
+ const jsonMatch = result.content.match(/\{[\s\S]*\}/)
+ const classification = jsonMatch ? JSON.parse(jsonMatch[0]) : null
+
+ return {
+ success: true,
+ ...classification
+ }
+ } catch (err) {
+ return {
+ success: false,
+ error: 'Could not parse classification',
+ rawResponse: result.content
+ }
+ }
+}
+
+/**
+ * Quick completion for simple local tasks
+ * @param {string} prompt - Prompt
+ * @param {Object} options - Options
+ * @returns {Promise} - Completion result
+ */
+async function quickCompletion(prompt, options = {}) {
+ return lmStudio.quickCompletion(prompt, options)
+}
+
+/**
+ * Get thinking service statistics
+ * @returns {Object} - Statistics
+ */
+function getStats() {
+ const lmStats = {
+ localAnalyses: stats.localAnalyses,
+ cloudEscalations: stats.cloudEscalations,
+ localSuccesses: stats.localSuccesses,
+ localFailures: stats.localFailures,
+ localSuccessRate: stats.localAnalyses > 0
+ ? ((stats.localSuccesses / stats.localAnalyses) * 100).toFixed(1) + '%'
+ : '0%',
+ escalationRate: stats.localAnalyses > 0
+ ? ((stats.cloudEscalations / stats.localAnalyses) * 100).toFixed(1) + '%'
+ : '0%'
+ }
+
+ return lmStats
+}
+
+/**
+ * Reset statistics
+ */
+function resetStats() {
+ stats.localAnalyses = 0
+ stats.cloudEscalations = 0
+ stats.localSuccesses = 0
+ stats.localFailures = 0
+}
+
+/**
+ * Pre-analyze a batch of tasks
+ * @param {Array} tasks - Tasks to analyze
+ * @returns {Promise} - Analysis results
+ */
+async function analyzeTaskBatch(tasks) {
+ const results = []
+
+ for (const task of tasks) {
+ const analysis = await analyzeTask(task)
+ results.push({
+ taskId: task.id,
+ description: task.description?.substring(0, 50),
+ ...analysis
+ })
+ }
+
+ return results
+}
+
+export {
+ analyzeTask,
+ estimateComplexityFromKeywords,
+ shouldEscalateToCloud,
+ classifyMemory,
+ quickCompletion,
+ getStats,
+ resetStats,
+ analyzeTaskBatch,
+ COMPLEXITY_THRESHOLDS
+}
diff --git a/server/services/mediaService.js b/server/services/mediaService.js
new file mode 100644
index 0000000..340e46d
--- /dev/null
+++ b/server/services/mediaService.js
@@ -0,0 +1,186 @@
+import { spawn } from 'child_process';
+import { PassThrough } from 'stream';
+
+class MediaService {
+ constructor() {
+ this.videoProcess = null;
+ this.audioProcess = null;
+ this.videoStream = null;
+ this.audioStream = null;
+ this.devices = {
+ video: [],
+ audio: []
+ };
+ }
+
+ async listDevices() {
+ return new Promise((resolve, reject) => {
+ const ffmpeg = spawn('ffmpeg', [
+ '-f', 'avfoundation',
+ '-list_devices', 'true',
+ '-i', ''
+ ]);
+
+ let output = '';
+
+ ffmpeg.stderr.on('data', (data) => {
+ output += data.toString();
+ });
+
+ ffmpeg.on('close', () => {
+ const videoDevices = [];
+ const audioDevices = [];
+
+ const lines = output.split('\n');
+ let inVideoSection = false;
+ let inAudioSection = false;
+
+ for (const line of lines) {
+ if (line.includes('AVFoundation video devices:')) {
+ inVideoSection = true;
+ inAudioSection = false;
+ continue;
+ }
+ if (line.includes('AVFoundation audio devices:')) {
+ inVideoSection = false;
+ inAudioSection = true;
+ continue;
+ }
+
+ const match = line.match(/\[(\d+)\] (.+)/);
+ if (match) {
+ const [, id, name] = match;
+ if (inVideoSection && !name.includes('Capture screen')) {
+ videoDevices.push({ id, name: name.trim() });
+ } else if (inAudioSection) {
+ audioDevices.push({ id, name: name.trim() });
+ }
+ }
+ }
+
+ this.devices = { video: videoDevices, audio: audioDevices };
+ resolve(this.devices);
+ });
+
+ ffmpeg.on('error', reject);
+ });
+ }
+
+ startVideoStream(deviceId = '0') {
+ if (this.videoProcess) {
+ this.stopVideoStream();
+ }
+
+ this.videoStream = new PassThrough();
+
+ // Use MJPEG format for compatibility and low latency
+ this.videoProcess = spawn('ffmpeg', [
+ '-f', 'avfoundation',
+ '-video_size', '1280x720',
+ '-framerate', '30',
+ '-i', `${deviceId}:none`,
+ '-f', 'mjpeg',
+ '-q:v', '5',
+ '-'
+ ]);
+
+ this.videoProcess.stdout.pipe(this.videoStream);
+
+ this.videoProcess.stderr.on('data', (data) => {
+ const msg = data.toString();
+ if (!msg.includes('frame=') && !msg.includes('fps=')) {
+ console.log(`๐น FFmpeg video: ${msg.trim()}`);
+ }
+ });
+
+ this.videoProcess.on('error', (err) => {
+ console.error(`โ Video stream error: ${err.message}`);
+ });
+
+ this.videoProcess.on('close', () => {
+ console.log('๐น Video stream stopped');
+ this.videoStream = null;
+ });
+
+ return this.videoStream;
+ }
+
+ startAudioStream(deviceId = '0') {
+ if (this.audioProcess) {
+ this.stopAudioStream();
+ }
+
+ this.audioStream = new PassThrough();
+
+ // Use WebM format with Opus codec for web compatibility
+ this.audioProcess = spawn('ffmpeg', [
+ '-f', 'avfoundation',
+ '-i', `:${deviceId}`,
+ '-f', 'webm',
+ '-acodec', 'libopus',
+ '-ac', '1',
+ '-ar', '48000',
+ '-b:a', '128k',
+ '-'
+ ]);
+
+ this.audioProcess.stdout.pipe(this.audioStream);
+
+ this.audioProcess.stderr.on('data', (data) => {
+ const msg = data.toString();
+ if (!msg.includes('frame=') && !msg.includes('size=')) {
+ console.log(`๐ค FFmpeg audio: ${msg.trim()}`);
+ }
+ });
+
+ this.audioProcess.on('error', (err) => {
+ console.error(`โ Audio stream error: ${err.message}`);
+ });
+
+ this.audioProcess.on('close', () => {
+ console.log('๐ค Audio stream stopped');
+ this.audioStream = null;
+ });
+
+ return this.audioStream;
+ }
+
+ stopVideoStream() {
+ if (this.videoProcess) {
+ this.videoProcess.kill('SIGTERM');
+ this.videoProcess = null;
+ this.videoStream = null;
+ }
+ }
+
+ stopAudioStream() {
+ if (this.audioProcess) {
+ this.audioProcess.kill('SIGTERM');
+ this.audioProcess = null;
+ this.audioStream = null;
+ }
+ }
+
+ stopAll() {
+ this.stopVideoStream();
+ this.stopAudioStream();
+ }
+
+ isVideoStreaming() {
+ return this.videoProcess !== null && this.videoStream !== null;
+ }
+
+ isAudioStreaming() {
+ return this.audioProcess !== null && this.audioStream !== null;
+ }
+
+ getVideoStream() {
+ return this.videoStream;
+ }
+
+ getAudioStream() {
+ return this.audioStream;
+ }
+}
+
+export default new MediaService();
diff --git a/server/services/memory.js b/server/services/memory.js
index c8b1583..17430d7 100644
--- a/server/services/memory.js
+++ b/server/services/memory.js
@@ -5,7 +5,7 @@
* Stores facts, learnings, observations, decisions, preferences, and context.
*/
-import { readFile, writeFile, mkdir, readdir, rm } from 'fs/promises';
+import { writeFile, mkdir, readdir, rm } from 'fs/promises';
import { existsSync } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
@@ -13,6 +13,8 @@ import { v4 as uuidv4 } from 'uuid';
import { cosEvents, updateAgent } from './cos.js';
import { findTopK, findAboveThreshold, clusterBySimilarity, cosineSimilarity } from '../lib/vectorMath.js';
import * as notifications from './notifications.js';
+import { readJSONFile } from '../lib/fileUtils.js';
+import * as memoryBM25 from './memoryBM25.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
@@ -73,13 +75,8 @@ async function loadIndex() {
await ensureDirectories();
- if (!existsSync(INDEX_FILE)) {
- indexCache = { version: 1, lastUpdated: new Date().toISOString(), count: 0, memories: [] };
- return indexCache;
- }
-
- const content = await readFile(INDEX_FILE, 'utf-8');
- indexCache = JSON.parse(content);
+ const defaultIndex = { version: 1, lastUpdated: new Date().toISOString(), count: 0, memories: [] };
+ indexCache = await readJSONFile(INDEX_FILE, defaultIndex);
return indexCache;
}
@@ -101,13 +98,8 @@ async function loadEmbeddings() {
await ensureDirectories();
- if (!existsSync(EMBEDDINGS_FILE)) {
- embeddingsCache = { model: null, dimension: 0, vectors: {} };
- return embeddingsCache;
- }
-
- const content = await readFile(EMBEDDINGS_FILE, 'utf-8');
- embeddingsCache = JSON.parse(content);
+ const defaultEmbeddings = { model: null, dimension: 0, vectors: {} };
+ embeddingsCache = await readJSONFile(EMBEDDINGS_FILE, defaultEmbeddings);
return embeddingsCache;
}
@@ -125,10 +117,7 @@ async function saveEmbeddings(embeddings) {
*/
async function loadMemory(id) {
const memoryFile = join(MEMORIES_DIR, id, 'memory.json');
- if (!existsSync(memoryFile)) return null;
-
- const content = await readFile(memoryFile, 'utf-8');
- return JSON.parse(content);
+ return readJSONFile(memoryFile, null);
}
/**
@@ -221,6 +210,15 @@ export async function createMemory(data, embedding = null) {
await saveEmbeddings(embeddings);
}
+ // Index in BM25 for text search (async, non-blocking)
+ memoryBM25.indexMemory({
+ id: memory.id,
+ content: memory.content,
+ type: memory.type,
+ tags: memory.tags,
+ source: memory.sourceAppId
+ }).catch(err => console.error(`โ ๏ธ BM25 index error: ${err.message}`));
+
console.log(`๐ง Memory created: ${memory.type} - ${memory.summary.substring(0, 50)}...`);
cosEvents.emit('memory:created', { id, type: memory.type, summary: memory.summary });
@@ -336,6 +334,17 @@ export async function updateMemory(id, updates) {
await saveIndex(index);
}
+ // Update BM25 index if content changed
+ if (updates.content || updates.tags) {
+ memoryBM25.indexMemory({
+ id: memory.id,
+ content: memory.content,
+ type: memory.type,
+ tags: memory.tags,
+ source: memory.sourceAppId
+ }).catch(err => console.error(`โ ๏ธ BM25 index error: ${err.message}`));
+ }
+
console.log(`๐ง Memory updated: ${id}`);
cosEvents.emit('memory:updated', { id, updates });
@@ -388,9 +397,28 @@ export async function deleteMemory(id, hard = false) {
const embeddings = await loadEmbeddings();
delete embeddings.vectors[id];
await saveEmbeddings(embeddings);
+
+ // Remove from BM25 index
+ memoryBM25.removeMemoryFromIndex(id)
+ .catch(err => console.error(`โ ๏ธ BM25 remove error: ${err.message}`));
} else {
// Soft delete - mark as archived
- await updateMemory(id, { status: 'archived' });
+ // Note: We can't call updateMemory here as it would cause deadlock (both use withMemoryLock)
+ // Instead, we handle the soft delete logic directly within this lock
+ const memory = await loadMemory(id);
+ if (memory) {
+ memory.status = 'archived';
+ memory.updatedAt = new Date().toISOString();
+ await saveMemory(memory);
+
+ // Update index
+ const index = await loadIndex();
+ const idx = index.memories.findIndex(m => m.id === id);
+ if (idx !== -1) {
+ index.memories[idx].status = 'archived';
+ await saveIndex(index);
+ }
+ }
}
console.log(`๐ง Memory deleted: ${id} (hard: ${hard})`);
@@ -485,6 +513,10 @@ export async function rejectMemory(id) {
delete embeddings.vectors[id];
await saveEmbeddings(embeddings);
+ // Remove from BM25 index
+ memoryBM25.removeMemoryFromIndex(id)
+ .catch(err => console.error(`โ ๏ธ BM25 remove error: ${err.message}`));
+
console.log(`๐ง Memory rejected: ${id}`);
cosEvents.emit('memory:rejected', { id });
@@ -539,6 +571,119 @@ export async function searchMemories(queryEmbedding, options = {}) {
return { total: results.length, memories: results };
}
+/**
+ * Hybrid search combining BM25 text matching and vector similarity
+ * Uses Reciprocal Rank Fusion (RRF) to merge rankings
+ *
+ * @param {string} query - Text query for BM25
+ * @param {number[]} queryEmbedding - Vector embedding for semantic search
+ * @param {Object} options - Search options
+ * @returns {Promise<{total: number, memories: Array}>}
+ */
+export async function hybridSearchMemories(query, queryEmbedding, options = {}) {
+ const { limit = 20, minRelevance = 0.5, bm25Weight = 0.4, vectorWeight = 0.6 } = options
+
+ const index = await loadIndex()
+ const embeddings = await loadEmbeddings()
+ const indexMap = new Map(index.memories.map(m => [m.id, m]))
+
+ // Get BM25 results
+ const bm25Results = query
+ ? await memoryBM25.searchBM25(query, { limit: limit * 2, threshold: 0.05 })
+ : []
+
+ // Get vector results
+ let vectorResults = []
+ if (queryEmbedding && Object.keys(embeddings.vectors).length > 0) {
+ const similar = findAboveThreshold(queryEmbedding, embeddings.vectors, minRelevance * 0.5)
+ vectorResults = similar.slice(0, limit * 2).map(r => ({
+ id: r.id,
+ score: r.similarity
+ }))
+ }
+
+ // Apply Reciprocal Rank Fusion (RRF)
+ // RRF score = sum(1 / (k + rank)) across all rankings
+ const RRF_K = 60 // Standard RRF constant
+ const rrfScores = new Map()
+
+ // Add BM25 contributions
+ bm25Results.forEach((result, rank) => {
+ const current = rrfScores.get(result.id) || { bm25Rank: null, vectorRank: null, rrfScore: 0 }
+ current.bm25Rank = rank + 1
+ current.rrfScore += bm25Weight / (RRF_K + rank + 1)
+ rrfScores.set(result.id, current)
+ })
+
+ // Add vector contributions
+ vectorResults.forEach((result, rank) => {
+ const current = rrfScores.get(result.id) || { bm25Rank: null, vectorRank: null, rrfScore: 0 }
+ current.vectorRank = rank + 1
+ current.rrfScore += vectorWeight / (RRF_K + rank + 1)
+ rrfScores.set(result.id, current)
+ })
+
+ // Filter and sort by RRF score
+ let results = Array.from(rrfScores.entries())
+ .map(([id, data]) => {
+ const meta = indexMap.get(id)
+ if (!meta || meta.status !== 'active') return null
+
+ // Apply type/category/tag/app filters
+ if (options.types?.length > 0 && !options.types.includes(meta.type)) return null
+ if (options.categories?.length > 0 && !options.categories.includes(meta.category)) return null
+ if (options.tags?.length > 0 && !meta.tags.some(t => options.tags.includes(t))) return null
+ if (options.appId && meta.sourceAppId !== options.appId) return null
+
+ return {
+ ...meta,
+ rrfScore: data.rrfScore,
+ bm25Rank: data.bm25Rank,
+ vectorRank: data.vectorRank,
+ searchMethod: data.bm25Rank && data.vectorRank ? 'hybrid' :
+ data.bm25Rank ? 'bm25' : 'vector'
+ }
+ })
+ .filter(Boolean)
+ .sort((a, b) => b.rrfScore - a.rrfScore)
+ .slice(0, limit)
+
+ return { total: results.length, memories: results }
+}
+
+/**
+ * Rebuild the BM25 index from all memories
+ * Call this after bulk imports or to fix index inconsistencies
+ */
+export async function rebuildBM25Index() {
+ const index = await loadIndex()
+ const activeMemories = index.memories.filter(m => m.status === 'active')
+
+ // Load full content for each memory
+ const documents = []
+ for (const meta of activeMemories) {
+ const memory = await loadMemory(meta.id)
+ if (memory) {
+ documents.push({
+ id: memory.id,
+ content: memory.content,
+ type: memory.type,
+ tags: memory.tags,
+ source: memory.sourceAppId
+ })
+ }
+ }
+
+ return memoryBM25.rebuildIndex(documents)
+}
+
+/**
+ * Get BM25 index statistics
+ */
+export async function getBM25Stats() {
+ return memoryBM25.getStats()
+}
+
/**
* Get timeline data (memories grouped by date)
*/
@@ -915,3 +1060,10 @@ export function invalidateCaches() {
indexCache = null;
embeddingsCache = null;
}
+
+/**
+ * Flush BM25 index to disk
+ */
+export async function flushBM25Index() {
+ return memoryBM25.flush();
+}
diff --git a/server/services/memoryBM25.js b/server/services/memoryBM25.js
new file mode 100644
index 0000000..b6041e9
--- /dev/null
+++ b/server/services/memoryBM25.js
@@ -0,0 +1,247 @@
+/**
+ * BM25 Index Manager for Memory System
+ *
+ * Manages the BM25 inverted index for text-based memory search.
+ * Complements the vector-based semantic search with keyword matching.
+ */
+
+import { promises as fs } from 'fs'
+import path from 'path'
+import {
+ buildInvertedIndex,
+ addDocument,
+ removeDocument,
+ search,
+ createEmptyIndex,
+ serializeIndex,
+ deserializeIndex,
+ getIndexStats
+} from '../lib/bm25.js'
+
+const DATA_DIR = path.join(process.cwd(), 'data', 'cos', 'memory')
+const INDEX_FILE = path.join(DATA_DIR, 'bm25-index.json')
+
+// In-memory cache of the index
+let indexCache = null
+let isDirty = false
+
+/**
+ * Ensure the data directory exists
+ */
+async function ensureDataDir() {
+ await fs.mkdir(DATA_DIR, { recursive: true })
+}
+
+/**
+ * Load the BM25 index from disk
+ * @returns {Promise} - The BM25 index
+ */
+async function loadIndex() {
+ if (indexCache) return indexCache
+
+ await ensureDataDir()
+
+ const exists = await fs.access(INDEX_FILE).then(() => true).catch(() => false)
+
+ if (!exists) {
+ indexCache = createEmptyIndex()
+ return indexCache
+ }
+
+ const data = await fs.readFile(INDEX_FILE, 'utf-8')
+ indexCache = deserializeIndex(JSON.parse(data))
+ return indexCache
+}
+
+/**
+ * Save the BM25 index to disk
+ * @returns {Promise}
+ */
+async function saveIndex() {
+ if (!indexCache || !isDirty) return
+
+ await ensureDataDir()
+ const serialized = serializeIndex(indexCache)
+ await fs.writeFile(INDEX_FILE, JSON.stringify(serialized, null, 2))
+ isDirty = false
+
+ console.log(`๐พ BM25 index saved: ${indexCache.totalDocs} docs, ${Object.keys(indexCache.terms).length} terms`)
+}
+
+/**
+ * Build or rebuild the entire index from memories
+ * @param {Array<{id: string, content: string, type: string, tags: string[]}>} memories
+ * @returns {Promise} - Index statistics
+ */
+async function rebuildIndex(memories) {
+ await ensureDataDir()
+
+ // Convert memories to documents for indexing
+ const documents = memories.map(m => ({
+ id: m.id,
+ text: buildIndexableText(m)
+ }))
+
+ indexCache = buildInvertedIndex(documents)
+ isDirty = true
+
+ await saveIndex()
+
+ console.log(`๐ BM25 index rebuilt: ${indexCache.totalDocs} memories indexed`)
+ return getIndexStats(indexCache)
+}
+
+/**
+ * Build indexable text from a memory object
+ * Combines content, type, and tags for better matching
+ *
+ * @param {Object} memory - Memory object
+ * @returns {string} - Text for indexing
+ */
+function buildIndexableText(memory) {
+ const parts = []
+
+ if (memory.content) {
+ parts.push(memory.content)
+ }
+
+ if (memory.type) {
+ parts.push(memory.type)
+ }
+
+ if (memory.tags && Array.isArray(memory.tags)) {
+ parts.push(memory.tags.join(' '))
+ }
+
+ if (memory.source) {
+ parts.push(memory.source)
+ }
+
+ return parts.join(' ')
+}
+
+/**
+ * Add or update a memory in the index
+ * @param {Object} memory - Memory object with id, content, type, tags
+ * @returns {Promise}
+ */
+async function indexMemory(memory) {
+ const index = await loadIndex()
+
+ const text = buildIndexableText(memory)
+ addDocument(index, memory.id, text)
+ isDirty = true
+
+ // Save periodically (every 10 changes) or on explicit flush
+ if (isDirty && index.totalDocs % 10 === 0) {
+ await saveIndex()
+ }
+}
+
+/**
+ * Remove a memory from the index
+ * @param {string} memoryId - Memory ID to remove
+ * @returns {Promise}
+ */
+async function removeMemoryFromIndex(memoryId) {
+ const index = await loadIndex()
+ removeDocument(index, memoryId)
+ isDirty = true
+}
+
+/**
+ * Search memories using BM25 text matching
+ *
+ * @param {string} query - Search query
+ * @param {Object} options - Search options
+ * @param {number} options.limit - Maximum results (default 20)
+ * @param {number} options.threshold - Minimum score threshold (default 0.1)
+ * @returns {Promise>} - Ranked memory IDs with scores
+ */
+async function searchBM25(query, options = {}) {
+ const { limit = 20, threshold = 0.1 } = options
+
+ const index = await loadIndex()
+ const results = search(query, index, { limit, threshold })
+
+ return results.map(r => ({
+ id: r.docId,
+ score: r.score
+ }))
+}
+
+/**
+ * Get index statistics
+ * @returns {Promise} - Index statistics
+ */
+async function getStats() {
+ const index = await loadIndex()
+ return {
+ ...getIndexStats(index),
+ isDirty,
+ indexFile: INDEX_FILE
+ }
+}
+
+/**
+ * Flush pending changes to disk
+ * @returns {Promise}
+ */
+async function flush() {
+ await saveIndex()
+}
+
+/**
+ * Clear the entire index
+ * @returns {Promise}
+ */
+async function clearIndex() {
+ indexCache = createEmptyIndex()
+ isDirty = true
+ await saveIndex()
+ console.log('๐๏ธ BM25 index cleared')
+}
+
+/**
+ * Check if a memory exists in the index
+ * @param {string} memoryId - Memory ID
+ * @returns {Promise}
+ */
+async function hasMemory(memoryId) {
+ const index = await loadIndex()
+ return index.docIds.has(memoryId)
+}
+
+/**
+ * Batch index multiple memories efficiently
+ * @param {Array} memories - Memories to index
+ * @returns {Promise} - Number of memories indexed
+ */
+async function batchIndex(memories) {
+ const index = await loadIndex()
+
+ for (const memory of memories) {
+ const text = buildIndexableText(memory)
+ addDocument(index, memory.id, text)
+ }
+
+ isDirty = true
+ await saveIndex()
+
+ return memories.length
+}
+
+export {
+ loadIndex,
+ saveIndex,
+ rebuildIndex,
+ indexMemory,
+ removeMemoryFromIndex,
+ searchBM25,
+ getStats,
+ flush,
+ clearIndex,
+ hasMemory,
+ batchIndex,
+ buildIndexableText
+}
diff --git a/server/services/memoryExtractor.js b/server/services/memoryExtractor.js
index aa8bb47..20716ff 100644
--- a/server/services/memoryExtractor.js
+++ b/server/services/memoryExtractor.js
@@ -7,7 +7,7 @@
import { createMemory } from './memory.js';
import { generateMemoryEmbedding } from './memoryEmbeddings.js';
-import { cosEvents } from './cos.js';
+import { cosEvents } from './cosEvents.js';
import * as notifications from './notifications.js';
import { classifyMemories, isAvailable as isClassifierAvailable } from './memoryClassifier.js';
diff --git a/server/services/memoryRetriever.js b/server/services/memoryRetriever.js
index 1f14436..dda02e1 100644
--- a/server/services/memoryRetriever.js
+++ b/server/services/memoryRetriever.js
@@ -5,10 +5,13 @@
* Combines semantic search with importance scoring.
*/
-import { getMemories, searchMemories, getMemory } from './memory.js';
+import { getMemories, searchMemories, hybridSearchMemories, getMemory } from './memory.js';
import { generateQueryEmbedding, estimateTokens, truncateToTokens } from './memoryEmbeddings.js';
import { DEFAULT_MEMORY_CONFIG } from './memory.js';
+// Search mode preference
+const SEARCH_MODE = 'hybrid'; // 'hybrid' | 'vector' | 'bm25'
+
/**
* Get relevant memories for a task
* Returns formatted text ready for injection into agent prompt
@@ -20,28 +23,38 @@ export async function getRelevantMemories(task, options = {}) {
const memories = [];
let tokenCount = 0;
- // 1. Semantic search based on task description
+ // 1. Search based on task description (hybrid BM25 + vector or vector-only)
if (task.description) {
const queryEmbedding = await generateQueryEmbedding(task.description);
+ let searchResults = { memories: [] };
- if (queryEmbedding) {
- const searchResults = await searchMemories(queryEmbedding, {
+ if (SEARCH_MODE === 'hybrid' && queryEmbedding) {
+ // Use hybrid BM25 + vector search with reciprocal rank fusion
+ searchResults = await hybridSearchMemories(task.description, queryEmbedding, {
+ limit: 20,
+ minRelevance,
+ bm25Weight: 0.4,
+ vectorWeight: 0.6
+ });
+ } else if (queryEmbedding) {
+ // Fallback to vector-only search
+ searchResults = await searchMemories(queryEmbedding, {
minRelevance,
limit: 20
});
+ }
- for (const result of searchResults.memories) {
- const mem = await getMemory(result.id);
- if (mem) {
- const tokens = estimateTokens(mem.content);
- if (tokenCount + tokens <= maxTokens) {
- memories.push({
- ...mem,
- relevance: result.similarity,
- source: 'semantic'
- });
- tokenCount += tokens;
- }
+ for (const result of searchResults.memories) {
+ const mem = await getMemory(result.id);
+ if (mem) {
+ const tokens = estimateTokens(mem.content);
+ if (tokenCount + tokens <= maxTokens) {
+ memories.push({
+ ...mem,
+ relevance: result.rrfScore || result.similarity || 0.5,
+ source: result.searchMethod || 'semantic'
+ });
+ tokenCount += tokens;
}
}
}
diff --git a/server/services/missions.js b/server/services/missions.js
new file mode 100644
index 0000000..0d9a872
--- /dev/null
+++ b/server/services/missions.js
@@ -0,0 +1,420 @@
+/**
+ * Missions Service
+ *
+ * Long-term goals with sub-tasks and app ownership.
+ * Enables proactive task generation when user queue is empty.
+ */
+
+import { promises as fs } from 'fs'
+import path from 'path'
+import { v4 as uuidv4 } from 'uuid'
+import { cosEvents } from './cosEvents.js'
+
+const DATA_DIR = path.join(process.cwd(), 'data', 'cos', 'missions')
+
+// In-memory cache
+let missionsCache = null
+
+/**
+ * Ensure data directory exists
+ */
+async function ensureDataDir() {
+ await fs.mkdir(DATA_DIR, { recursive: true })
+}
+
+/**
+ * Load all missions
+ * @returns {Promise} - All missions
+ */
+async function loadMissions() {
+ if (missionsCache) return missionsCache
+
+ await ensureDataDir()
+
+ const files = await fs.readdir(DATA_DIR).catch(() => [])
+ const missions = []
+
+ for (const file of files) {
+ if (!file.endsWith('.json')) continue
+
+ const filePath = path.join(DATA_DIR, file)
+ const content = await fs.readFile(filePath, 'utf-8').catch(() => null)
+ if (content) {
+ const mission = JSON.parse(content)
+ missions.push(mission)
+ }
+ }
+
+ missionsCache = missions
+ return missions
+}
+
+/**
+ * Save a mission to disk
+ * @param {Object} mission - Mission object
+ */
+async function saveMission(mission) {
+ await ensureDataDir()
+ const filePath = path.join(DATA_DIR, `${mission.id}.json`)
+ await fs.writeFile(filePath, JSON.stringify(mission, null, 2))
+
+ // Update cache
+ if (missionsCache) {
+ const idx = missionsCache.findIndex(m => m.id === mission.id)
+ if (idx !== -1) {
+ missionsCache[idx] = mission
+ } else {
+ missionsCache.push(mission)
+ }
+ }
+}
+
+/**
+ * Create a new mission
+ * @param {Object} data - Mission data
+ * @returns {Promise} - Created mission
+ */
+async function createMission(data) {
+ const now = new Date().toISOString()
+
+ const mission = {
+ id: data.id || uuidv4(),
+ appId: data.appId,
+ name: data.name,
+ description: data.description || '',
+ goals: data.goals || [],
+ subTasks: data.subTasks || [],
+ progress: 0,
+ status: 'active', // active, paused, completed, archived
+ priority: data.priority || 'medium',
+ autonomyLevel: data.autonomyLevel || 'full', // full, notify, approval-required
+ createdAt: now,
+ updatedAt: now,
+ lastReviewedAt: null,
+ completedAt: null,
+ metrics: {
+ tasksGenerated: 0,
+ tasksCompleted: 0,
+ successRate: 0
+ }
+ }
+
+ await saveMission(mission)
+
+ console.log(`๐ฏ Mission created: ${mission.name} (${mission.appId})`)
+ cosEvents.emit('mission:created', { id: mission.id, appId: mission.appId })
+
+ return mission
+}
+
+/**
+ * Get mission by ID
+ * @param {string} id - Mission ID
+ * @returns {Promise} - Mission or null
+ */
+async function getMission(id) {
+ const missions = await loadMissions()
+ return missions.find(m => m.id === id) || null
+}
+
+/**
+ * Get missions for an app
+ * @param {string} appId - App identifier
+ * @returns {Promise} - App missions
+ */
+async function getMissionsForApp(appId) {
+ const missions = await loadMissions()
+ return missions.filter(m => m.appId === appId && m.status === 'active')
+}
+
+/**
+ * Get all active missions
+ * @returns {Promise} - Active missions
+ */
+async function getActiveMissions() {
+ const missions = await loadMissions()
+ return missions.filter(m => m.status === 'active')
+}
+
+/**
+ * Update a mission
+ * @param {string} id - Mission ID
+ * @param {Object} updates - Updates to apply
+ * @returns {Promise} - Updated mission or null
+ */
+async function updateMission(id, updates) {
+ const mission = await getMission(id)
+ if (!mission) return null
+
+ const updatableFields = [
+ 'name', 'description', 'goals', 'subTasks', 'progress',
+ 'status', 'priority', 'autonomyLevel', 'lastReviewedAt',
+ 'completedAt', 'metrics'
+ ]
+
+ for (const field of updatableFields) {
+ if (updates[field] !== undefined) {
+ mission[field] = updates[field]
+ }
+ }
+
+ mission.updatedAt = new Date().toISOString()
+ await saveMission(mission)
+
+ cosEvents.emit('mission:updated', { id: mission.id, updates })
+ return mission
+}
+
+/**
+ * Add a sub-task to a mission
+ * @param {string} missionId - Mission ID
+ * @param {Object} subTask - Sub-task data
+ * @returns {Promise} - Updated mission or null
+ */
+async function addSubTask(missionId, subTask) {
+ const mission = await getMission(missionId)
+ if (!mission) return null
+
+ const task = {
+ id: uuidv4(),
+ description: subTask.description,
+ status: 'pending', // pending, in_progress, completed, failed
+ priority: subTask.priority || 'medium',
+ createdAt: new Date().toISOString(),
+ completedAt: null,
+ result: null
+ }
+
+ mission.subTasks.push(task)
+ mission.metrics.tasksGenerated++
+ mission.updatedAt = new Date().toISOString()
+
+ await saveMission(mission)
+ return mission
+}
+
+/**
+ * Complete a sub-task
+ * @param {string} missionId - Mission ID
+ * @param {string} subTaskId - Sub-task ID
+ * @param {Object} result - Task result
+ * @returns {Promise} - Updated mission or null
+ */
+async function completeSubTask(missionId, subTaskId, result = {}) {
+ const mission = await getMission(missionId)
+ if (!mission) return null
+
+ const subTask = mission.subTasks.find(t => t.id === subTaskId)
+ if (!subTask) return null
+
+ subTask.status = result.success === false ? 'failed' : 'completed'
+ subTask.completedAt = new Date().toISOString()
+ subTask.result = result
+
+ // Update metrics
+ if (subTask.status === 'completed') {
+ mission.metrics.tasksCompleted++
+ }
+ mission.metrics.successRate = mission.metrics.tasksGenerated > 0
+ ? (mission.metrics.tasksCompleted / mission.metrics.tasksGenerated) * 100
+ : 0
+
+ // Calculate progress
+ const completed = mission.subTasks.filter(t => t.status === 'completed').length
+ const total = mission.subTasks.length
+ mission.progress = total > 0 ? (completed / total) * 100 : 0
+
+ // Check if mission is complete
+ if (mission.progress >= 100 && mission.goals.length > 0) {
+ mission.status = 'completed'
+ mission.completedAt = new Date().toISOString()
+ console.log(`๐ Mission completed: ${mission.name}`)
+ cosEvents.emit('mission:completed', { id: mission.id })
+ }
+
+ mission.updatedAt = new Date().toISOString()
+ await saveMission(mission)
+
+ return mission
+}
+
+/**
+ * Generate a task from a mission (for proactive execution)
+ * @param {string} missionId - Mission ID
+ * @returns {Promise} - Generated task or null
+ */
+async function generateMissionTask(missionId) {
+ const mission = await getMission(missionId)
+ if (!mission || mission.status !== 'active') return null
+
+ // Find pending sub-tasks
+ const pendingSubTasks = mission.subTasks.filter(t => t.status === 'pending')
+
+ if (pendingSubTasks.length > 0) {
+ // Return first pending sub-task as a COS task
+ const subTask = pendingSubTasks[0]
+ subTask.status = 'in_progress'
+ await saveMission(mission)
+
+ return {
+ id: `mission-${mission.id}-${subTask.id}`,
+ description: subTask.description,
+ metadata: {
+ missionId: mission.id,
+ missionName: mission.name,
+ subTaskId: subTask.id,
+ appId: mission.appId,
+ autonomyLevel: mission.autonomyLevel,
+ isMissionTask: true
+ },
+ priority: subTask.priority,
+ autoApprove: mission.autonomyLevel === 'full'
+ }
+ }
+
+ // No pending sub-tasks - might need to generate new ones based on goals
+ // This would typically involve AI analysis of the mission goals
+ return null
+}
+
+/**
+ * Generate proactive tasks from all active missions
+ * @param {Object} options - Generation options
+ * @returns {Promise} - Generated tasks
+ */
+async function generateProactiveTasks(options = {}) {
+ const { maxTasks = 3 } = options
+ const missions = await getActiveMissions()
+ const tasks = []
+
+ for (const mission of missions) {
+ if (tasks.length >= maxTasks) break
+
+ // Skip recently reviewed missions
+ if (mission.lastReviewedAt) {
+ const lastReview = new Date(mission.lastReviewedAt).getTime()
+ const hoursSinceReview = (Date.now() - lastReview) / (1000 * 60 * 60)
+ if (hoursSinceReview < 1) continue // Wait at least 1 hour between reviews
+ }
+
+ const task = await generateMissionTask(mission.id)
+ if (task) {
+ tasks.push(task)
+ }
+ }
+
+ return tasks
+}
+
+/**
+ * Record mission review (updates lastReviewedAt)
+ * @param {string} missionId - Mission ID
+ * @returns {Promise} - Updated mission or null
+ */
+async function recordMissionReview(missionId) {
+ return updateMission(missionId, {
+ lastReviewedAt: new Date().toISOString()
+ })
+}
+
+/**
+ * Get mission statistics
+ * @returns {Promise} - Mission statistics
+ */
+async function getStats() {
+ const missions = await loadMissions()
+
+ const byStatus = {}
+ let totalProgress = 0
+ let totalTasks = 0
+ let completedTasks = 0
+
+ for (const mission of missions) {
+ byStatus[mission.status] = (byStatus[mission.status] || 0) + 1
+ totalProgress += mission.progress
+ totalTasks += mission.subTasks.length
+ completedTasks += mission.subTasks.filter(t => t.status === 'completed').length
+ }
+
+ return {
+ totalMissions: missions.length,
+ byStatus,
+ averageProgress: missions.length > 0
+ ? (totalProgress / missions.length).toFixed(1) + '%'
+ : '0%',
+ totalSubTasks: totalTasks,
+ completedSubTasks: completedTasks,
+ overallCompletion: totalTasks > 0
+ ? ((completedTasks / totalTasks) * 100).toFixed(1) + '%'
+ : '0%'
+ }
+}
+
+/**
+ * Delete a mission
+ * @param {string} id - Mission ID
+ * @returns {Promise} - True if deleted
+ */
+async function deleteMission(id) {
+ const filePath = path.join(DATA_DIR, `${id}.json`)
+ await fs.unlink(filePath).catch(() => {})
+
+ if (missionsCache) {
+ missionsCache = missionsCache.filter(m => m.id !== id)
+ }
+
+ console.log(`๐๏ธ Mission deleted: ${id}`)
+ cosEvents.emit('mission:deleted', { id })
+ return true
+}
+
+/**
+ * Archive completed missions
+ * @returns {Promise} - Number of missions archived
+ */
+async function archiveCompletedMissions() {
+ const missions = await loadMissions()
+ let archived = 0
+
+ for (const mission of missions) {
+ if (mission.status === 'completed' && mission.completedAt) {
+ const completedDate = new Date(mission.completedAt).getTime()
+ const daysSinceCompletion = (Date.now() - completedDate) / (1000 * 60 * 60 * 24)
+
+ if (daysSinceCompletion > 7) {
+ await updateMission(mission.id, { status: 'archived' })
+ archived++
+ }
+ }
+ }
+
+ if (archived > 0) {
+ console.log(`๐ฆ Archived ${archived} completed missions`)
+ }
+
+ return archived
+}
+
+/**
+ * Invalidate cache (call after external changes)
+ */
+function invalidateCache() {
+ missionsCache = null
+}
+
+export {
+ createMission,
+ getMission,
+ getMissionsForApp,
+ getActiveMissions,
+ updateMission,
+ addSubTask,
+ completeSubTask,
+ generateMissionTask,
+ generateProactiveTasks,
+ recordMissionReview,
+ getStats,
+ deleteMission,
+ archiveCompletedMissions,
+ invalidateCache
+}
diff --git a/server/services/missions.test.js b/server/services/missions.test.js
new file mode 100644
index 0000000..615bb73
--- /dev/null
+++ b/server/services/missions.test.js
@@ -0,0 +1,375 @@
+import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
+import { promises as fs } from 'fs';
+import path from 'path';
+
+// Mock cosEvents before import
+vi.mock('./cos.js', () => ({
+ cosEvents: {
+ emit: vi.fn()
+ }
+}));
+
+import {
+ createMission,
+ getMission,
+ getMissionsForApp,
+ getActiveMissions,
+ updateMission,
+ addSubTask,
+ completeSubTask,
+ generateMissionTask,
+ generateProactiveTasks,
+ recordMissionReview,
+ getStats,
+ deleteMission,
+ archiveCompletedMissions,
+ invalidateCache
+} from './missions.js';
+
+const DATA_DIR = path.join(process.cwd(), 'data', 'cos', 'missions');
+
+describe('Missions Service', () => {
+ beforeEach(async () => {
+ invalidateCache();
+ // Clean up test missions
+ const files = await fs.readdir(DATA_DIR).catch(() => []);
+ for (const file of files) {
+ if (file.startsWith('test-')) {
+ await fs.unlink(path.join(DATA_DIR, file)).catch(() => {});
+ }
+ }
+ });
+
+ afterEach(() => {
+ invalidateCache();
+ });
+
+ describe('createMission', () => {
+ it('should create a new mission with defaults', async () => {
+ const mission = await createMission({
+ id: 'test-mission-1',
+ appId: 'test-app',
+ name: 'Test Mission'
+ });
+
+ expect(mission.id).toBe('test-mission-1');
+ expect(mission.appId).toBe('test-app');
+ expect(mission.name).toBe('Test Mission');
+ expect(mission.status).toBe('active');
+ expect(mission.progress).toBe(0);
+ expect(mission.autonomyLevel).toBe('full');
+
+ await deleteMission('test-mission-1');
+ });
+
+ it('should set custom fields', async () => {
+ const mission = await createMission({
+ id: 'test-mission-2',
+ appId: 'test-app',
+ name: 'Test Mission',
+ description: 'Test description',
+ goals: ['Goal 1', 'Goal 2'],
+ priority: 'high',
+ autonomyLevel: 'approval-required'
+ });
+
+ expect(mission.description).toBe('Test description');
+ expect(mission.goals).toEqual(['Goal 1', 'Goal 2']);
+ expect(mission.priority).toBe('high');
+ expect(mission.autonomyLevel).toBe('approval-required');
+
+ await deleteMission('test-mission-2');
+ });
+ });
+
+ describe('getMission', () => {
+ it('should retrieve a mission by ID', async () => {
+ const created = await createMission({
+ id: 'test-get-mission',
+ appId: 'test-app',
+ name: 'Get Test'
+ });
+
+ const retrieved = await getMission('test-get-mission');
+ expect(retrieved).not.toBeNull();
+ expect(retrieved.id).toBe('test-get-mission');
+
+ await deleteMission('test-get-mission');
+ });
+
+ it('should return null for non-existent mission', async () => {
+ const result = await getMission('nonexistent');
+ expect(result).toBeNull();
+ });
+ });
+
+ describe('getMissionsForApp', () => {
+ it('should get missions for a specific app', async () => {
+ await createMission({
+ id: 'test-app-mission-1',
+ appId: 'specific-app',
+ name: 'Mission 1'
+ });
+ await createMission({
+ id: 'test-app-mission-2',
+ appId: 'specific-app',
+ name: 'Mission 2'
+ });
+ await createMission({
+ id: 'test-app-mission-3',
+ appId: 'other-app',
+ name: 'Mission 3'
+ });
+
+ const missions = await getMissionsForApp('specific-app');
+ expect(missions.length).toBe(2);
+ expect(missions.every(m => m.appId === 'specific-app')).toBe(true);
+
+ await deleteMission('test-app-mission-1');
+ await deleteMission('test-app-mission-2');
+ await deleteMission('test-app-mission-3');
+ });
+ });
+
+ describe('updateMission', () => {
+ it('should update mission fields', async () => {
+ await createMission({
+ id: 'test-update-mission',
+ appId: 'test-app',
+ name: 'Update Test'
+ });
+
+ const updated = await updateMission('test-update-mission', {
+ progress: 50,
+ status: 'paused'
+ });
+
+ expect(updated.progress).toBe(50);
+ expect(updated.status).toBe('paused');
+
+ await deleteMission('test-update-mission');
+ });
+
+ it('should return null for non-existent mission', async () => {
+ const result = await updateMission('nonexistent', { progress: 50 });
+ expect(result).toBeNull();
+ });
+ });
+
+ describe('addSubTask', () => {
+ it('should add a sub-task to a mission', async () => {
+ await createMission({
+ id: 'test-subtask-mission',
+ appId: 'test-app',
+ name: 'SubTask Test'
+ });
+
+ const updated = await addSubTask('test-subtask-mission', {
+ description: 'Sub-task 1',
+ priority: 'high'
+ });
+
+ expect(updated.subTasks.length).toBe(1);
+ expect(updated.subTasks[0].description).toBe('Sub-task 1');
+ expect(updated.subTasks[0].status).toBe('pending');
+ expect(updated.metrics.tasksGenerated).toBe(1);
+
+ await deleteMission('test-subtask-mission');
+ });
+ });
+
+ describe('completeSubTask', () => {
+ it('should mark sub-task as completed', async () => {
+ await createMission({
+ id: 'test-complete-mission',
+ appId: 'test-app',
+ name: 'Complete Test'
+ });
+
+ const withTask = await addSubTask('test-complete-mission', {
+ description: 'Task to complete'
+ });
+
+ const subTaskId = withTask.subTasks[0].id;
+ const updated = await completeSubTask('test-complete-mission', subTaskId, {
+ success: true,
+ output: 'Task completed'
+ });
+
+ expect(updated.subTasks[0].status).toBe('completed');
+ expect(updated.metrics.tasksCompleted).toBe(1);
+ expect(updated.progress).toBe(100);
+
+ await deleteMission('test-complete-mission');
+ });
+
+ it('should mark sub-task as failed', async () => {
+ await createMission({
+ id: 'test-fail-mission',
+ appId: 'test-app',
+ name: 'Fail Test'
+ });
+
+ const withTask = await addSubTask('test-fail-mission', {
+ description: 'Task to fail'
+ });
+
+ const subTaskId = withTask.subTasks[0].id;
+ const updated = await completeSubTask('test-fail-mission', subTaskId, {
+ success: false,
+ error: 'Task failed'
+ });
+
+ expect(updated.subTasks[0].status).toBe('failed');
+ expect(updated.metrics.tasksCompleted).toBe(0);
+
+ await deleteMission('test-fail-mission');
+ });
+ });
+
+ describe('generateMissionTask', () => {
+ it('should generate task from pending sub-task', async () => {
+ await createMission({
+ id: 'test-generate-mission',
+ appId: 'test-app',
+ name: 'Generate Test'
+ });
+
+ await addSubTask('test-generate-mission', {
+ description: 'Pending task',
+ priority: 'high'
+ });
+
+ const task = await generateMissionTask('test-generate-mission');
+
+ expect(task).not.toBeNull();
+ expect(task.description).toBe('Pending task');
+ expect(task.metadata.missionId).toBe('test-generate-mission');
+ expect(task.metadata.isMissionTask).toBe(true);
+
+ await deleteMission('test-generate-mission');
+ });
+
+ it('should return null for mission with no pending tasks', async () => {
+ await createMission({
+ id: 'test-no-pending-mission',
+ appId: 'test-app',
+ name: 'No Pending Test'
+ });
+
+ const task = await generateMissionTask('test-no-pending-mission');
+ expect(task).toBeNull();
+
+ await deleteMission('test-no-pending-mission');
+ });
+ });
+
+ describe('generateProactiveTasks', () => {
+ it('should generate tasks from active missions', async () => {
+ await createMission({
+ id: 'test-proactive-1',
+ appId: 'test-app',
+ name: 'Proactive 1'
+ });
+
+ await addSubTask('test-proactive-1', {
+ description: 'Proactive task 1'
+ });
+
+ const tasks = await generateProactiveTasks({ maxTasks: 5 });
+ expect(tasks.length).toBeGreaterThanOrEqual(1);
+
+ await deleteMission('test-proactive-1');
+ });
+
+ it('should respect maxTasks limit', async () => {
+ await createMission({
+ id: 'test-proactive-limit',
+ appId: 'test-app',
+ name: 'Limit Test'
+ });
+
+ await addSubTask('test-proactive-limit', { description: 'Task 1' });
+ await addSubTask('test-proactive-limit', { description: 'Task 2' });
+ await addSubTask('test-proactive-limit', { description: 'Task 3' });
+
+ const tasks = await generateProactiveTasks({ maxTasks: 1 });
+ expect(tasks.length).toBeLessThanOrEqual(1);
+
+ await deleteMission('test-proactive-limit');
+ });
+ });
+
+ describe('recordMissionReview', () => {
+ it('should update lastReviewedAt', async () => {
+ await createMission({
+ id: 'test-review-mission',
+ appId: 'test-app',
+ name: 'Review Test'
+ });
+
+ const updated = await recordMissionReview('test-review-mission');
+ expect(updated.lastReviewedAt).not.toBeNull();
+
+ await deleteMission('test-review-mission');
+ });
+ });
+
+ describe('getStats', () => {
+ it('should return mission statistics', async () => {
+ await createMission({
+ id: 'test-stats-mission',
+ appId: 'test-app',
+ name: 'Stats Test'
+ });
+
+ const stats = await getStats();
+ expect(stats).toHaveProperty('totalMissions');
+ expect(stats).toHaveProperty('byStatus');
+ expect(stats).toHaveProperty('averageProgress');
+
+ await deleteMission('test-stats-mission');
+ });
+ });
+
+ describe('deleteMission', () => {
+ it('should delete a mission', async () => {
+ await createMission({
+ id: 'test-delete-mission',
+ appId: 'test-app',
+ name: 'Delete Test'
+ });
+
+ const deleted = await deleteMission('test-delete-mission');
+ expect(deleted).toBe(true);
+
+ const retrieved = await getMission('test-delete-mission');
+ expect(retrieved).toBeNull();
+ });
+ });
+
+ describe('archiveCompletedMissions', () => {
+ it('should archive old completed missions', async () => {
+ // Create a mission and mark it completed
+ await createMission({
+ id: 'test-archive-mission',
+ appId: 'test-app',
+ name: 'Archive Test'
+ });
+
+ await updateMission('test-archive-mission', {
+ status: 'completed',
+ completedAt: new Date(Date.now() - 8 * 24 * 60 * 60 * 1000).toISOString() // 8 days ago
+ });
+
+ const archived = await archiveCompletedMissions();
+ expect(archived).toBeGreaterThanOrEqual(0);
+
+ const mission = await getMission('test-archive-mission');
+ if (mission) {
+ expect(mission.status).toBe('archived');
+ await deleteMission('test-archive-mission');
+ }
+ });
+ });
+});
diff --git a/server/services/notifications.js b/server/services/notifications.js
index dc4290d..445fe8c 100644
--- a/server/services/notifications.js
+++ b/server/services/notifications.js
@@ -8,16 +8,13 @@
* - Health issues
*/
-import { readFile, writeFile, mkdir } from 'fs/promises';
-import { existsSync } from 'fs';
-import { join, dirname } from 'path';
-import { fileURLToPath } from 'url';
+import { writeFile } from 'fs/promises';
+import { join } from 'path';
import { v4 as uuidv4 } from 'uuid';
import { EventEmitter } from 'events';
+import { ensureDir, PATHS, readJSONFile } from '../lib/fileUtils.js';
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = dirname(__filename);
-const DATA_DIR = join(__dirname, '../../data');
+const DATA_DIR = PATHS.data;
const NOTIFICATIONS_FILE = join(DATA_DIR, 'notifications.json');
// Event emitter for notification changes
@@ -46,9 +43,7 @@ export const PRIORITY_LEVELS = {
* Ensure data directory exists
*/
async function ensureDirectory() {
- if (!existsSync(DATA_DIR)) {
- await mkdir(DATA_DIR, { recursive: true });
- }
+ await ensureDir(DATA_DIR);
}
/**
@@ -59,13 +54,7 @@ async function loadNotifications() {
await ensureDirectory();
- if (!existsSync(NOTIFICATIONS_FILE)) {
- notificationsCache = { version: 1, notifications: [] };
- return notificationsCache;
- }
-
- const content = await readFile(NOTIFICATIONS_FILE, 'utf-8');
- notificationsCache = JSON.parse(content);
+ notificationsCache = await readJSONFile(NOTIFICATIONS_FILE, { version: 1, notifications: [] });
return notificationsCache;
}
diff --git a/server/services/pm2.js b/server/services/pm2.js
index 3bcec44..576ecb5 100644
--- a/server/services/pm2.js
+++ b/server/services/pm2.js
@@ -1,5 +1,6 @@
import pm2 from 'pm2';
import { spawn } from 'child_process';
+import { existsSync } from 'fs';
/**
* Connect to PM2 daemon and run an action
@@ -229,3 +230,46 @@ export async function startWithCommand(name, cwd, command) {
});
});
}
+
+/**
+ * Start app(s) using ecosystem.config.cjs/js file
+ * This properly uses all env vars, scripts, args defined in the config
+ * @param {string} cwd Working directory containing ecosystem config
+ * @param {string[]} processNames Optional: specific processes to start (--only flag)
+ */
+export async function startFromEcosystem(cwd, processNames = []) {
+ return new Promise((resolve, reject) => {
+ const ecosystemFile = ['ecosystem.config.cjs', 'ecosystem.config.js']
+ .find(f => existsSync(`${cwd}/${f}`));
+
+ if (!ecosystemFile) {
+ return reject(new Error('No ecosystem.config.cjs or ecosystem.config.js found'));
+ }
+
+ const args = ['start', ecosystemFile];
+ if (processNames.length > 0) {
+ args.push('--only', processNames.join(','));
+ }
+
+ const child = spawn('pm2', args, { cwd, shell: false });
+ let stdout = '';
+ let stderr = '';
+
+ child.stdout.on('data', (data) => {
+ stdout += data.toString();
+ });
+
+ child.stderr.on('data', (data) => {
+ stderr += data.toString();
+ });
+
+ child.on('close', (code) => {
+ if (code !== 0) {
+ return reject(new Error(stderr || `pm2 start exited with code ${code}`));
+ }
+ resolve({ success: true, output: stdout });
+ });
+
+ child.on('error', reject);
+ });
+}
diff --git a/server/services/promptService.js b/server/services/promptService.js
index 60452fc..49b749c 100644
--- a/server/services/promptService.js
+++ b/server/services/promptService.js
@@ -1,183 +1,76 @@
-import { readFile, writeFile, mkdir } from 'fs/promises';
-import { existsSync } from 'fs';
-import { join, dirname } from 'path';
-import { fileURLToPath } from 'url';
-
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = dirname(__filename);
-const PROMPTS_DIR = join(__dirname, '../../data/prompts');
-
-let stageConfig = null;
-let variables = null;
-
/**
- * Load or reload prompts configuration
+ * Compatibility shim for PortOS services that import from promptService.js
+ * Re-exports toolkit prompts service functions
*/
-export async function loadPrompts() {
- const configPath = join(PROMPTS_DIR, 'stage-config.json');
- const varsPath = join(PROMPTS_DIR, 'variables.json');
- if (existsSync(configPath)) {
- stageConfig = JSON.parse(await readFile(configPath, 'utf-8'));
- } else {
- stageConfig = { stages: {} };
- }
+// This will be initialized by server/index.js and set via setAIToolkit()
+let aiToolkitInstance = null;
- if (existsSync(varsPath)) {
- variables = JSON.parse(await readFile(varsPath, 'utf-8'));
- } else {
- variables = { variables: {} };
- }
+export function setAIToolkit(toolkit) {
+ aiToolkitInstance = toolkit;
+}
- console.log(`๐ Loaded ${Object.keys(stageConfig.stages || {}).length} prompt stages`);
+export async function loadPrompts() {
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.init();
}
-/**
- * Get all stages with their metadata
- */
export function getStages() {
- return stageConfig?.stages || {};
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.getStages();
}
-/**
- * Get stage configuration
- */
export function getStage(stageName) {
- return stageConfig?.stages?.[stageName] || null;
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.getStage(stageName);
}
-/**
- * Get stage template content
- */
export async function getStageTemplate(stageName) {
- const templatePath = join(PROMPTS_DIR, 'stages', `${stageName}.md`);
- if (!existsSync(templatePath)) return null;
- return readFile(templatePath, 'utf-8');
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.getStageTemplate(stageName);
}
-/**
- * Update stage template
- */
export async function updateStageTemplate(stageName, content) {
- const stagesDir = join(PROMPTS_DIR, 'stages');
- if (!existsSync(stagesDir)) await mkdir(stagesDir, { recursive: true });
- await writeFile(join(stagesDir, `${stageName}.md`), content);
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.updateStageTemplate(stageName, content);
}
-/**
- * Update stage configuration
- */
export async function updateStageConfig(stageName, config) {
- if (!stageConfig) await loadPrompts();
- stageConfig.stages[stageName] = { ...stageConfig.stages[stageName], ...config };
- await writeFile(join(PROMPTS_DIR, 'stage-config.json'), JSON.stringify(stageConfig, null, 2));
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.updateStageConfig(stageName, config);
}
-/**
- * Get all variables
- */
export function getVariables() {
- return variables?.variables || {};
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.getVariables();
}
-/**
- * Get a single variable
- */
export function getVariable(key) {
- return variables?.variables?.[key] || null;
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.getVariable(key);
}
-/**
- * Update a variable
- */
export async function updateVariable(key, data) {
- if (!variables) await loadPrompts();
- variables.variables[key] = { ...variables.variables[key], ...data };
- await writeFile(join(PROMPTS_DIR, 'variables.json'), JSON.stringify(variables, null, 2));
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.updateVariable(key, data);
}
-/**
- * Create a new variable
- */
export async function createVariable(key, data) {
- if (!variables) await loadPrompts();
- if (variables.variables[key]) {
- throw new Error(`Variable ${key} already exists`);
- }
- variables.variables[key] = data;
- await writeFile(join(PROMPTS_DIR, 'variables.json'), JSON.stringify(variables, null, 2));
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.createVariable(key, data);
}
-/**
- * Delete a variable
- */
export async function deleteVariable(key) {
- if (!variables) await loadPrompts();
- delete variables.variables[key];
- await writeFile(join(PROMPTS_DIR, 'variables.json'), JSON.stringify(variables, null, 2));
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.deleteVariable(key);
}
-/**
- * Build a prompt from template and variables
- */
-export async function buildPrompt(stageName, data = {}) {
- const stage = getStage(stageName);
- if (!stage) throw new Error(`Stage ${stageName} not found`);
-
- let template = await getStageTemplate(stageName);
- if (!template) throw new Error(`Template for ${stageName} not found`);
-
- // Collect all variables needed for this stage
- const allVars = { ...data };
- for (const varName of stage.variables || []) {
- const v = getVariable(varName);
- if (v) allVars[varName] = v.content;
- }
-
- // Apply Mustache-like template substitution
- return applyTemplate(template, allVars);
+export async function buildPrompt(stageName, data) {
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.buildPrompt(stageName, data);
}
-/**
- * Apply Mustache-like template substitution
- */
-function applyTemplate(template, data) {
- let result = template;
-
- // Handle sections {{#key}}...{{/key}}
- result = result.replace(/\{\{#(\w+)\}\}([\s\S]*?)\{\{\/\1\}\}/g, (match, key, content) => {
- const value = data[key];
- if (!value) return '';
- if (Array.isArray(value)) {
- return value.map(item => {
- if (typeof item === 'object') {
- return applyTemplate(content, item);
- }
- return content.replace(/\{\{\.\}\}/g, item);
- }).join('');
- }
- return applyTemplate(content, data);
- });
-
- // Handle inverted sections {{^key}}...{{/key}}
- result = result.replace(/\{\{\^(\w+)\}\}([\s\S]*?)\{\{\/\1\}\}/g, (match, key, content) => {
- return data[key] ? '' : content;
- });
-
- // Handle simple variables {{key}}
- result = result.replace(/\{\{(\w+)\}\}/g, (match, key) => {
- return data[key] !== undefined ? String(data[key]) : '';
- });
-
- return result.trim();
+export async function previewPrompt(stageName, testData) {
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.prompts.previewPrompt(stageName, testData);
}
-
-/**
- * Preview a prompt with test data
- */
-export async function previewPrompt(stageName, testData = {}) {
- return buildPrompt(stageName, testData);
-}
-
-// Load prompts on module init
-loadPrompts().catch(err => console.error(`โ Failed to load prompts: ${err.message}`));
diff --git a/server/services/providerStatus.js b/server/services/providerStatus.js
new file mode 100644
index 0000000..a26c95e
--- /dev/null
+++ b/server/services/providerStatus.js
@@ -0,0 +1,105 @@
+/**
+ * Provider Status Service
+ *
+ * Thin wrapper around portos-ai-toolkit's createProviderStatusService
+ * that provides backwards-compatible exports for PortOS.
+ */
+
+import { createProviderStatusService } from 'portos-ai-toolkit/server';
+import { join, dirname } from 'path';
+import { fileURLToPath } from 'url';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+const DATA_DIR = join(__dirname, '../../data');
+
+// Create the provider status service from ai-toolkit
+const providerStatusService = createProviderStatusService({
+ dataDir: DATA_DIR,
+ statusFile: 'provider-status.json',
+ defaultFallbackPriority: ['claude-code', 'codex', 'lmstudio', 'local-lm-studio', 'ollama', 'gemini-cli'],
+ onStatusChange: (eventData) => {
+ // Re-emit on the exported events emitter for backwards compatibility
+ providerStatusEvents.emit('status:changed', eventData);
+ }
+});
+
+// Export the events emitter for Socket.IO integration
+export const providerStatusEvents = providerStatusService.events;
+
+/**
+ * Initialize status cache
+ */
+export async function initProviderStatus() {
+ await providerStatusService.init();
+ console.log('๐ Provider status service initialized');
+}
+
+/**
+ * Get status for a specific provider
+ */
+export function getProviderStatus(providerId) {
+ return providerStatusService.getStatus(providerId);
+}
+
+/**
+ * Get all provider statuses
+ */
+export function getAllProviderStatuses() {
+ return providerStatusService.getAllStatuses();
+}
+
+/**
+ * Check if a provider is available
+ */
+export function isProviderAvailable(providerId) {
+ return providerStatusService.isAvailable(providerId);
+}
+
+/**
+ * Mark a provider as unavailable due to usage limit
+ */
+export async function markProviderUsageLimit(providerId, errorInfo) {
+ const status = await providerStatusService.markUsageLimit(providerId, errorInfo);
+ console.log(`โ ๏ธ Provider ${providerId} marked unavailable: usage limit (retry after ${errorInfo?.waitTime || '24h'})`);
+ return status;
+}
+
+/**
+ * Mark a provider as unavailable due to rate limiting (temporary)
+ */
+export async function markProviderRateLimited(providerId) {
+ return providerStatusService.markRateLimited(providerId);
+}
+
+/**
+ * Mark a provider as available (recovered)
+ */
+export async function markProviderAvailable(providerId) {
+ const status = await providerStatusService.markAvailable(providerId);
+ console.log(`โ
Provider ${providerId} marked available`);
+ return status;
+}
+
+/**
+ * Get the best available fallback provider
+ * Returns null if no fallback is available
+ *
+ * Priority order:
+ * 1. Task-level fallback (task.metadata.fallbackProvider)
+ * 2. Provider-level fallback (provider.fallbackProvider)
+ * 3. System default priority list
+ */
+export function getFallbackProvider(primaryProviderId, providers, taskFallbackId = null) {
+ return providerStatusService.getFallbackProvider(primaryProviderId, providers, taskFallbackId);
+}
+
+/**
+ * Get human-readable time until provider recovery
+ */
+export function getTimeUntilRecovery(providerId) {
+ return providerStatusService.getTimeUntilRecovery(providerId);
+}
+
+// Export the underlying service for direct access if needed
+export { providerStatusService };
diff --git a/server/services/providers.js b/server/services/providers.js
index eb63436..87df099 100644
--- a/server/services/providers.js
+++ b/server/services/providers.js
@@ -1,206 +1,56 @@
-import { readFile, writeFile, mkdir } from 'fs/promises';
-import { existsSync } from 'fs';
-import { join, dirname } from 'path';
-import { fileURLToPath } from 'url';
-import { exec } from 'child_process';
-import { promisify } from 'util';
+/**
+ * Compatibility shim for PortOS services that import from providers.js
+ * Re-exports toolkit provider service functions
+ */
-const execAsync = promisify(exec);
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = dirname(__filename);
-const DATA_DIR = join(__dirname, '../../data');
-const PROVIDERS_FILE = join(DATA_DIR, 'providers.json');
-const SAMPLE_FILE = join(__dirname, '../../data.sample/providers.json');
+// This will be initialized by server/index.js and set via setAIToolkit()
+let aiToolkitInstance = null;
-async function ensureDataDir() {
- if (!existsSync(DATA_DIR)) {
- await mkdir(DATA_DIR, { recursive: true });
- }
-}
-
-async function loadProviders() {
- await ensureDataDir();
-
- if (!existsSync(PROVIDERS_FILE)) {
- // Copy from sample if exists
- if (existsSync(SAMPLE_FILE)) {
- const sample = await readFile(SAMPLE_FILE, 'utf-8');
- await writeFile(PROVIDERS_FILE, sample);
- return JSON.parse(sample);
- }
- return { activeProvider: null, providers: {} };
- }
-
- const content = await readFile(PROVIDERS_FILE, 'utf-8');
- return JSON.parse(content);
-}
-
-async function saveProviders(data) {
- await ensureDataDir();
- await writeFile(PROVIDERS_FILE, JSON.stringify(data, null, 2));
+export function setAIToolkit(toolkit) {
+ aiToolkitInstance = toolkit;
}
export async function getAllProviders() {
- const data = await loadProviders();
- return {
- activeProvider: data.activeProvider,
- providers: Object.values(data.providers)
- };
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.providers.getAllProviders();
}
export async function getProviderById(id) {
- const data = await loadProviders();
- return data.providers[id] || null;
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.providers.getProviderById(id);
}
export async function getActiveProvider() {
- const data = await loadProviders();
- if (!data.activeProvider) return null;
- return data.providers[data.activeProvider] || null;
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.providers.getActiveProvider();
}
export async function setActiveProvider(id) {
- const data = await loadProviders();
- if (!data.providers[id]) {
- return null;
- }
- data.activeProvider = id;
- await saveProviders(data);
- return data.providers[id];
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.providers.setActiveProvider(id);
}
-export async function createProvider(providerData) {
- const data = await loadProviders();
- const id = providerData.id || providerData.name.toLowerCase().replace(/[^a-z0-9]/g, '-');
-
- if (data.providers[id]) {
- throw new Error('Provider with this ID already exists');
- }
-
- const provider = {
- id,
- name: providerData.name,
- type: providerData.type || 'cli', // cli | api
- command: providerData.command || null,
- args: providerData.args || [],
- endpoint: providerData.endpoint || null,
- apiKey: providerData.apiKey || '',
- models: providerData.models || [],
- defaultModel: providerData.defaultModel || null,
- timeout: providerData.timeout || 300000,
- enabled: providerData.enabled !== false,
- envVars: providerData.envVars || {}
- };
-
- data.providers[id] = provider;
-
- // Set as active if it's the first provider
- if (!data.activeProvider) {
- data.activeProvider = id;
- }
-
- await saveProviders(data);
- return provider;
+export async function createProvider(data) {
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.providers.createProvider(data);
}
-export async function updateProvider(id, updates) {
- const data = await loadProviders();
-
- if (!data.providers[id]) {
- return null;
- }
-
- const provider = {
- ...data.providers[id],
- ...updates,
- id // Prevent ID override
- };
-
- data.providers[id] = provider;
- await saveProviders(data);
- return provider;
+export async function updateProvider(id, data) {
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.providers.updateProvider(id, data);
}
export async function deleteProvider(id) {
- const data = await loadProviders();
-
- if (!data.providers[id]) {
- return false;
- }
-
- delete data.providers[id];
-
- // Clear active if it was deleted
- if (data.activeProvider === id) {
- const remaining = Object.keys(data.providers);
- data.activeProvider = remaining.length > 0 ? remaining[0] : null;
- }
-
- await saveProviders(data);
- return true;
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.providers.deleteProvider(id);
}
export async function testProvider(id) {
- const provider = await getProviderById(id);
- if (!provider) {
- return { success: false, error: 'Provider not found' };
- }
-
- if (provider.type === 'cli') {
- // Test CLI availability
- const { stdout, stderr } = await execAsync(`which ${provider.command}`).catch(() => ({ stdout: '', stderr: 'not found' }));
-
- if (!stdout.trim()) {
- return { success: false, error: `Command '${provider.command}' not found in PATH` };
- }
-
- // Try to get version or help
- const { stdout: versionOut } = await execAsync(`${provider.command} --version 2>/dev/null || ${provider.command} -v 2>/dev/null || echo "available"`).catch(() => ({ stdout: 'available' }));
-
- return {
- success: true,
- path: stdout.trim(),
- version: versionOut.trim()
- };
- }
-
- if (provider.type === 'api') {
- // Test API endpoint
- const modelsUrl = `${provider.endpoint}/models`;
- const response = await fetch(modelsUrl, {
- headers: provider.apiKey ? { 'Authorization': `Bearer ${provider.apiKey}` } : {}
- }).catch(err => ({ ok: false, error: err.message }));
-
- if (!response.ok) {
- return { success: false, error: `API not reachable: ${response.error || response.status}` };
- }
-
- const models = await response.json().catch(() => ({ data: [] }));
- return {
- success: true,
- endpoint: provider.endpoint,
- models: models.data?.map(m => m.id) || []
- };
- }
-
- return { success: false, error: 'Unknown provider type' };
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.providers.testProvider(id);
}
export async function refreshProviderModels(id) {
- const provider = await getProviderById(id);
- if (!provider || provider.type !== 'api') {
- return null;
- }
-
- const modelsUrl = `${provider.endpoint}/models`;
- const response = await fetch(modelsUrl, {
- headers: provider.apiKey ? { 'Authorization': `Bearer ${provider.apiKey}` } : {}
- }).catch(() => null);
-
- if (!response?.ok) return null;
-
- const data = await response.json().catch(() => ({ data: [] }));
- const models = data.data?.map(m => m.id) || [];
-
- return updateProvider(id, { models });
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.providers.refreshProviderModels(id);
}
diff --git a/server/services/runner.js b/server/services/runner.js
index e2b33bc..9eaab84 100644
--- a/server/services/runner.js
+++ b/server/services/runner.js
@@ -1,756 +1,66 @@
-import { mkdir, writeFile, readFile, readdir, rm } from 'fs/promises';
-import { existsSync } from 'fs';
-import { join, dirname } from 'path';
-import { fileURLToPath } from 'url';
-import { v4 as uuidv4 } from 'uuid';
-import { getProviderById } from './providers.js';
-import { errorEvents } from '../lib/errorHandler.js';
-import { recordSession, recordMessages } from './usage.js';
-import {
- isRunnerAvailable,
- executeCliRunViaRunner,
- isRunActiveInRunner,
- stopRunViaRunner,
- initCosRunnerConnection,
- onCosRunnerEvent,
- getActiveRunsFromRunner
-} from './cosRunnerClient.js';
-
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = dirname(__filename);
-const DATA_DIR = join(__dirname, '../../data');
-const RUNS_DIR = join(DATA_DIR, 'runs');
-
/**
- * Safe JSON parse with fallback to empty object
- * Logs parse errors for debugging corrupted metadata files
+ * Compatibility shim for PortOS services that import from runner.js
+ * Re-exports toolkit runner service functions
*/
-function safeJsonParse(str, fallback = {}) {
- try {
- return JSON.parse(str);
- } catch (err) {
- // Log only if str is not empty/default - empty strings are expected from .catch(() => '{}')
- if (str && str !== '{}') {
- console.warn(`โ ๏ธ JSON parse failed: ${err.message} (input: ${str.slice(0, 100)}...)`);
- }
- return fallback;
- }
-}
-
-// Track active runs for cancellation (only used for API runs, CLI runs are in cos-runner)
-const activeRuns = new Map();
-
-// Track pending callbacks for runs delegated to cos-runner
-const pendingRunCallbacks = new Map();
-
-// Initialize cos-runner connection and event handlers
-let cosRunnerInitialized = false;
-
-function initRunnerEvents() {
- if (cosRunnerInitialized) return;
- cosRunnerInitialized = true;
-
- initCosRunnerConnection();
-
- // Handle reconnection - sync active runs to detect completions we may have missed
- onCosRunnerEvent('connection:ready', async () => {
- await syncActiveRuns();
- });
-
- // Handle run output from cos-runner
- onCosRunnerEvent('run:data', ({ runId, text }) => {
- const callbacks = pendingRunCallbacks.get(runId);
- if (callbacks?.onData) {
- callbacks.onData(text);
- }
- });
-
- // Handle run completion from cos-runner
- onCosRunnerEvent('run:complete', async ({ runId, exitCode, success, duration }) => {
- const callbacks = pendingRunCallbacks.get(runId);
- if (callbacks) {
- // Read the final metadata and update it
- const metadataPath = join(RUNS_DIR, runId, 'metadata.json');
- const outputPath = join(RUNS_DIR, runId, 'output.txt');
-
- const metadata = safeJsonParse(await readFile(metadataPath, 'utf-8').catch(() => '{}'));
- const output = await readFile(outputPath, 'utf-8').catch(() => '');
-
- metadata.endTime = new Date().toISOString();
- metadata.duration = duration;
- metadata.exitCode = exitCode;
- metadata.success = success;
- metadata.outputSize = Buffer.byteLength(output);
-
- // Record usage for successful runs (estimate ~4 chars per token)
- if (success && metadata.providerId && metadata.model) {
- const estimatedTokens = Math.ceil(output.length / 4);
- recordMessages(metadata.providerId, metadata.model, 1, estimatedTokens).catch(err => {
- console.error(`โ Failed to record usage: ${err.message}`);
- });
- }
-
- // Extract and store error details for failed runs
- if (!success) {
- const errorDetails = extractErrorDetails(output, exitCode);
- const { category, suggestion } = categorizeError(output, exitCode);
- metadata.error = errorDetails;
- metadata.errorDetails = errorDetails;
- metadata.errorCategory = category;
- metadata.suggestedFix = suggestion;
- console.log(`๐ด Run ${runId} failed with exit code ${exitCode}, category: ${category}`);
- emitProviderError(metadata, errorDetails, output);
- }
- await writeFile(metadataPath, JSON.stringify(metadata, null, 2));
+// This will be initialized by server/index.js and set via setAIToolkit()
+let aiToolkitInstance = null;
- if (callbacks.onComplete) {
- callbacks.onComplete(metadata);
- }
-
- pendingRunCallbacks.delete(runId);
- }
- });
-
- // Handle run errors from cos-runner
- onCosRunnerEvent('run:error', async ({ runId, error }) => {
- const callbacks = pendingRunCallbacks.get(runId);
- if (callbacks) {
- const metadataPath = join(RUNS_DIR, runId, 'metadata.json');
- const metadata = safeJsonParse(await readFile(metadataPath, 'utf-8').catch(() => '{}'));
-
- metadata.endTime = new Date().toISOString();
- metadata.success = false;
- metadata.error = error;
- metadata.errorDetails = error;
- const { category, suggestion } = categorizeError(error, -1);
- metadata.errorCategory = category;
- metadata.suggestedFix = suggestion;
-
- emitProviderError(metadata, error, '');
-
- await writeFile(metadataPath, JSON.stringify(metadata, null, 2));
-
- if (callbacks.onComplete) {
- callbacks.onComplete(metadata);
- }
-
- pendingRunCallbacks.delete(runId);
- }
- });
-}
-
-/**
- * Categorize errors into actionable types
- */
-function categorizeError(output, exitCode) {
- const lowerOutput = output.toLowerCase();
-
- // API/Model errors - check specific patterns first
- // Model not found requires BOTH "model:" AND a not-found pattern to avoid false positives
- // from legitimate output that might contain just "model:" in a different context
- if (lowerOutput.includes('not_found_error') && lowerOutput.includes('model:')) {
- return { category: 'model_not_found', suggestion: 'The specified model does not exist - check AI provider settings and select a valid model' };
- }
- if (lowerOutput.includes('model:') && (lowerOutput.includes('not_found') || lowerOutput.includes('not found'))) {
- return { category: 'model_not_found', suggestion: 'Check AI provider settings - the model may be invalid or deprecated' };
- }
- if (lowerOutput.includes('api error') || lowerOutput.includes('api_error')) {
- return { category: 'api_error', suggestion: 'API request failed - check provider endpoint and API key' };
- }
-
- // Authentication errors
- if (lowerOutput.includes('unauthorized') || lowerOutput.includes('401') || lowerOutput.includes('invalid_api_key') || lowerOutput.includes('authentication failed')) {
- return { category: 'auth_error', suggestion: 'Authentication failed - verify API key is valid and has correct permissions' };
- }
-
- // Rate limiting
- if (lowerOutput.includes('rate limit') || lowerOutput.includes('429') || lowerOutput.includes('too many requests') || lowerOutput.includes('rate_limit_error')) {
- return { category: 'rate_limit', suggestion: 'Rate limited - wait before retrying or upgrade API plan' };
- }
-
- // Quota/billing
- if (lowerOutput.includes('quota') || lowerOutput.includes('billing') || lowerOutput.includes('exceeded') || lowerOutput.includes('insufficient')) {
- return { category: 'quota_exceeded', suggestion: 'Quota or billing issue - check your API account' };
- }
-
- // Network errors
- if (lowerOutput.includes('connection refused') || lowerOutput.includes('econnrefused') || lowerOutput.includes('network error')) {
- return { category: 'network_error', suggestion: 'Network error - check internet connection and endpoint URL' };
- }
- if (lowerOutput.includes('timeout') || lowerOutput.includes('etimedout')) {
- return { category: 'timeout', suggestion: 'Request timed out - try again or increase timeout setting' };
- }
-
- // Command/CLI errors - be more specific to avoid false positives
- if (lowerOutput.includes('command not found') || lowerOutput.includes('enoent')) {
- return { category: 'command_not_found', suggestion: 'Command not found - verify CLI tool is installed' };
- }
- if (lowerOutput.includes('permission denied')) {
- return { category: 'permission_denied', suggestion: 'Permission denied - check file/directory permissions' };
- }
-
- // Process signals
- if (exitCode === 143) {
- return { category: 'terminated', suggestion: 'Process was terminated (SIGTERM) - likely hit timeout or was manually stopped' };
- }
- if (exitCode === 137) {
- return { category: 'killed', suggestion: 'Process was killed (SIGKILL) - likely out of memory or force stopped' };
- }
- if (exitCode === 130) {
- return { category: 'interrupted', suggestion: 'Process was interrupted (SIGINT)' };
- }
-
- return { category: 'unknown', suggestion: 'Check the output above for specific error details' };
-}
-
-/**
- * Extract meaningful error details from CLI output
- * Looks for common error patterns and extracts actionable info
- */
-function extractErrorDetails(output, exitCode) {
- const lines = output.split('\n').filter(l => l.trim());
- const lastLines = lines.slice(-20); // Last 20 lines often contain error info
-
- // Try to extract error message from JSON-like structure
- // This handles API responses like: {"type":"error","error":{"type":"not_found_error","message":"model: codex"}}
- const messageMatch = output.match(/"message"\s*:\s*"([^"]+)"/);
- if (messageMatch) {
- // Also try to get the error type for more context
- const typeMatch = output.match(/"type"\s*:\s*"([^"]+_error)"/);
- if (typeMatch) {
- return `${typeMatch[1]}: ${messageMatch[1]}`;
- }
- return messageMatch[1];
- }
-
- // Look for common error patterns
- const errorPatterns = [
- /API Error[:\s]+\d+\s*(.+)/i, // "API Error: 404 {...}"
- /API Error[:\s]+(.+)/i,
- /error[:\s]*\{(.+)\}/i,
- /error[:\s]+(.+)/i,
- /failed[:\s]+(.+)/i,
- /exception[:\s]+(.+)/i,
- /fatal[:\s]+(.+)/i,
- /not found[:\s]+(.+)/i,
- /permission denied[:\s]+(.+)/i,
- /connection refused/i,
- /timeout/i,
- /rate limit/i,
- /invalid.*key/i,
- /unauthorized/i,
- /authentication failed/i,
- /"message"[:\s]*"([^"]+)"/i,
- /"error"[:\s]*"([^"]+)"/i
- ];
-
- const matchedErrors = [];
- for (const line of lastLines) {
- for (const pattern of errorPatterns) {
- if (pattern.test(line)) {
- matchedErrors.push(line.trim());
- break;
- }
- }
- }
-
- // If we found specific errors, return them; otherwise return last few lines
- if (matchedErrors.length > 0) {
- return matchedErrors.slice(0, 5).join('\n');
- }
-
- // Return last 5 non-empty lines as fallback
- return lastLines.slice(-5).join('\n') || `Process exited with code ${exitCode}`;
-}
-
-/**
- * Emit an AI provider execution error for autofix handling
- */
-function emitProviderError(metadata, errorDetails, output) {
- console.log(`๐ด AI provider execution failed: ${metadata.providerName} - ${metadata.error}`);
-
- errorEvents.emit('error', {
- code: 'AI_PROVIDER_EXECUTION_FAILED',
- message: `AI provider ${metadata.providerName} execution failed: ${metadata.error}`,
- severity: 'error',
- canAutoFix: true,
- timestamp: Date.now(),
- context: {
- runId: metadata.id,
- provider: metadata.providerName,
- providerId: metadata.providerId,
- model: metadata.model,
- exitCode: metadata.exitCode,
- duration: metadata.duration,
- workspacePath: metadata.workspacePath,
- workspaceName: metadata.workspaceName,
- errorDetails: errorDetails,
- errorCategory: metadata.errorCategory,
- suggestedFix: metadata.suggestedFix,
- promptPreview: metadata.prompt,
- outputTail: output ? output.slice(-2000) : null // Last 2KB of output for context
- }
- });
-}
-
-async function ensureRunsDir() {
- if (!existsSync(RUNS_DIR)) {
- await mkdir(RUNS_DIR, { recursive: true });
- }
+export function setAIToolkit(toolkit) {
+ aiToolkitInstance = toolkit;
}
export async function createRun(options) {
- const {
- providerId,
- model,
- prompt,
- workspacePath,
- workspaceName,
- timeout
- } = options;
-
- const provider = await getProviderById(providerId);
- if (!provider) {
- throw new Error('Provider not found');
- }
-
- if (!provider.enabled) {
- throw new Error('Provider is disabled');
- }
-
- await ensureRunsDir();
-
- const runId = uuidv4();
- const runDir = join(RUNS_DIR, runId);
- await mkdir(runDir);
-
- const metadata = {
- id: runId,
- type: 'ai',
- providerId,
- providerName: provider.name,
- model: model || provider.defaultModel,
- workspacePath,
- workspaceName,
- prompt: prompt.substring(0, 500), // Store truncated prompt in metadata
- startTime: new Date().toISOString(),
- endTime: null,
- duration: null,
- exitCode: null,
- success: null,
- error: null,
- outputSize: 0
- };
-
- await writeFile(join(runDir, 'metadata.json'), JSON.stringify(metadata, null, 2));
- await writeFile(join(runDir, 'prompt.txt'), prompt);
- await writeFile(join(runDir, 'output.txt'), '');
-
- // Record usage session
- recordSession(providerId, provider.name, model || provider.defaultModel).catch(err => {
- console.error(`โ Failed to record usage session: ${err.message}`);
- });
-
- // Use user-specified timeout or fall back to provider default
- const effectiveTimeout = timeout || provider.timeout;
-
- return { runId, runDir, provider, metadata, timeout: effectiveTimeout };
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.runner.createRun(options);
}
export async function executeCliRun(runId, provider, prompt, workspacePath, onData, onComplete, timeout) {
- // Initialize runner events if not already done
- initRunnerEvents();
-
- // Check if cos-runner is available
- const runnerAvailable = await isRunnerAvailable().catch(() => false);
-
- if (!runnerAvailable) {
- // Fall back to error - cos-runner must be running
- console.error(`โ CoS runner not available for run ${runId}`);
- const metadataPath = join(RUNS_DIR, runId, 'metadata.json');
- const metadata = safeJsonParse(await readFile(metadataPath, 'utf-8').catch(() => '{}'));
- metadata.endTime = new Date().toISOString();
- metadata.success = false;
- metadata.error = 'CoS runner service not available';
- metadata.errorDetails = 'The portos-cos process is not running. Start it with: pm2 start ecosystem.config.cjs';
- metadata.errorCategory = 'service_unavailable';
- metadata.suggestedFix = 'Start the portos-cos process using PM2';
- await writeFile(metadataPath, JSON.stringify(metadata, null, 2));
- onComplete?.(metadata);
- return runId;
- }
-
- // Build command args (without prompt - cos-runner adds it)
- const args = [...(provider.args || [])];
-
- console.log(`๐ง Delegating run ${runId} to cos-runner: ${provider.command} ${args.join(' ')}`);
-
- // Store callbacks for when cos-runner reports completion
- pendingRunCallbacks.set(runId, { onData, onComplete });
-
- // Execute via cos-runner
- await executeCliRunViaRunner({
- runId,
- command: provider.command,
- args,
- prompt,
- workspacePath: workspacePath || process.cwd(),
- envVars: provider.envVars || {},
- timeout: timeout || provider.timeout
- }).catch(async (err) => {
- console.error(`โ Failed to delegate run ${runId} to cos-runner: ${err.message}`);
- pendingRunCallbacks.delete(runId);
-
- const metadataPath = join(RUNS_DIR, runId, 'metadata.json');
- const metadata = safeJsonParse(await readFile(metadataPath, 'utf-8').catch(() => '{}'));
- metadata.endTime = new Date().toISOString();
- metadata.success = false;
- metadata.error = err.message;
- metadata.errorDetails = err.message;
- const { category, suggestion } = categorizeError(err.message, -1);
- metadata.errorCategory = category;
- metadata.suggestedFix = suggestion;
- await writeFile(metadataPath, JSON.stringify(metadata, null, 2));
- onComplete?.(metadata);
- });
-
- return runId;
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.runner.executeCliRun(runId, provider, prompt, workspacePath, onData, onComplete, timeout);
}
-export async function executeApiRun(runId, provider, model, prompt, workspacePath, onData, onComplete) {
- const runDir = join(RUNS_DIR, runId);
- const outputPath = join(runDir, 'output.txt');
- const metadataPath = join(runDir, 'metadata.json');
-
- const startTime = Date.now();
- let output = '';
-
- const headers = {
- 'Content-Type': 'application/json'
- };
- if (provider.apiKey) {
- headers['Authorization'] = `Bearer ${provider.apiKey}`;
- }
-
- const controller = new AbortController();
- activeRuns.set(runId, controller);
-
- const response = await fetch(`${provider.endpoint}/chat/completions`, {
- method: 'POST',
- headers,
- signal: controller.signal,
- body: JSON.stringify({
- model: model || provider.defaultModel,
- messages: [{ role: 'user', content: prompt }],
- stream: true
- })
- }).catch(err => ({ ok: false, error: err.message }));
-
- if (!response.ok) {
- activeRuns.delete(runId);
- const metadata = safeJsonParse(await readFile(metadataPath, 'utf-8').catch(() => '{}'));
- metadata.endTime = new Date().toISOString();
- metadata.duration = Date.now() - startTime;
- metadata.success = false;
-
- // Try to extract detailed error info from response
- let errorDetails = response.error || `API error: ${response.status}`;
- let responseBody = null;
- if (response.text) {
- responseBody = await response.text().catch(() => null);
- if (responseBody) {
- // Try to parse as JSON for structured error
- let parsed = null;
- try {
- parsed = JSON.parse(responseBody);
- } catch {
- // Not valid JSON, ignore
- }
- if (parsed?.error?.message) {
- errorDetails = `${response.status}: ${parsed.error.message}`;
- } else if (parsed?.message) {
- errorDetails = `${response.status}: ${parsed.message}`;
- } else if (responseBody.length < 500) {
- errorDetails = `${response.status}: ${responseBody}`;
- }
- }
- }
-
- metadata.error = errorDetails;
- metadata.errorDetails = errorDetails;
- const { category, suggestion } = categorizeError(errorDetails, response.status || -1);
- metadata.errorCategory = category;
- metadata.suggestedFix = suggestion;
-
- // Emit error event for autofix system
- emitProviderError(metadata, errorDetails, responseBody);
-
- await writeFile(metadataPath, JSON.stringify(metadata, null, 2));
- onComplete?.(metadata);
- return runId;
- }
-
- // Handle streaming response
- const reader = response.body.getReader();
- const decoder = new TextDecoder();
-
- const processStream = async () => {
- while (true) {
- const { done, value } = await reader.read();
- if (done) break;
-
- const chunk = decoder.decode(value);
- const lines = chunk.split('\n').filter(line => line.startsWith('data: '));
-
- for (const line of lines) {
- const data = line.slice(6);
- if (data === '[DONE]') continue;
-
- let parsed = null;
- try {
- parsed = JSON.parse(data);
- } catch {
- // Not valid JSON, skip this chunk
- continue;
- }
- if (parsed?.choices?.[0]?.delta?.content) {
- const text = parsed.choices[0].delta.content;
- output += text;
- onData?.(text);
- }
- }
- }
-
- await writeFile(outputPath, output);
- activeRuns.delete(runId);
-
- const metadata = safeJsonParse(await readFile(metadataPath, 'utf-8').catch(() => '{}'));
- metadata.endTime = new Date().toISOString();
- metadata.duration = Date.now() - startTime;
- metadata.exitCode = 0;
- metadata.success = true;
- metadata.outputSize = Buffer.byteLength(output);
- await writeFile(metadataPath, JSON.stringify(metadata, null, 2));
-
- // Record usage for API run (estimate ~4 chars per token)
- const estimatedTokens = Math.ceil(output.length / 4);
- recordMessages(metadata.providerId, metadata.model, 1, estimatedTokens).catch(err => {
- console.error(`โ Failed to record usage: ${err.message}`);
- });
-
- onComplete?.(metadata);
- };
-
- processStream().catch(async (err) => {
- activeRuns.delete(runId);
- const metadata = safeJsonParse(await readFile(metadataPath, 'utf-8').catch(() => '{}'));
- metadata.endTime = new Date().toISOString();
- metadata.duration = Date.now() - startTime;
- metadata.success = false;
- metadata.error = err.message;
- metadata.errorDetails = err.message;
- const { category, suggestion } = categorizeError(err.message, -1);
- metadata.errorCategory = category;
- metadata.suggestedFix = suggestion;
-
- // Emit error event for autofix system
- emitProviderError(metadata, err.message, output);
-
- await writeFile(metadataPath, JSON.stringify(metadata, null, 2));
- onComplete?.(metadata);
- });
-
- return runId;
+export async function executeApiRun(runId, provider, model, prompt, workspacePath, screenshots, onData, onComplete) {
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.runner.executeApiRun(runId, provider, model, prompt, workspacePath, screenshots, onData, onComplete);
}
export async function stopRun(runId) {
- // Check if it's a local API run
- const active = activeRuns.get(runId);
- if (active) {
- if (active.kill) {
- // It's a child process
- active.kill('SIGTERM');
- } else if (active.abort) {
- // It's an AbortController
- active.abort();
- }
- activeRuns.delete(runId);
- return true;
- }
-
- // Check if it's a CLI run in cos-runner
- const inRunner = await isRunActiveInRunner(runId).catch(() => false);
- if (inRunner) {
- await stopRunViaRunner(runId).catch(() => {});
- pendingRunCallbacks.delete(runId);
- return true;
- }
-
- return false;
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.runner.stopRun(runId);
}
export async function getRun(runId) {
- const runDir = join(RUNS_DIR, runId);
- if (!existsSync(runDir)) {
- return null;
- }
-
- const metadata = safeJsonParse(await readFile(join(runDir, 'metadata.json'), 'utf-8').catch(() => '{}'));
- return metadata;
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.runner.getRun(runId);
}
export async function getRunOutput(runId) {
- const runDir = join(RUNS_DIR, runId);
- if (!existsSync(runDir)) {
- return null;
- }
-
- return readFile(join(runDir, 'output.txt'), 'utf-8');
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.runner.getRunOutput(runId);
}
export async function getRunPrompt(runId) {
- const runDir = join(RUNS_DIR, runId);
- if (!existsSync(runDir)) {
- return null;
- }
-
- return readFile(join(runDir, 'prompt.txt'), 'utf-8');
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.runner.getRunPrompt(runId);
}
-export async function listRuns(limit = 50, offset = 0, source = 'all') {
- await ensureRunsDir();
-
- const entries = await readdir(RUNS_DIR, { withFileTypes: true });
- const runIds = entries
- .filter(e => e.isDirectory())
- .map(e => e.name);
-
- // Load all metadata and sort by start time
- const runs = [];
- for (const runId of runIds) {
- const metadataPath = join(RUNS_DIR, runId, 'metadata.json');
- if (existsSync(metadataPath)) {
- const metadata = safeJsonParse(await readFile(metadataPath, 'utf-8').catch(() => '{}'));
- if (metadata.id) runs.push(metadata);
- }
- }
-
- // Filter by source if specified
- let filteredRuns = runs;
- if (source !== 'all') {
- filteredRuns = runs.filter(run => {
- const runSource = run.source || 'devtools'; // Legacy runs without source are from devtools
- return runSource === source;
- });
- }
-
- filteredRuns.sort((a, b) => new Date(b.startTime) - new Date(a.startTime));
-
- return {
- total: filteredRuns.length,
- runs: filteredRuns.slice(offset, offset + limit)
- };
+export async function listRuns(limit, offset, source) {
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.runner.listRuns(limit, offset, source);
}
export async function deleteRun(runId) {
- const runDir = join(RUNS_DIR, runId);
- if (!existsSync(runDir)) {
- return false;
- }
-
- await rm(runDir, { recursive: true });
- return true;
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.runner.deleteRun(runId);
}
export async function deleteFailedRuns() {
- await ensureRunsDir();
-
- const entries = await readdir(RUNS_DIR, { withFileTypes: true });
- const runIds = entries.filter(e => e.isDirectory()).map(e => e.name);
-
- let deletedCount = 0;
- for (const runId of runIds) {
- const metadataPath = join(RUNS_DIR, runId, 'metadata.json');
- if (existsSync(metadataPath)) {
- const metadata = safeJsonParse(await readFile(metadataPath, 'utf-8').catch(() => '{}'));
- if (metadata.success === false) {
- await rm(join(RUNS_DIR, runId), { recursive: true });
- deletedCount++;
- }
- }
- }
-
- return deletedCount;
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.runner.deleteFailedRuns();
}
export async function isRunActive(runId) {
- // Check local API runs first
- if (activeRuns.has(runId)) {
- return true;
- }
- // Check if it's a CLI run in cos-runner
- if (pendingRunCallbacks.has(runId)) {
- return true;
- }
- // Double-check with cos-runner (in case of reconnect)
- return isRunActiveInRunner(runId).catch(() => false);
-}
-
-/**
- * Sync active runs from cos-runner on reconnection
- * This handles runs that may have completed while we were disconnected
- */
-async function syncActiveRuns() {
- // Check all pending runs to see if they're still active in the runner
- const pendingRunIds = Array.from(pendingRunCallbacks.keys());
- if (pendingRunIds.length === 0) {
- return;
- }
-
- console.log(`๐ Syncing ${pendingRunIds.length} pending runs with cos-runner...`);
-
- // Get list of active runs from the runner
- const activeInRunner = await getActiveRunsFromRunner().catch(() => []);
- const activeRunIds = new Set(activeInRunner.map(r => r.id));
-
- // Check for runs that have completed while we were disconnected
- for (const runId of pendingRunIds) {
- if (!activeRunIds.has(runId)) {
- // This run is no longer active in the runner - it may have completed
- // Check if we have output written to disk (cos-runner writes output.txt on completion)
- const outputPath = join(RUNS_DIR, runId, 'output.txt');
- const metadataPath = join(RUNS_DIR, runId, 'metadata.json');
-
- if (existsSync(outputPath) && existsSync(metadataPath)) {
- const metadata = safeJsonParse(await readFile(metadataPath, 'utf-8').catch(() => '{}'));
-
- // If endTime is set, the run completed - we missed the event
- if (metadata.endTime) {
- console.log(`๐ฅ Recovered completed run ${runId} (completed while disconnected)`);
- const callbacks = pendingRunCallbacks.get(runId);
- if (callbacks?.onComplete) {
- callbacks.onComplete(metadata);
- }
- pendingRunCallbacks.delete(runId);
- } else {
- // Run output exists but no endTime - run is likely stuck or orphaned
- // Mark it as failed
- console.log(`โ ๏ธ Run ${runId} appears orphaned - marking as failed`);
- metadata.endTime = new Date().toISOString();
- metadata.success = false;
- metadata.error = 'Run was orphaned (process ended without proper completion)';
- metadata.errorCategory = 'orphaned';
- metadata.suggestedFix = 'The run process ended unexpectedly. Try again.';
- await writeFile(metadataPath, JSON.stringify(metadata, null, 2));
-
- const callbacks = pendingRunCallbacks.get(runId);
- if (callbacks?.onComplete) {
- callbacks.onComplete(metadata);
- }
- pendingRunCallbacks.delete(runId);
- }
- }
- }
- }
+ if (!aiToolkitInstance) throw new Error('AI Toolkit not initialized');
+ return aiToolkitInstance.services.runner.isRunActive(runId);
}
diff --git a/server/services/scriptRunner.js b/server/services/scriptRunner.js
index 0bcd3b9..233b31c 100644
--- a/server/services/scriptRunner.js
+++ b/server/services/scriptRunner.js
@@ -12,7 +12,7 @@ import { writeFile, readFile, mkdir, readdir, rm } from 'fs/promises';
import { existsSync } from 'fs';
import { v4 as uuidv4 } from 'uuid';
import Cron from 'croner';
-import { cosEvents } from './cos.js';
+import { cosEvents } from './cosEvents.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
diff --git a/server/services/selfImprovement.js b/server/services/selfImprovement.js
index 787e5dd..a340f14 100644
--- a/server/services/selfImprovement.js
+++ b/server/services/selfImprovement.js
@@ -66,7 +66,7 @@ function emitLog(level, message, data = {}) {
message,
...data
};
- console.log(`${level === 'error' ? 'โ' : level === 'warn' ? 'โ ๏ธ' : level === 'success' ? 'โ
' : 'โน๏ธ'} [SelfImprovement] ${message}`);
+ console.log(`${level === 'error' ? 'โ' : level === 'warn' ? 'โ ๏ธ' : level === 'success' ? 'โ
' : level === 'debug' ? '๐' : 'โน๏ธ'} ๐ง SelfImprovement: ${message}`);
cosEvents.emit('log', logEntry);
}
diff --git a/server/services/sessionDelta.js b/server/services/sessionDelta.js
new file mode 100644
index 0000000..ba9c96b
--- /dev/null
+++ b/server/services/sessionDelta.js
@@ -0,0 +1,348 @@
+/**
+ * Session Delta Tracker
+ *
+ * Tracks changes (deltas) within agent sessions for efficient memory and context updates.
+ * Inspired by OpenClaw's session delta tracking pattern.
+ *
+ * Tracks per session:
+ * - pendingBytes: Bytes of data pending processing
+ * - pendingMessages: Count of messages pending processing
+ * - newMemories: Memories created this session
+ * - modifiedMemories: Memories updated this session
+ * - toolCalls: Tool calls made this session
+ */
+
+// In-memory session delta storage
+const sessions = new Map()
+
+// Session TTL (2 hours)
+const SESSION_TTL_MS = 2 * 60 * 60 * 1000
+
+// Cleanup interval (15 minutes)
+const CLEANUP_INTERVAL_MS = 15 * 60 * 1000
+
+let cleanupTimer = null
+
+/**
+ * Create a new session delta tracker
+ * @returns {Object} - Fresh session delta object
+ */
+function createSessionDelta() {
+ return {
+ createdAt: Date.now(),
+ lastActivityAt: Date.now(),
+ pendingBytes: 0,
+ pendingMessages: 0,
+ processedBytes: 0,
+ processedMessages: 0,
+ newMemories: [],
+ modifiedMemories: [],
+ toolCalls: [],
+ events: []
+ }
+}
+
+/**
+ * Get or create session delta
+ * @param {string} sessionId - Session identifier (usually agentId)
+ * @returns {Object} - Session delta object
+ */
+function getSession(sessionId) {
+ if (!sessions.has(sessionId)) {
+ sessions.set(sessionId, createSessionDelta())
+ }
+ const session = sessions.get(sessionId)
+ session.lastActivityAt = Date.now()
+ return session
+}
+
+/**
+ * Add pending bytes to session
+ * @param {string} sessionId - Session ID
+ * @param {number} bytes - Bytes to add
+ */
+function addPendingBytes(sessionId, bytes) {
+ const session = getSession(sessionId)
+ session.pendingBytes += bytes
+ session.pendingMessages++
+}
+
+/**
+ * Mark bytes as processed
+ * @param {string} sessionId - Session ID
+ * @param {number} bytes - Bytes processed
+ */
+function markBytesProcessed(sessionId, bytes) {
+ const session = getSession(sessionId)
+ session.pendingBytes = Math.max(0, session.pendingBytes - bytes)
+ session.pendingMessages = Math.max(0, session.pendingMessages - 1)
+ session.processedBytes += bytes
+ session.processedMessages++
+}
+
+/**
+ * Record a new memory created in this session
+ * @param {string} sessionId - Session ID
+ * @param {string} memoryId - Memory ID
+ * @param {string} type - Memory type
+ */
+function recordNewMemory(sessionId, memoryId, type) {
+ const session = getSession(sessionId)
+ session.newMemories.push({
+ id: memoryId,
+ type,
+ createdAt: Date.now()
+ })
+}
+
+/**
+ * Record a memory modification in this session
+ * @param {string} sessionId - Session ID
+ * @param {string} memoryId - Memory ID
+ * @param {string} changeType - Type of change (update, archive, etc.)
+ */
+function recordModifiedMemory(sessionId, memoryId, changeType) {
+ const session = getSession(sessionId)
+ session.modifiedMemories.push({
+ id: memoryId,
+ changeType,
+ modifiedAt: Date.now()
+ })
+}
+
+/**
+ * Record a tool call in this session
+ * @param {string} sessionId - Session ID
+ * @param {string} toolName - Name of tool called
+ * @param {Object} metadata - Optional metadata about the call
+ */
+function recordToolCall(sessionId, toolName, metadata = {}) {
+ const session = getSession(sessionId)
+ session.toolCalls.push({
+ tool: toolName,
+ calledAt: Date.now(),
+ ...metadata
+ })
+}
+
+/**
+ * Record a custom event in this session
+ * @param {string} sessionId - Session ID
+ * @param {string} eventType - Event type
+ * @param {Object} data - Event data
+ */
+function recordEvent(sessionId, eventType, data = {}) {
+ const session = getSession(sessionId)
+ session.events.push({
+ type: eventType,
+ timestamp: Date.now(),
+ data
+ })
+}
+
+/**
+ * Get session summary
+ * @param {string} sessionId - Session ID
+ * @returns {Object} - Session summary
+ */
+function getSessionSummary(sessionId) {
+ if (!sessions.has(sessionId)) {
+ return null
+ }
+
+ const session = sessions.get(sessionId)
+ return {
+ sessionId,
+ createdAt: session.createdAt,
+ lastActivityAt: session.lastActivityAt,
+ durationMs: session.lastActivityAt - session.createdAt,
+ pending: {
+ bytes: session.pendingBytes,
+ messages: session.pendingMessages
+ },
+ processed: {
+ bytes: session.processedBytes,
+ messages: session.processedMessages
+ },
+ memories: {
+ new: session.newMemories.length,
+ modified: session.modifiedMemories.length
+ },
+ toolCalls: session.toolCalls.length,
+ events: session.events.length
+ }
+}
+
+/**
+ * Get all pending data for a session
+ * @param {string} sessionId - Session ID
+ * @returns {Object} - Pending data
+ */
+function getPendingDelta(sessionId) {
+ if (!sessions.has(sessionId)) {
+ return { bytes: 0, messages: 0 }
+ }
+
+ const session = sessions.get(sessionId)
+ return {
+ bytes: session.pendingBytes,
+ messages: session.pendingMessages
+ }
+}
+
+/**
+ * Get memories created in this session
+ * @param {string} sessionId - Session ID
+ * @returns {Array} - New memory references
+ */
+function getNewMemories(sessionId) {
+ if (!sessions.has(sessionId)) return []
+ return [...sessions.get(sessionId).newMemories]
+}
+
+/**
+ * Get tool call history for this session
+ * @param {string} sessionId - Session ID
+ * @returns {Array} - Tool calls
+ */
+function getToolCalls(sessionId) {
+ if (!sessions.has(sessionId)) return []
+ return [...sessions.get(sessionId).toolCalls]
+}
+
+/**
+ * End a session and return final summary
+ * @param {string} sessionId - Session ID
+ * @returns {Object} - Final session summary
+ */
+function endSession(sessionId) {
+ const summary = getSessionSummary(sessionId)
+ sessions.delete(sessionId)
+ return summary
+}
+
+/**
+ * Clean up expired sessions
+ * @returns {number} - Number of sessions cleaned up
+ */
+function cleanupExpiredSessions() {
+ const now = Date.now()
+ let cleaned = 0
+
+ for (const [sessionId, session] of sessions.entries()) {
+ if (now - session.lastActivityAt > SESSION_TTL_MS) {
+ sessions.delete(sessionId)
+ cleaned++
+ }
+ }
+
+ if (cleaned > 0) {
+ console.log(`๐งน Cleaned up ${cleaned} expired session deltas`)
+ }
+
+ return cleaned
+}
+
+/**
+ * Start periodic cleanup
+ */
+function startCleanupTimer() {
+ if (cleanupTimer) return
+
+ cleanupTimer = setInterval(() => {
+ cleanupExpiredSessions()
+ }, CLEANUP_INTERVAL_MS)
+
+ // Don't prevent process exit
+ cleanupTimer.unref()
+}
+
+/**
+ * Stop periodic cleanup
+ */
+function stopCleanupTimer() {
+ if (cleanupTimer) {
+ clearInterval(cleanupTimer)
+ cleanupTimer = null
+ }
+}
+
+/**
+ * Get all active sessions
+ * @returns {Array} - Active session IDs
+ */
+function getActiveSessions() {
+ return Array.from(sessions.keys())
+}
+
+/**
+ * Get total stats across all sessions
+ * @returns {Object} - Aggregate statistics
+ */
+function getGlobalStats() {
+ let totalPendingBytes = 0
+ let totalPendingMessages = 0
+ let totalProcessedBytes = 0
+ let totalProcessedMessages = 0
+ let totalNewMemories = 0
+ let totalToolCalls = 0
+
+ for (const session of sessions.values()) {
+ totalPendingBytes += session.pendingBytes
+ totalPendingMessages += session.pendingMessages
+ totalProcessedBytes += session.processedBytes
+ totalProcessedMessages += session.processedMessages
+ totalNewMemories += session.newMemories.length
+ totalToolCalls += session.toolCalls.length
+ }
+
+ return {
+ activeSessions: sessions.size,
+ pending: {
+ bytes: totalPendingBytes,
+ messages: totalPendingMessages
+ },
+ processed: {
+ bytes: totalProcessedBytes,
+ messages: totalProcessedMessages
+ },
+ totalNewMemories,
+ totalToolCalls
+ }
+}
+
+/**
+ * Reset a session's pending counters
+ * @param {string} sessionId - Session ID
+ */
+function resetPending(sessionId) {
+ if (!sessions.has(sessionId)) return
+
+ const session = sessions.get(sessionId)
+ session.pendingBytes = 0
+ session.pendingMessages = 0
+}
+
+// Start cleanup on module load
+startCleanupTimer()
+
+export {
+ getSession,
+ addPendingBytes,
+ markBytesProcessed,
+ recordNewMemory,
+ recordModifiedMemory,
+ recordToolCall,
+ recordEvent,
+ getSessionSummary,
+ getPendingDelta,
+ getNewMemories,
+ getToolCalls,
+ endSession,
+ cleanupExpiredSessions,
+ startCleanupTimer,
+ stopCleanupTimer,
+ getActiveSessions,
+ getGlobalStats,
+ resetPending
+}
diff --git a/server/services/socket.js b/server/services/socket.js
index bd6d625..fcce3b3 100644
--- a/server/services/socket.js
+++ b/server/services/socket.js
@@ -1,11 +1,12 @@
import { spawn } from 'child_process';
import { streamDetection } from './streamingDetect.js';
-import { cosEvents } from './cos.js';
+import { cosEvents } from './cosEvents.js';
import { appsEvents } from './apps.js';
import { errorEvents } from '../lib/errorHandler.js';
import { handleErrorRecovery } from './autoFixer.js';
import * as pm2Standardizer from './pm2Standardizer.js';
import { notificationEvents } from './notifications.js';
+import { providerStatusEvents } from './providerStatus.js';
// Store active log streams per socket
const activeStreams = new Map();
@@ -237,6 +238,9 @@ export function initSocket(io) {
// Set up notification event forwarding
setupNotificationEventForwarding();
+
+ // Set up provider status event forwarding
+ setupProviderStatusEventForwarding();
}
function cleanupStream(socketId) {
@@ -345,3 +349,12 @@ function setupNotificationEventForwarding() {
notificationEvents.on('count-changed', (count) => broadcastToNotifications('notifications:count', count));
notificationEvents.on('cleared', () => broadcastToNotifications('notifications:cleared', {}));
}
+
+// Set up provider status event forwarding - broadcast to all clients
+function setupProviderStatusEventForwarding() {
+ providerStatusEvents.on('status:changed', (data) => {
+ if (ioInstance) {
+ ioInstance.emit('provider:status:changed', data);
+ }
+ });
+}
diff --git a/server/services/streamingDetect.js b/server/services/streamingDetect.js
index d1f60e0..7045b99 100644
--- a/server/services/streamingDetect.js
+++ b/server/services/streamingDetect.js
@@ -3,6 +3,7 @@ import { existsSync } from 'fs';
import { join, basename } from 'path';
import { exec } from 'child_process';
import { promisify } from 'util';
+import { safeJSONParse } from '../lib/fileUtils.js';
const execAsync = promisify(exec);
@@ -344,29 +345,33 @@ export async function streamDetection(socket, dirPath) {
if (existsSync(pkgPath)) {
const content = await readFile(pkgPath, 'utf-8').catch(() => null);
if (content) {
- const pkg = JSON.parse(content);
- result.name = pkg.name || result.name;
- result.description = pkg.description || '';
-
- // Detect project type
- const deps = { ...pkg.dependencies, ...pkg.devDependencies };
- if (deps.vite && deps.express) result.type = 'vite+express';
- else if (deps.vite || deps.react || deps.vue) result.type = 'vite';
- else if (deps.express || deps.fastify || deps.koa) result.type = 'single-node-server';
- else if (deps.next) result.type = 'nextjs';
-
- // Get start commands
- const scripts = pkg.scripts || {};
- if (scripts.dev) result.startCommands.push('npm run dev');
- if (scripts.start && !scripts.dev) result.startCommands.push('npm start');
-
- emit('package', 'done', {
- message: `Found: ${result.name}`,
- name: result.name,
- description: result.description,
- type: result.type,
- startCommands: result.startCommands
- });
+ const pkg = safeJSONParse(content, null);
+ if (!pkg) {
+ emit('package', 'error', { message: 'Invalid package.json format' });
+ } else {
+ result.name = pkg.name || result.name;
+ result.description = pkg.description || '';
+
+ // Detect project type
+ const deps = { ...pkg.dependencies, ...pkg.devDependencies };
+ if (deps.vite && deps.express) result.type = 'vite+express';
+ else if (deps.vite || deps.react || deps.vue) result.type = 'vite';
+ else if (deps.express || deps.fastify || deps.koa) result.type = 'single-node-server';
+ else if (deps.next) result.type = 'nextjs';
+
+ // Get start commands
+ const scripts = pkg.scripts || {};
+ if (scripts.dev) result.startCommands.push('npm run dev');
+ if (scripts.start && !scripts.dev) result.startCommands.push('npm start');
+
+ emit('package', 'done', {
+ message: `Found: ${result.name}`,
+ name: result.name,
+ description: result.description,
+ type: result.type,
+ startCommands: result.startCommands
+ });
+ }
}
} else {
emit('package', 'done', { message: 'No package.json found' });
@@ -461,7 +466,7 @@ export async function streamDetection(socket, dirPath) {
// Step 5: Check PM2 status
emit('pm2', 'running', { message: 'Checking PM2 processes...' });
const { stdout } = await execAsync('pm2 jlist').catch(() => ({ stdout: '[]' }));
- const pm2Processes = JSON.parse(stdout);
+ const pm2Processes = safeJSONParse(stdout, []);
// Look for processes that might be this app
const possibleNames = [
diff --git a/server/services/subAgentSpawner.js b/server/services/subAgentSpawner.js
index fded512..d1d4ee4 100644
--- a/server/services/subAgentSpawner.js
+++ b/server/services/subAgentSpawner.js
@@ -6,7 +6,7 @@
* and usage tracking.
*/
-import { spawn } from 'child_process';
+import { spawn, execSync } from 'child_process';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import { writeFile, mkdir, readFile } from 'fs/promises';
@@ -15,13 +15,20 @@ import { homedir } from 'os';
import { v4 as uuidv4 } from 'uuid';
import { cosEvents, registerAgent, updateAgent, completeAgent, appendAgentOutput, getConfig, updateTask, addTask, emitLog } from './cos.js';
import { startAppCooldown, markAppReviewCompleted } from './appActivity.js';
-import { isRunnerAvailable, spawnAgentViaRunner, terminateAgentViaRunner, killAgentViaRunner, getAgentStatsFromRunner, initCosRunnerConnection, onCosRunnerEvent, getActiveAgentsFromRunner } from './cosRunnerClient.js';
-import { getActiveProvider } from './providers.js';
+import { isRunnerAvailable, spawnAgentViaRunner, terminateAgentViaRunner, killAgentViaRunner, getAgentStatsFromRunner, initCosRunnerConnection, onCosRunnerEvent, getActiveAgentsFromRunner, getRunnerHealth } from './cosRunnerClient.js';
+import { getActiveProvider, getProviderById, getAllProviders } from './providers.js';
import { recordSession, recordMessages } from './usage.js';
+import { isProviderAvailable, markProviderUsageLimit, markProviderRateLimited, getFallbackProvider, getProviderStatus, initProviderStatus } from './providerStatus.js';
import { buildPrompt } from './promptService.js';
import { registerSpawnedAgent, unregisterSpawnedAgent } from './agents.js';
import { getMemorySection } from './memoryRetriever.js';
import { extractAndStoreMemories } from './memoryExtractor.js';
+import { getDigitalTwinForPrompt } from './digital-twin.js';
+import { suggestModelTier } from './taskLearning.js';
+import { readJSONFile } from '../lib/fileUtils.js';
+import { createToolExecution, startExecution, updateExecution, completeExecution, errorExecution, getExecution, getStats as getToolStats } from './toolStateMachine.js';
+import { resolveThinkingLevel, getModelForLevel, isLocalPreferred } from './thinkingLevels.js';
+import { determineLane, acquire, release, hasCapacity, waitForLane } from './executionLanes.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
@@ -30,16 +37,41 @@ const AGENTS_DIR = join(__dirname, '../../data/cos/agents');
const RUNS_DIR = join(__dirname, '../../data/runs');
/**
- * Select optimal model for a task based on complexity analysis
+ * Extract task type key for learning lookup
+ * Matches the format used in taskLearning.js for consistency
+ */
+function extractTaskTypeKey(task) {
+ if (task?.metadata?.analysisType) {
+ return `self-improve:${task.metadata.analysisType}`;
+ }
+ if (task?.metadata?.reviewType === 'idle') {
+ return 'idle-review';
+ }
+ const desc = (task?.description || '').toLowerCase();
+ if (desc.includes('[self-improvement]')) {
+ const typeMatch = desc.match(/\[self-improvement\]\s*(\w+)/i);
+ if (typeMatch) return `self-improve:${typeMatch[1]}`;
+ }
+ if (task?.taskType === 'user') return 'user-task';
+ return 'unknown';
+}
+
+/**
+ * Select optimal model for a task based on complexity analysis and historical performance
* User can override by specifying Model: and/or Provider: in task metadata
+ *
+ * Enhanced with:
+ * - Thinking levels hierarchy (task โ agent โ provider)
+ * - Learning-based model suggestions from historical success rates
+ * - Automatic upgrades when task type has <60% success rate
*/
-function selectModelForTask(task, provider) {
+async function selectModelForTask(task, provider, agent = {}) {
const desc = (task.description || '').toLowerCase();
const context = task.metadata?.context || '';
const contextLen = context.length;
const priority = task.priority || 'MEDIUM';
- // Check for user-specified model preference
+ // Check for user-specified model preference (highest priority)
const userModel = task.metadata?.model;
const userProvider = task.metadata?.provider;
@@ -53,6 +85,24 @@ function selectModelForTask(task, provider) {
};
}
+ // Check thinking level hierarchy (task โ agent โ provider)
+ // This resolves the appropriate thinking level based on configuration hierarchy
+ const thinkingResult = resolveThinkingLevel(task, agent, provider);
+ if (thinkingResult.resolvedFrom !== 'default') {
+ const modelFromLevel = getModelForLevel(thinkingResult.level, provider);
+ if (modelFromLevel) {
+ const isLocal = isLocalPreferred(thinkingResult.level);
+ console.log(`๐ง Thinking level: ${thinkingResult.level} โ ${modelFromLevel} (from ${thinkingResult.resolvedFrom}${isLocal ? ', local-preferred' : ''})`);
+ return {
+ model: modelFromLevel,
+ tier: thinkingResult.level,
+ reason: `thinking-level-${thinkingResult.resolvedFrom}`,
+ thinkingLevel: thinkingResult.level,
+ localPreferred: isLocal
+ };
+ }
+ }
+
// Image/visual analysis โ would route to gemini if available
if (/image|screenshot|visual|photo|picture/.test(desc)) {
return { model: provider.heavyModel || provider.defaultModel, tier: 'heavy', reason: 'visual-analysis' };
@@ -86,8 +136,22 @@ function selectModelForTask(task, provider) {
return { model: provider.lightModel || provider.defaultModel, tier: 'light', reason: 'documentation-task' };
}
- // Standard tasks โ sonnet/medium (default)
- return { model: provider.mediumModel || provider.defaultModel, tier: 'medium', reason: 'standard-task' };
+ // Check historical performance for this task type and potentially upgrade model
+ const taskTypeKey = extractTaskTypeKey(task);
+ const learningSuggestion = await suggestModelTier(taskTypeKey).catch(() => null);
+
+ if (learningSuggestion && learningSuggestion.suggested === 'heavy') {
+ console.log(`๐ Learning-based upgrade: ${taskTypeKey} โ heavy (${learningSuggestion.reason})`);
+ return {
+ model: provider.heavyModel || provider.defaultModel,
+ tier: 'heavy',
+ reason: 'learning-suggested',
+ learningReason: learningSuggestion.reason
+ };
+ }
+
+ // Standard tasks โ use provider's default model
+ return { model: provider.defaultModel, tier: 'default', reason: 'standard-task' };
}
/**
@@ -135,6 +199,22 @@ async function createAgentRun(agentId, task, model, provider, workspacePath) {
return { runId, runDir };
}
+/**
+ * Check if a commit was made with the task ID
+ * Returns true if a recent commit contains [task-{taskId}]
+ * Returns false if git command fails (not a repo, git not available, etc.)
+ */
+function checkForTaskCommit(taskId, workspacePath = ROOT_DIR) {
+ // Check if it's a git repo first
+ const gitDir = join(workspacePath, '.git');
+ if (!existsSync(gitDir)) return false;
+
+ const searchPattern = `[task-${taskId}]`;
+ const gitLogCmd = `git log --all --oneline --grep="${searchPattern}" -1 2>/dev/null || true`;
+ const result = execSync(gitLogCmd, { cwd: workspacePath, encoding: 'utf-8' }).trim();
+ return result.length > 0;
+}
+
/**
* Complete a run entry with final results
*/
@@ -144,13 +224,24 @@ async function completeAgentRun(runId, output, exitCode, duration, errorAnalysis
const runDir = join(RUNS_DIR, runId);
const metaPath = join(runDir, 'metadata.json');
- if (!existsSync(metaPath)) return;
+ const metadata = await readJSONFile(metaPath, null);
+ if (!metadata) return;
- const metadata = JSON.parse(await readFile(metaPath, 'utf-8'));
metadata.endTime = new Date().toISOString();
metadata.duration = duration;
metadata.exitCode = exitCode;
- metadata.success = exitCode === 0;
+
+ // Post-execution validation: check for task commit even if exit code is non-zero
+ let success = exitCode === 0;
+ if (!success && metadata.taskId && metadata.workspacePath) {
+ const commitFound = checkForTaskCommit(metadata.taskId, metadata.workspacePath);
+ if (commitFound) {
+ console.log(`โ ๏ธ Agent ${metadata.agentId} reported failure (exit ${exitCode}) but work completed - commit found for task ${metadata.taskId}`);
+ success = true;
+ }
+ }
+
+ metadata.success = success;
metadata.outputSize = Buffer.byteLength(output || '');
// Store error details - extract from output if no analysis provided
@@ -334,6 +425,23 @@ const ERROR_PATTERNS = [
suggestedFix: 'Wait and retry - temporary rate limiting'
})
},
+ {
+ pattern: /(?:hit your usage limit|usage limit|quota exceeded|Upgrade to Pro)/i,
+ category: 'usage-limit',
+ actionable: true, // Need to switch provider
+ extract: (match, output) => {
+ // Try to extract the wait time from the message
+ // e.g., "try again in 1 day 1 hour 33 minutes"
+ const timeMatch = output.match(/try again in\s+(.+?)(?:\.|$)/i);
+ const waitTime = timeMatch ? timeMatch[1].trim() : null;
+ return {
+ message: `Usage limit exceeded${waitTime ? ` - retry in ${waitTime}` : ''}`,
+ suggestedFix: 'Provider usage limit reached. Using fallback provider or wait for limit reset.',
+ waitTime,
+ requiresFallback: true
+ };
+ }
+ },
{
pattern: /API Error: 5\d{2}|server error|internal error/i,
category: 'server-error',
@@ -434,6 +542,11 @@ let useRunner = false;
* Initialize the spawner - listen for task:ready events
*/
export async function initSpawner() {
+ // Initialize provider status tracking
+ await initProviderStatus().catch(err => {
+ console.error(`โ ๏ธ Failed to initialize provider status: ${err.message}`);
+ });
+
// Check if CoS Runner is available
useRunner = await isRunnerAvailable();
@@ -562,24 +675,130 @@ async function syncRunnerAgents() {
export async function spawnAgentForTask(task) {
const agentId = `agent-${uuidv4().slice(0, 8)}`;
+ // Determine execution lane and acquire slot
+ const laneName = determineLane(task);
+ if (!hasCapacity(laneName)) {
+ // Wait for lane availability (max 30 seconds)
+ const laneResult = await waitForLane(laneName, agentId, { timeoutMs: 30000, metadata: { taskId: task.id } });
+ if (!laneResult.success) {
+ emitLog('warning', `Lane ${laneName} unavailable for task ${task.id}, deferring`, { taskId: task.id, lane: laneName });
+ cosEvents.emit('agent:deferred', { taskId: task.id, reason: 'lane-capacity', lane: laneName });
+ return null;
+ }
+ } else {
+ const laneResult = acquire(laneName, agentId, { taskId: task.id });
+ if (!laneResult.success) {
+ emitLog('warning', `Failed to acquire lane ${laneName}: ${laneResult.error}`, { taskId: task.id });
+ return null;
+ }
+ }
+
+ // Create tool execution for state tracking
+ const toolExecution = createToolExecution('agent-spawn', agentId, {
+ taskId: task.id,
+ lane: laneName,
+ priority: task.priority
+ });
+ startExecution(toolExecution.id);
+
+ // Helper to cleanup on early exit
+ const cleanupOnError = (error) => {
+ release(agentId);
+ errorExecution(toolExecution.id, { message: error });
+ completeExecution(toolExecution.id, { success: false });
+ };
+
// Get configuration
const config = await getConfig();
- const provider = await getActiveProvider();
+ let provider = await getActiveProvider();
if (!provider) {
+ cleanupOnError('No active AI provider configured');
cosEvents.emit('agent:error', { taskId: task.id, error: 'No active AI provider configured' });
return null;
}
- // Select optimal model for this task
- const modelSelection = selectModelForTask(task, provider);
- const selectedModel = modelSelection.model;
+ // Check provider availability (usage limits, rate limits, etc.)
+ const providerAvailable = isProviderAvailable(provider.id);
+ if (!providerAvailable) {
+ const status = getProviderStatus(provider.id);
+ emitLog('warning', `Provider ${provider.id} unavailable: ${status.message}`, {
+ taskId: task.id,
+ providerId: provider.id,
+ reason: status.reason
+ });
+
+ // Try to get a fallback provider (check task-level, then provider-level, then system default)
+ const allProviders = await getAllProviders();
+ const taskFallbackId = task.metadata?.fallbackProvider;
+ const fallbackResult = await getFallbackProvider(provider.id, allProviders, taskFallbackId);
+
+ if (fallbackResult) {
+ emitLog('info', `Using fallback provider: ${fallbackResult.provider.id} (source: ${fallbackResult.source})`, {
+ taskId: task.id,
+ primaryProvider: provider.id,
+ fallbackProvider: fallbackResult.provider.id,
+ fallbackSource: fallbackResult.source
+ });
+ provider = fallbackResult.provider;
+ } else {
+ // No fallback available - emit error and defer task
+ const errorMsg = `Provider ${provider.id} unavailable (${status.message}) and no fallback available`;
+ cleanupOnError(errorMsg);
+ cosEvents.emit('agent:error', {
+ taskId: task.id,
+ error: errorMsg,
+ providerId: provider.id,
+ providerStatus: status
+ });
+ // Don't spawn - task will retry later when provider recovers
+ return null;
+ }
+ }
- emitLog('info', `Model selection: ${selectedModel} (${modelSelection.reason})`, {
+ // Check if user specified a different provider in task metadata
+ const userProviderId = task.metadata?.provider;
+ if (userProviderId && userProviderId !== provider.id) {
+ const userProvider = await getProviderById(userProviderId);
+ if (userProvider) {
+ emitLog('info', `Using user-specified provider: ${userProviderId}`, { taskId: task.id });
+ provider = userProvider;
+ } else {
+ emitLog('warning', `User-specified provider "${userProviderId}" not found, using active provider`, { taskId: task.id });
+ }
+ }
+
+ // Select optimal model for this task (async to allow learning-based suggestions)
+ const modelSelection = await selectModelForTask(task, provider);
+ let selectedModel = modelSelection.model;
+
+ // Validate model is compatible with provider
+ if (selectedModel && provider.models && provider.models.length > 0) {
+ const modelIsValid = provider.models.includes(selectedModel);
+ if (!modelIsValid) {
+ emitLog('warning', `Model "${selectedModel}" not valid for provider "${provider.id}", falling back to provider default`, {
+ taskId: task.id,
+ requestedModel: selectedModel,
+ providerId: provider.id,
+ validModels: provider.models
+ });
+ // Fall back to the appropriate tier model for this provider
+ selectedModel = modelSelection.tier === 'heavy' ? provider.heavyModel :
+ modelSelection.tier === 'light' ? provider.lightModel :
+ modelSelection.tier === 'medium' ? provider.mediumModel :
+ provider.defaultModel;
+ }
+ }
+
+ const logMessage = modelSelection.learningReason
+ ? `Model selection: ${selectedModel} (${modelSelection.reason} - ${modelSelection.learningReason})`
+ : `Model selection: ${selectedModel} (${modelSelection.reason})`;
+ emitLog('info', logMessage, {
taskId: task.id,
model: selectedModel,
tier: modelSelection.tier,
- reason: modelSelection.reason
+ reason: modelSelection.reason,
+ ...(modelSelection.learningReason && { learningReason: modelSelection.learningReason })
});
// Determine workspace path
@@ -621,33 +840,70 @@ export async function spawnAgentForTask(task) {
// Mark the task as in_progress to prevent re-spawning
await updateTask(task.id, { status: 'in_progress' }, task.taskType || 'user');
- // Spawn the Claude CLI process using full path for PM2 compatibility
- const claudePath = process.env.CLAUDE_PATH || '/Users/antic/.nvm/versions/node/v25.2.1/bin/claude';
+ // Build CLI-specific spawn configuration
+ const cliConfig = buildCliSpawnConfig(provider, selectedModel);
- emitLog('success', `Spawning agent for task ${task.id}`, { agentId, model: selectedModel, mode: useRunner ? 'runner' : 'direct' });
+ emitLog('success', `Spawning agent for task ${task.id}`, { agentId, model: selectedModel, mode: useRunner ? 'runner' : 'direct', cli: cliConfig.command, lane: laneName });
// Use CoS Runner if available, otherwise spawn directly
if (useRunner) {
- return spawnViaRunner(agentId, task, prompt, workspacePath, selectedModel, provider, runId, claudePath);
+ return spawnViaRunner(agentId, task, prompt, workspacePath, selectedModel, provider, runId, cliConfig, toolExecution.id, laneName);
}
// Direct spawn mode (fallback)
- return spawnDirectly(agentId, task, prompt, workspacePath, selectedModel, provider, runId, claudePath, agentDir);
+ return spawnDirectly(agentId, task, prompt, workspacePath, selectedModel, provider, runId, cliConfig, agentDir, toolExecution.id, laneName);
+}
+
+/**
+ * Minimum runner uptime (seconds) before spawning agents.
+ * Prevents race condition during rolling restarts where server starts
+ * before runner, spawns an agent, then runner restarts and orphans it.
+ */
+const RUNNER_MIN_UPTIME_SECONDS = 10;
+
+/**
+ * Wait for runner to be stable (sufficient uptime) before spawning
+ */
+async function waitForRunnerStability() {
+ const maxWaitMs = 15000;
+ const checkIntervalMs = 1000;
+ const startTime = Date.now();
+
+ while (Date.now() - startTime < maxWaitMs) {
+ const health = await getRunnerHealth();
+ if (health.available && health.uptime >= RUNNER_MIN_UPTIME_SECONDS) {
+ return true;
+ }
+ if (health.available && health.uptime < RUNNER_MIN_UPTIME_SECONDS) {
+ const waitTime = Math.ceil(RUNNER_MIN_UPTIME_SECONDS - health.uptime);
+ emitLog('info', `Waiting ${waitTime}s for runner stability (uptime: ${Math.floor(health.uptime)}s)`, { uptime: health.uptime });
+ }
+ await new Promise(resolve => setTimeout(resolve, checkIntervalMs));
+ }
+
+ emitLog('warning', 'Runner stability check timed out, proceeding anyway', {});
+ return false;
}
/**
* Spawn agent via CoS Runner (isolated PM2 process)
*/
-async function spawnViaRunner(agentId, task, prompt, workspacePath, model, provider, runId, claudePath) {
+async function spawnViaRunner(agentId, task, prompt, workspacePath, model, provider, runId, cliConfig, executionId, laneName) {
+ // Wait for runner to be stable to prevent orphaned agents during rolling restarts
+ await waitForRunnerStability();
+
// Store tracking info for runner-spawned agents
const agentInfo = {
taskId: task.id,
task,
runId,
model,
+ providerId: provider.id,
hasStartedWorking: false,
startedAt: Date.now(),
- initializationTimeout: null
+ initializationTimeout: null,
+ executionId,
+ laneName
};
runnerAgents.set(agentId, agentInfo);
@@ -668,7 +924,8 @@ async function spawnViaRunner(agentId, task, prompt, workspacePath, model, provi
workspacePath,
model,
envVars: provider.envVars,
- claudePath
+ cliCommand: cliConfig.command,
+ cliArgs: cliConfig.args
});
// Store PID in persisted state for zombie detection
@@ -688,7 +945,22 @@ async function handleAgentCompletion(agentId, exitCode, success, duration) {
return;
}
- const { task, runId, model } = agent;
+ const { task, runId, model, executionId, laneName } = agent;
+
+ // Release execution lane
+ if (laneName) {
+ release(agentId);
+ }
+
+ // Complete tool execution tracking
+ if (executionId) {
+ if (success) {
+ completeExecution(executionId, { success: true, duration });
+ } else {
+ errorExecution(executionId, { message: `Agent exited with code ${exitCode}`, code: exitCode });
+ completeExecution(executionId, { success: false });
+ }
+ }
// Read output from agent directory
const agentDir = join(AGENTS_DIR, agentId);
@@ -718,11 +990,34 @@ async function handleAgentCompletion(agentId, exitCode, success, duration) {
const newStatus = success ? 'completed' : 'pending';
await updateTask(task.id, { status: newStatus }, task.taskType || 'user');
- // On failure, create investigation task if actionable
- if (!success && errorAnalysis?.actionable) {
- await createInvestigationTask(agentId, task, errorAnalysis).catch(err => {
- emitLog('warn', `Failed to create investigation task: ${err.message}`, { agentId });
- });
+ // On failure, handle provider status updates and create investigation task if actionable
+ if (!success && errorAnalysis) {
+ // Mark provider unavailable if usage limit hit
+ if (errorAnalysis.category === 'usage-limit' && errorAnalysis.requiresFallback) {
+ const providerId = agent.providerId || (await getActiveProvider())?.id;
+ if (providerId) {
+ await markProviderUsageLimit(providerId, errorAnalysis).catch(err => {
+ emitLog('warn', `Failed to mark provider unavailable: ${err.message}`, { providerId });
+ });
+ }
+ }
+
+ // Mark provider rate limited (temporary)
+ if (errorAnalysis.category === 'rate-limit') {
+ const providerId = agent.providerId || (await getActiveProvider())?.id;
+ if (providerId) {
+ await markProviderRateLimited(providerId).catch(err => {
+ emitLog('warn', `Failed to mark provider rate limited: ${err.message}`, { providerId });
+ });
+ }
+ }
+
+ // Create investigation task if actionable
+ if (errorAnalysis.actionable) {
+ await createInvestigationTask(agentId, task, errorAnalysis).catch(err => {
+ emitLog('warn', `Failed to create investigation task: ${err.message}`, { agentId });
+ });
+ }
}
// Process memory extraction and app cooldown
@@ -734,14 +1029,13 @@ async function handleAgentCompletion(agentId, exitCode, success, duration) {
/**
* Spawn agent directly (fallback when runner not available)
*/
-async function spawnDirectly(agentId, task, prompt, workspacePath, model, provider, runId, claudePath, agentDir) {
- const spawnArgs = buildSpawnArgs(null, model);
- const fullCommand = `${claudePath} ${spawnArgs.join(' ')} <<< "${(task.description || '').substring(0, 100)}..."`;
+async function spawnDirectly(agentId, task, prompt, workspacePath, model, provider, runId, cliConfig, agentDir, executionId, laneName) {
+ const fullCommand = `${cliConfig.command} ${cliConfig.args.join(' ')} <<< "${(task.description || '').substring(0, 100)}..."`;
// Ensure workspacePath is valid
const cwd = workspacePath && typeof workspacePath === 'string' ? workspacePath : ROOT_DIR;
- const claudeProcess = spawn(claudePath, spawnArgs, {
+ const claudeProcess = spawn(cliConfig.command, cliConfig.args, {
cwd,
shell: false,
stdio: ['pipe', 'pipe', 'pipe'],
@@ -768,7 +1062,10 @@ async function spawnDirectly(agentId, task, prompt, workspacePath, model, provid
taskId: task.id,
startedAt: Date.now(),
runId,
- pid: claudeProcess.pid
+ pid: claudeProcess.pid,
+ providerId: provider.id,
+ executionId,
+ laneName
});
// Store PID in persisted state for zombie detection
@@ -811,6 +1108,18 @@ async function spawnDirectly(agentId, task, prompt, workspacePath, model, provid
claudeProcess.on('error', async (err) => {
clearTimeout(initializationTimeout);
console.error(`โ Agent ${agentId} spawn error: ${err.message}`);
+
+ // Release execution lane
+ if (laneName) {
+ release(agentId);
+ }
+
+ // Complete tool execution tracking with error
+ if (executionId) {
+ errorExecution(executionId, { message: err.message, category: 'spawn-error' });
+ completeExecution(executionId, { success: false });
+ }
+
cosEvents.emit('agent:error', { agentId, error: err.message });
await completeAgent(agentId, { success: false, error: err.message });
await completeAgentRun(runId, outputBuffer, 1, 0, { message: err.message, category: 'spawn-error' });
@@ -824,6 +1133,21 @@ async function spawnDirectly(agentId, task, prompt, workspacePath, model, provid
const agentData = activeAgents.get(agentId);
const duration = Date.now() - (agentData?.startedAt || Date.now());
+ // Release execution lane
+ if (agentData?.laneName) {
+ release(agentId);
+ }
+
+ // Complete tool execution tracking
+ if (agentData?.executionId) {
+ if (success) {
+ completeExecution(agentData.executionId, { success: true, duration });
+ } else {
+ errorExecution(agentData.executionId, { message: `Agent exited with code ${code}`, code });
+ completeExecution(agentData.executionId, { success: false });
+ }
+ }
+
await writeFile(outputFile, outputBuffer).catch(() => {});
const errorAnalysis = success ? null : analyzeAgentFailure(outputBuffer, task, model);
@@ -841,10 +1165,34 @@ async function spawnDirectly(agentId, task, prompt, workspacePath, model, provid
const newStatus = success ? 'completed' : 'pending';
await updateTask(task.id, { status: newStatus }, task.taskType || 'user');
- if (!success && errorAnalysis?.actionable) {
- await createInvestigationTask(agentId, task, errorAnalysis).catch(err => {
- emitLog('warn', `Failed to create investigation task: ${err.message}`, { agentId });
- });
+ // On failure, handle provider status updates and create investigation task if actionable
+ if (!success && errorAnalysis) {
+ // Mark provider unavailable if usage limit hit
+ if (errorAnalysis.category === 'usage-limit' && errorAnalysis.requiresFallback) {
+ const providerId = agentData?.providerId || provider.id;
+ if (providerId) {
+ await markProviderUsageLimit(providerId, errorAnalysis).catch(err => {
+ emitLog('warn', `Failed to mark provider unavailable: ${err.message}`, { providerId });
+ });
+ }
+ }
+
+ // Mark provider rate limited (temporary)
+ if (errorAnalysis.category === 'rate-limit') {
+ const providerId = agentData?.providerId || provider.id;
+ if (providerId) {
+ await markProviderRateLimited(providerId).catch(err => {
+ emitLog('warn', `Failed to mark provider rate limited: ${err.message}`, { providerId });
+ });
+ }
+ }
+
+ // Create investigation task if actionable
+ if (errorAnalysis.actionable) {
+ await createInvestigationTask(agentId, task, errorAnalysis).catch(err => {
+ emitLog('warn', `Failed to create investigation task: ${err.message}`, { agentId });
+ });
+ }
}
// Process memory extraction and app cooldown
@@ -857,8 +1205,45 @@ async function spawnDirectly(agentId, task, prompt, workspacePath, model, provid
return agentId;
}
+/**
+ * Build spawn command and arguments for a CLI provider
+ * Returns { command, args, stdinMode } based on provider type
+ */
+function buildCliSpawnConfig(provider, model) {
+ const providerId = provider?.id || 'claude-code';
+
+ // Codex CLI uses different invocation pattern
+ if (providerId === 'codex') {
+ const args = ['exec'];
+ if (model) {
+ args.push('--model', model);
+ }
+ return {
+ command: provider?.command || 'codex',
+ args,
+ stdinMode: 'prompt' // codex exec reads prompt from stdin
+ };
+ }
+
+ // Default: Claude Code CLI
+ const args = [
+ '--dangerously-skip-permissions', // Unrestricted mode
+ '--print', // Print output and exit
+ ];
+ if (model) {
+ args.push('--model', model);
+ }
+
+ return {
+ command: process.env.CLAUDE_PATH || '/Users/antic/.nvm/versions/node/v25.2.1/bin/claude',
+ args,
+ stdinMode: 'prompt'
+ };
+}
+
/**
* Build spawn arguments for Claude CLI
+ * @deprecated Use buildCliSpawnConfig instead
*/
function buildSpawnArgs(config, model) {
// Note: MCP server config via --mcp-config requires a file path, not inline JSON
@@ -953,12 +1338,22 @@ async function buildAgentPrompt(task, config, workspaceDir) {
return null;
});
+ // Get digital twin context for persona alignment
+ const digitalTwinSection = await getDigitalTwinForPrompt({
+ maxTokens: config.digitalTwin?.maxContextTokens || config.soul?.maxContextTokens || 2000
+ }).catch(err => {
+ console.log(`โ ๏ธ Digital twin context retrieval failed: ${err.message}`);
+ return null;
+ });
+
// Try to use the prompt template system
const promptData = await buildPrompt('cos-agent-briefing', {
task,
config,
memorySection,
claudeMdSection,
+ digitalTwinSection,
+ soulSection: digitalTwinSection, // Backwards compatibility for prompt templates
timestamp: new Date().toISOString()
}).catch(() => null);
@@ -1009,13 +1404,11 @@ Begin working on the task now.`;
async function getAppWorkspace(appName) {
const appsFile = join(ROOT_DIR, 'data/apps.json');
- if (!existsSync(appsFile)) {
+ const data = await readJSONFile(appsFile, null);
+ if (!data) {
return ROOT_DIR;
}
- const content = await readFile(appsFile, 'utf-8');
- const data = JSON.parse(content);
-
// Handle both object format { apps: { id: {...} } } and array format [...]
const apps = data.apps || data;
diff --git a/server/services/taskEnhancer.js b/server/services/taskEnhancer.js
new file mode 100644
index 0000000..ec0d1c5
--- /dev/null
+++ b/server/services/taskEnhancer.js
@@ -0,0 +1,146 @@
+/**
+ * Task Enhancer Service
+ *
+ * Uses AI to enhance task descriptions/prompts to be more detailed,
+ * actionable, and comprehensive for agent execution.
+ *
+ * Uses the 'cos-task-enhance' prompt stage for provider/model configuration.
+ */
+
+import { executeApiRun, createRun } from './runner.js';
+import { getActiveProvider, getProviderById } from './providers.js';
+import { getStage, buildPrompt } from './promptService.js';
+
+const STAGE_NAME = 'cos-task-enhance';
+
+/**
+ * Fallback enhancement prompt template (used if stage template not found)
+ */
+const FALLBACK_PROMPT = `You are a task prompt enhancer for an AI agent system. Your job is to take a brief task description and expand it into a comprehensive, detailed prompt that an AI coding agent can execute effectively.
+
+## Guidelines
+
+1. **Preserve the original intent** - Don't change what the user wants, just make it clearer and more actionable
+2. **Add specific steps** - Break down the task into clear, sequential steps when appropriate
+3. **Include relevant context** - Mention file paths, patterns, or conventions that should be followed
+4. **Define success criteria** - What does "done" look like?
+5. **Anticipate edge cases** - Mention potential issues to watch out for
+6. **Keep it focused** - Don't add unrelated tasks or scope creep
+
+## Original Task Description
+{description}
+
+{contextSection}
+
+## Your Enhanced Prompt
+
+Provide an enhanced version of this task that an AI agent can execute. Output ONLY the enhanced prompt text, nothing else. Do not include any preamble like "Here is the enhanced prompt:" - just output the prompt itself.`;
+
+/**
+ * Enhance a task prompt using AI
+ *
+ * @param {string} description - The original task description
+ * @param {string} context - Optional additional context
+ * @returns {Promise<{enhancedDescription: string, originalDescription: string, model: string, provider: string}>}
+ */
+export async function enhanceTaskPrompt(description, context = '') {
+ console.log(`โจ Enhancing task prompt: "${description.substring(0, 50)}..."`);
+
+ // Get prompt stage configuration for cos-task-enhance
+ const stage = getStage(STAGE_NAME);
+
+ // Determine provider and model from stage config or fallback
+ let provider;
+ let model;
+
+ if (stage?.provider) {
+ // Use stage-configured provider
+ provider = await getProviderById(stage.provider).catch(() => null);
+ model = stage.model || provider?.defaultModel;
+ }
+
+ // Fallback to active provider if stage provider not available
+ if (!provider) {
+ provider = await getActiveProvider();
+ model = stage?.model || provider?.defaultModel || provider?.models?.[0];
+ }
+
+ if (!provider) {
+ throw new Error('No AI provider available for enhancement');
+ }
+
+ // Build the enhancement prompt using stage template or fallback
+ let fullPrompt;
+ const templatePrompt = await buildPrompt(STAGE_NAME, { description, context }).catch(() => null);
+
+ if (templatePrompt) {
+ fullPrompt = templatePrompt;
+ } else {
+ // Fallback to hardcoded template
+ const contextSection = context ? `## Additional Context\n${context}` : '';
+ fullPrompt = FALLBACK_PROMPT
+ .replace('{description}', description)
+ .replace('{contextSection}', contextSection);
+ }
+
+ // Create a run for this enhancement
+ const { runId } = await createRun({
+ providerId: provider.id,
+ model,
+ prompt: fullPrompt,
+ source: 'task-enhancement'
+ });
+
+ // Collect the response
+ let enhancedDescription = '';
+
+ await new Promise((resolve, reject) => {
+ executeApiRun(
+ runId,
+ provider,
+ model,
+ fullPrompt,
+ process.cwd(),
+ [], // No screenshots needed
+ (data) => {
+ if (data?.text) {
+ enhancedDescription += data.text;
+ }
+ },
+ (result) => {
+ if (result?.error) {
+ reject(new Error(result.error));
+ } else {
+ resolve(result);
+ }
+ }
+ );
+ });
+
+ // Clean up the response - remove any leading/trailing whitespace and common prefixes
+ enhancedDescription = enhancedDescription.trim();
+
+ // Remove common AI response prefixes
+ const prefixesToRemove = [
+ /^Here is the enhanced prompt[:\s]*/i,
+ /^Enhanced prompt[:\s]*/i,
+ /^Here's the enhanced version[:\s]*/i,
+ /^Certainly[!,.\s]*/i,
+ /^Sure[!,.\s]*/i
+ ];
+
+ for (const prefix of prefixesToRemove) {
+ enhancedDescription = enhancedDescription.replace(prefix, '');
+ }
+
+ enhancedDescription = enhancedDescription.trim();
+
+ console.log(`โ
Enhanced task prompt (${enhancedDescription.length} chars) using ${provider.name}/${model}`);
+
+ return {
+ enhancedDescription,
+ originalDescription: description,
+ model,
+ provider: provider.id
+ };
+}
diff --git a/server/services/taskLearning.js b/server/services/taskLearning.js
index aed336d..d4910d4 100644
--- a/server/services/taskLearning.js
+++ b/server/services/taskLearning.js
@@ -6,11 +6,12 @@
* to provide smarter task prioritization and model selection.
*/
-import { readFile, writeFile, mkdir } from 'fs/promises';
+import { writeFile, mkdir } from 'fs/promises';
import { existsSync } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import { cosEvents, emitLog } from './cos.js';
+import { readJSONFile } from '../lib/fileUtils.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
@@ -51,12 +52,7 @@ async function loadLearningData() {
await mkdir(DATA_DIR, { recursive: true });
}
- if (!existsSync(LEARNING_FILE)) {
- return { ...DEFAULT_LEARNING_DATA };
- }
-
- const content = await readFile(LEARNING_FILE, 'utf-8');
- return JSON.parse(content);
+ return readJSONFile(LEARNING_FILE, { ...DEFAULT_LEARNING_DATA });
}
/**
@@ -589,6 +585,168 @@ export async function shouldSkipTaskType(taskType) {
return result.skip;
}
+/**
+ * Check if any skipped task types are eligible for automatic rehabilitation
+ * Task types that have been skipped for a grace period get a "fresh start" opportunity
+ *
+ * Auto-rehabilitation rules:
+ * - Task must have been skipped (success rate < 30% with 5+ attempts)
+ * - Must have been at least rehabilitationGracePeriodMs since last completion
+ * - Reset the task type's learning data to give it a fresh chance
+ *
+ * This allows CoS to automatically retry previously-failing task types
+ * after enough time has passed for fixes to be applied.
+ *
+ * @param {number} gracePeriodMs - Minimum time since last attempt (default: 7 days)
+ * @returns {Object} Summary of rehabilitated task types
+ */
+export async function checkAndRehabilitateSkippedTasks(gracePeriodMs = 7 * 24 * 60 * 60 * 1000) {
+ const data = await loadLearningData();
+ const rehabilitated = [];
+ const now = Date.now();
+
+ for (const [taskType, metrics] of Object.entries(data.byTaskType)) {
+ // Only consider task types that would be skipped (< 30% success with 5+ attempts)
+ if (metrics.completed < 5 || metrics.successRate >= 30) {
+ continue;
+ }
+
+ // Check if enough time has passed since last attempt
+ const lastCompletedTime = metrics.lastCompleted
+ ? new Date(metrics.lastCompleted).getTime()
+ : 0;
+ const timeSinceLastAttempt = now - lastCompletedTime;
+
+ if (timeSinceLastAttempt >= gracePeriodMs) {
+ // This task type is eligible for rehabilitation
+ emitLog('info', `Auto-rehabilitating ${taskType} (was ${metrics.successRate}% success, ${Math.round(timeSinceLastAttempt / (24 * 60 * 60 * 1000))} days since last attempt)`, {
+ taskType,
+ previousSuccessRate: metrics.successRate,
+ previousAttempts: metrics.completed,
+ daysSinceLastAttempt: Math.round(timeSinceLastAttempt / (24 * 60 * 60 * 1000))
+ }, '๐ TaskLearning');
+
+ // Reset this task type's data
+ await resetTaskTypeLearning(taskType);
+
+ rehabilitated.push({
+ taskType,
+ previousSuccessRate: metrics.successRate,
+ previousAttempts: metrics.completed,
+ daysSinceLastAttempt: Math.round(timeSinceLastAttempt / (24 * 60 * 60 * 1000))
+ });
+ }
+ }
+
+ if (rehabilitated.length > 0) {
+ emitLog('success', `Auto-rehabilitated ${rehabilitated.length} skipped task type(s)`, {
+ rehabilitated: rehabilitated.map(r => r.taskType)
+ }, '๐ TaskLearning');
+ }
+
+ return { rehabilitated, count: rehabilitated.length };
+}
+
+/**
+ * Get all skipped task types with their rehabilitation eligibility status
+ * Useful for UI display and debugging
+ * @param {number} gracePeriodMs - Grace period for rehabilitation eligibility
+ * @returns {Array} List of skipped task types with status info
+ */
+export async function getSkippedTaskTypesWithStatus(gracePeriodMs = 7 * 24 * 60 * 60 * 1000) {
+ const data = await loadLearningData();
+ const skipped = [];
+ const now = Date.now();
+
+ for (const [taskType, metrics] of Object.entries(data.byTaskType)) {
+ // Only include task types that would be skipped
+ if (metrics.completed < 5 || metrics.successRate >= 30) {
+ continue;
+ }
+
+ const lastCompletedTime = metrics.lastCompleted
+ ? new Date(metrics.lastCompleted).getTime()
+ : 0;
+ const timeSinceLastAttempt = now - lastCompletedTime;
+ const eligibleForRehabilitation = timeSinceLastAttempt >= gracePeriodMs;
+ const timeUntilEligible = eligibleForRehabilitation
+ ? 0
+ : gracePeriodMs - timeSinceLastAttempt;
+
+ skipped.push({
+ taskType,
+ successRate: metrics.successRate,
+ completed: metrics.completed,
+ lastCompleted: metrics.lastCompleted,
+ daysSinceLastAttempt: Math.round(timeSinceLastAttempt / (24 * 60 * 60 * 1000)),
+ eligibleForRehabilitation,
+ daysUntilEligible: Math.ceil(timeUntilEligible / (24 * 60 * 60 * 1000))
+ });
+ }
+
+ return skipped;
+}
+
+/**
+ * Reset learning data for a specific task type
+ * Used when a previously-failing task type has been fixed and should be retried
+ * Subtracts the task type's metrics from totals and removes the task type entry
+ * @param {string} taskType - The task type to reset (e.g., 'self-improve:ui')
+ * @returns {Object} Summary of what was reset
+ */
+export async function resetTaskTypeLearning(taskType) {
+ const data = await loadLearningData();
+
+ const metrics = data.byTaskType[taskType];
+ if (!metrics) {
+ return { reset: false, reason: 'task-type-not-found', taskType };
+ }
+
+ // Subtract this task type's contribution from totals
+ data.totals.completed -= metrics.completed;
+ data.totals.succeeded -= metrics.succeeded;
+ data.totals.failed -= metrics.failed;
+ data.totals.totalDurationMs -= metrics.totalDurationMs;
+ data.totals.avgDurationMs = data.totals.completed > 0
+ ? Math.round(data.totals.totalDurationMs / data.totals.completed)
+ : 0;
+
+ // Clean up error patterns referencing this task type
+ for (const [category, pattern] of Object.entries(data.errorPatterns)) {
+ const taskTypeCount = pattern.taskTypes[taskType] || 0;
+ if (taskTypeCount > 0) {
+ pattern.count -= taskTypeCount;
+ delete pattern.taskTypes[taskType];
+ }
+ // Remove empty error categories
+ if (pattern.count <= 0) {
+ delete data.errorPatterns[category];
+ }
+ }
+
+ // Remove the task type entry
+ delete data.byTaskType[taskType];
+
+ await saveLearningData(data);
+
+ emitLog('info', `Reset learning data for ${taskType} (was ${metrics.successRate}% success after ${metrics.completed} attempts)`, {
+ taskType,
+ previousSuccessRate: metrics.successRate,
+ previousAttempts: metrics.completed
+ }, '๐ TaskLearning');
+
+ return {
+ reset: true,
+ taskType,
+ previousMetrics: {
+ completed: metrics.completed,
+ succeeded: metrics.succeeded,
+ failed: metrics.failed,
+ successRate: metrics.successRate
+ }
+ };
+}
+
/**
* Get estimated duration for a task based on historical averages
* @param {string} taskDescription - The task description to analyze
@@ -684,11 +842,11 @@ export function initTaskLearning() {
};
await recordTaskCompletion(agent, task).catch(err => {
- console.error(`โ [TaskLearning] Failed to record completion: ${err.message}`);
+ console.error(`โ ๐ TaskLearning: Failed to record completion: ${err.message}`);
});
});
- emitLog('info', 'Task Learning System initialized', {}, '[TaskLearning]');
+ emitLog('info', 'Task Learning System initialized', {}, '๐ TaskLearning');
}
/**
@@ -714,6 +872,6 @@ export async function backfillFromHistory() {
}
}
- emitLog('info', `Backfilled ${backfilled} completed tasks into learning system`, { backfilled }, '[TaskLearning]');
+ emitLog('info', `Backfilled ${backfilled} completed tasks into learning system`, { backfilled }, '๐ TaskLearning');
return backfilled;
}
diff --git a/server/services/taskLearning.test.js b/server/services/taskLearning.test.js
new file mode 100644
index 0000000..ae245b2
--- /dev/null
+++ b/server/services/taskLearning.test.js
@@ -0,0 +1,240 @@
+import { describe, it, expect, vi, beforeEach } from 'vitest';
+
+// Mock fs/promises and fs before importing the module
+vi.mock('fs/promises', () => ({
+ readFile: vi.fn(),
+ writeFile: vi.fn(),
+ mkdir: vi.fn()
+}));
+
+vi.mock('fs', () => ({
+ existsSync: vi.fn(() => true)
+}));
+
+// Mock cos.js to avoid circular dependency
+vi.mock('./cos.js', () => ({
+ cosEvents: { on: vi.fn(), emit: vi.fn() },
+ emitLog: vi.fn()
+}));
+
+// Mock fileUtils.js to use our mocked fs/promises
+vi.mock('../lib/fileUtils.js', async (importOriginal) => {
+ const fsPromises = await import('fs/promises');
+ const fs = await import('fs');
+ return {
+ readJSONFile: vi.fn(async (filePath, defaultValue) => {
+ if (!fs.existsSync(filePath)) return defaultValue;
+ const content = await fsPromises.readFile(filePath, 'utf-8');
+ if (!content || !content.trim()) return defaultValue;
+ return JSON.parse(content);
+ })
+ };
+});
+
+import { readFile, writeFile } from 'fs/promises';
+import { resetTaskTypeLearning, getSkippedTaskTypes } from './taskLearning.js';
+
+const makeLearningData = (overrides = {}) => ({
+ version: 1,
+ lastUpdated: '2026-01-26T00:00:00.000Z',
+ byTaskType: {
+ 'self-improve:ui': {
+ completed: 200,
+ succeeded: 10,
+ failed: 190,
+ totalDurationMs: 2000000,
+ avgDurationMs: 10000,
+ lastCompleted: '2026-01-25T00:00:00.000Z',
+ successRate: 5
+ },
+ 'user-task': {
+ completed: 40,
+ succeeded: 30,
+ failed: 10,
+ totalDurationMs: 4000000,
+ avgDurationMs: 100000,
+ lastCompleted: '2026-01-26T00:00:00.000Z',
+ successRate: 75
+ }
+ },
+ byModelTier: {
+ 'user-specified': {
+ completed: 240,
+ succeeded: 40,
+ failed: 200,
+ totalDurationMs: 6000000,
+ avgDurationMs: 25000
+ }
+ },
+ errorPatterns: {
+ 'server-error': {
+ count: 190,
+ taskTypes: { 'self-improve:ui': 185, 'user-task': 5 },
+ lastOccurred: '2026-01-25T00:00:00.000Z'
+ },
+ 'unknown': {
+ count: 10,
+ taskTypes: { 'self-improve:ui': 10 },
+ lastOccurred: '2026-01-24T00:00:00.000Z'
+ }
+ },
+ totals: {
+ completed: 240,
+ succeeded: 40,
+ failed: 200,
+ totalDurationMs: 6000000,
+ avgDurationMs: 25000
+ },
+ ...overrides
+});
+
+describe('TaskLearning - resetTaskTypeLearning', () => {
+ let savedData;
+
+ beforeEach(() => {
+ vi.clearAllMocks();
+ savedData = null;
+ writeFile.mockImplementation(async (_path, content) => {
+ savedData = JSON.parse(content);
+ });
+ });
+
+ it('should return not-found when task type does not exist', async () => {
+ readFile.mockResolvedValue(JSON.stringify(makeLearningData()));
+
+ const result = await resetTaskTypeLearning('nonexistent-type');
+
+ expect(result.reset).toBe(false);
+ expect(result.reason).toBe('task-type-not-found');
+ });
+
+ it('should remove the task type from byTaskType', async () => {
+ readFile.mockResolvedValue(JSON.stringify(makeLearningData()));
+
+ const result = await resetTaskTypeLearning('self-improve:ui');
+
+ expect(result.reset).toBe(true);
+ expect(result.taskType).toBe('self-improve:ui');
+ expect(savedData.byTaskType['self-improve:ui']).toBeUndefined();
+ expect(savedData.byTaskType['user-task']).toBeDefined();
+ });
+
+ it('should subtract task type metrics from totals', async () => {
+ readFile.mockResolvedValue(JSON.stringify(makeLearningData()));
+
+ await resetTaskTypeLearning('self-improve:ui');
+
+ // Original totals: completed=240, succeeded=40, failed=200, totalDurationMs=6000000
+ // self-improve:ui: completed=200, succeeded=10, failed=190, totalDurationMs=2000000
+ // After reset: completed=40, succeeded=30, failed=10, totalDurationMs=4000000
+ expect(savedData.totals.completed).toBe(40);
+ expect(savedData.totals.succeeded).toBe(30);
+ expect(savedData.totals.failed).toBe(10);
+ expect(savedData.totals.totalDurationMs).toBe(4000000);
+ expect(savedData.totals.avgDurationMs).toBe(100000); // 4000000 / 40
+ });
+
+ it('should clean up error patterns referencing the task type', async () => {
+ readFile.mockResolvedValue(JSON.stringify(makeLearningData()));
+
+ await resetTaskTypeLearning('self-improve:ui');
+
+ // server-error had 190 total (185 from ui, 5 from user-task) โ should now have 5
+ expect(savedData.errorPatterns['server-error'].count).toBe(5);
+ expect(savedData.errorPatterns['server-error'].taskTypes['self-improve:ui']).toBeUndefined();
+ expect(savedData.errorPatterns['server-error'].taskTypes['user-task']).toBe(5);
+
+ // unknown had 10 total, all from ui โ should be removed entirely
+ expect(savedData.errorPatterns['unknown']).toBeUndefined();
+ });
+
+ it('should return previous metrics in result', async () => {
+ readFile.mockResolvedValue(JSON.stringify(makeLearningData()));
+
+ const result = await resetTaskTypeLearning('self-improve:ui');
+
+ expect(result.previousMetrics).toEqual({
+ completed: 200,
+ succeeded: 10,
+ failed: 190,
+ successRate: 5
+ });
+ });
+
+ it('should handle totals going to zero gracefully', async () => {
+ const data = makeLearningData({
+ byTaskType: {
+ 'self-improve:ui': {
+ completed: 100, succeeded: 5, failed: 95,
+ totalDurationMs: 500000, avgDurationMs: 5000,
+ lastCompleted: '2026-01-25T00:00:00.000Z', successRate: 5
+ }
+ },
+ errorPatterns: {},
+ totals: {
+ completed: 100, succeeded: 5, failed: 95,
+ totalDurationMs: 500000, avgDurationMs: 5000
+ }
+ });
+ readFile.mockResolvedValue(JSON.stringify(data));
+
+ await resetTaskTypeLearning('self-improve:ui');
+
+ expect(savedData.totals.completed).toBe(0);
+ expect(savedData.totals.avgDurationMs).toBe(0);
+ });
+});
+
+describe('TaskLearning - getSkippedTaskTypes', () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ });
+
+ it('should return task types with <30% success and 5+ attempts', async () => {
+ readFile.mockResolvedValue(JSON.stringify(makeLearningData()));
+
+ const skipped = await getSkippedTaskTypes();
+
+ expect(skipped).toHaveLength(1);
+ expect(skipped[0].taskType).toBe('self-improve:ui');
+ expect(skipped[0].successRate).toBe(5);
+ });
+
+ it('should not include task types with >= 30% success', async () => {
+ readFile.mockResolvedValue(JSON.stringify(makeLearningData()));
+
+ const skipped = await getSkippedTaskTypes();
+
+ const userTask = skipped.find(s => s.taskType === 'user-task');
+ expect(userTask).toBeUndefined();
+ });
+
+ it('should return empty after resetting a skipped type', async () => {
+ const data = makeLearningData({
+ byTaskType: {
+ 'self-improve:ui': {
+ completed: 100, succeeded: 5, failed: 95,
+ totalDurationMs: 500000, avgDurationMs: 5000,
+ lastCompleted: '2026-01-25T00:00:00.000Z', successRate: 5
+ }
+ },
+ errorPatterns: {},
+ totals: {
+ completed: 100, succeeded: 5, failed: 95,
+ totalDurationMs: 500000, avgDurationMs: 5000
+ }
+ });
+
+ // Track what was written so subsequent reads return updated data
+ let currentData = JSON.stringify(data);
+ readFile.mockImplementation(async () => currentData);
+ writeFile.mockImplementation(async (_path, content) => {
+ currentData = content;
+ });
+
+ await resetTaskTypeLearning('self-improve:ui');
+ const skipped = await getSkippedTaskTypes();
+
+ expect(skipped).toHaveLength(0);
+ });
+});
diff --git a/server/services/taskSchedule.js b/server/services/taskSchedule.js
index 2790f8a..e20377f 100644
--- a/server/services/taskSchedule.js
+++ b/server/services/taskSchedule.js
@@ -16,11 +16,12 @@
* - 'custom': Custom interval in milliseconds
*/
-import { readFile, writeFile, mkdir } from 'fs/promises';
+import { writeFile, mkdir } from 'fs/promises';
import { existsSync } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import { cosEvents, emitLog } from './cos.js';
+import { readJSONFile } from '../lib/fileUtils.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
@@ -42,34 +43,537 @@ const HOUR = 60 * 60 * 1000;
const DAY = 24 * HOUR;
const WEEK = 7 * DAY;
+// Default prompts for self-improvement task types
+const DEFAULT_SELF_IMPROVEMENT_PROMPTS = {
+ 'ui-bugs': `[Self-Improvement] UI Bug Analysis
+
+Use Playwright MCP (browser_navigate, browser_snapshot, browser_console_messages) to analyze PortOS UI:
+
+1. Navigate to http://localhost:5555/
+2. Check each main route: /, /apps, /cos, /cos/tasks, /cos/agents, /devtools, /devtools/history, /providers, /usage
+3. For each route:
+ - Take a browser_snapshot to see the page structure
+ - Check browser_console_messages for JavaScript errors
+ - Look for broken UI elements, missing data, failed requests
+4. Fix any bugs found in the React components or API routes
+5. Run tests and commit changes`,
+
+ 'mobile-responsive': `[Self-Improvement] Mobile Responsiveness Analysis
+
+Use Playwright MCP to test PortOS at different viewport sizes:
+
+1. browser_resize to mobile (375x812), then navigate to http://localhost:5555/
+2. Take browser_snapshot and analyze for:
+ - Text overflow or truncation
+ - Buttons too small to tap (< 44px)
+ - Horizontal scrolling issues
+ - Elements overlapping
+ - Navigation usability
+3. Repeat at tablet (768x1024) and desktop (1440x900)
+4. Fix Tailwind CSS responsive classes (sm:, md:, lg:) as needed
+5. Test fixes and commit changes
+
+Focus on these routes: /cos, /cos/tasks, /devtools, /providers`,
+
+ 'security': `[Self-Improvement] Security Audit
+
+Analyze PortOS codebase for security vulnerabilities:
+
+1. Review server/routes/*.js for:
+ - Command injection in exec/spawn calls
+ - Path traversal in file operations
+ - Missing input validation
+ - XSS in rendered content
+
+2. Review server/services/*.js for:
+ - Unsafe eval() or Function()
+ - Hardcoded credentials
+ - SQL/NoSQL injection
+
+3. Review client/src/ for:
+ - XSS vulnerabilities in React
+ - Sensitive data in localStorage
+ - CSRF protection
+
+4. Check server/lib/commandAllowlist.js is comprehensive
+
+Fix any vulnerabilities and commit with security advisory notes.`,
+
+ 'code-quality': `[Self-Improvement] Code Quality Review
+
+Analyze PortOS codebase for maintainability:
+
+1. Find DRY violations - similar code in multiple places
+2. Identify functions >50 lines that should be split
+3. Look for missing error handling
+4. Find dead code and unused imports
+5. Check for console.log that should be removed
+6. Look for TODO/FIXME that need addressing
+
+Focus on:
+- server/services/*.js
+- client/src/pages/*.jsx
+- client/src/components/*.jsx
+
+Refactor issues found and commit improvements.`,
+
+ 'console-errors': `[Self-Improvement] Console Error Investigation
+
+Use Playwright MCP to find and fix console errors:
+
+1. Navigate to http://localhost:5555/
+2. Call browser_console_messages with level: "error"
+3. Visit each route and capture errors:
+ - /, /apps, /cos, /cos/tasks, /cos/agents
+ - /devtools, /devtools/history, /devtools/runner
+ - /providers, /usage, /prompts
+
+4. For each error:
+ - Identify the source file and line
+ - Understand the root cause
+ - Implement a fix
+
+5. Test fixes and commit changes`,
+
+ 'performance': `[Self-Improvement] Performance Analysis
+
+Analyze PortOS for performance issues:
+
+1. Review React components for:
+ - Unnecessary re-renders
+ - Missing useMemo/useCallback
+ - Large component files that should be split
+
+2. Review server code for:
+ - N+1 query patterns
+ - Missing caching opportunities
+ - Inefficient file operations
+
+3. Review client bundle for:
+ - Missing code splitting
+ - Large dependencies that could be tree-shaken
+
+4. Check Socket.IO for:
+ - Event handler memory leaks
+ - Unnecessary broadcasts
+
+Optimize and commit improvements.`,
+
+ 'cos-enhancement': `[Self-Improvement] Enhance CoS Capabilities
+
+Review the CoS system and add new capabilities:
+
+1. Read data/COS-GOALS.md to understand the mission and goals
+2. Review server/services/cos.js for improvement opportunities:
+ - Better task prioritization logic
+ - Smarter model selection
+ - More informative status messages
+ - Better error recovery
+
+3. Review the self-improvement task prompts:
+ - Are they comprehensive enough?
+ - Do they lead to quality fixes?
+ - What new analysis types could be added?
+
+4. Consider adding:
+ - New MCP server integrations
+ - Better metrics tracking
+ - Learning from completed tasks
+ - Smarter cooldown logic
+
+5. Implement ONE meaningful enhancement and commit it
+
+Focus on making CoS more autonomous and effective.`,
+
+ 'test-coverage': `[Self-Improvement] Improve Test Coverage
+
+Analyze and improve test coverage for PortOS:
+
+1. Check existing tests in server/tests/ and client/tests/
+2. Identify untested critical paths:
+ - API routes without tests
+ - Services with complex logic
+ - Error handling paths
+
+3. Add tests for:
+ - CoS task evaluation logic
+ - Agent spawning and lifecycle
+ - Socket.IO event handlers
+ - API endpoints
+
+4. Ensure tests:
+ - Follow existing patterns
+ - Use appropriate mocks
+ - Test edge cases
+
+5. Run npm test to verify all tests pass
+6. Commit test additions with clear message describing what's covered`,
+
+ 'documentation': `[Self-Improvement] Update Documentation
+
+Review and improve PortOS documentation:
+
+1. Update PLAN.md:
+ - Mark completed milestones
+ - Add any new features implemented
+ - Document architectural decisions
+
+2. Check docs/ folder:
+ - Are all features documented?
+ - Is the information current?
+ - Add any missing guides
+
+3. Review code comments:
+ - Add JSDoc to exported functions
+ - Document complex algorithms
+ - Explain non-obvious code
+
+4. Update README.md if needed:
+ - Installation instructions
+ - Quick start guide
+ - Feature overview
+
+5. Consider adding:
+ - Architecture diagrams
+ - API documentation
+ - Troubleshooting guide
+
+Commit documentation improvements.`,
+
+ 'feature-ideas': `[Self-Improvement] Brainstorm and Implement Feature
+
+Think about ways to make PortOS more useful:
+
+1. Read data/COS-GOALS.md for context on user goals
+2. Review recent completed tasks to understand patterns
+3. Consider these areas:
+ - User task management improvements
+ - Better progress visualization
+ - New automation capabilities
+ - Enhanced monitoring features
+
+4. Choose ONE small, high-impact feature to implement:
+ - Something that saves user time
+ - Improves the developer experience
+ - Makes CoS more helpful
+
+5. Implement it:
+ - Write clean, tested code
+ - Follow existing patterns
+ - Update relevant documentation
+
+6. Commit with a clear description of the new feature
+
+Think creatively but implement practically.`,
+
+ 'accessibility': `[Self-Improvement] Accessibility Audit
+
+Use Playwright MCP to audit PortOS accessibility:
+
+1. Navigate to http://localhost:5555/
+2. Use browser_snapshot to get accessibility tree
+3. Check each main route for:
+ - Missing ARIA labels
+ - Missing alt text on images
+ - Insufficient color contrast
+ - Keyboard navigation issues
+ - Focus indicators
+
+4. Fix accessibility issues in React components
+5. Add appropriate aria-* attributes
+6. Test and commit changes`,
+
+ 'dependency-updates': `[Self-Improvement] Dependency Updates and Security Audit
+
+Check PortOS dependencies for updates and security vulnerabilities:
+
+1. Run npm audit in both server/ and client/ directories
+2. Check for outdated packages with npm outdated
+3. Review CRITICAL and HIGH severity vulnerabilities
+4. For each vulnerability:
+ - Assess the actual risk (is the vulnerable code path used?)
+ - Check if an update is available
+ - Test that updates don't break functionality
+
+5. Update dependencies carefully:
+ - Update patch versions first (safest)
+ - Then minor versions
+ - Major versions need more careful review
+
+6. After updating:
+ - Run npm test in server/
+ - Run npm run build in client/
+ - Verify the app starts correctly
+
+7. Commit with clear changelog of what was updated and why`
+};
+
// Default interval settings for self-improvement task types
const DEFAULT_SELF_IMPROVEMENT_INTERVALS = {
- 'ui-bugs': { type: INTERVAL_TYPES.ROTATION, enabled: true },
- 'mobile-responsive': { type: INTERVAL_TYPES.WEEKLY, enabled: true },
- 'security': { type: INTERVAL_TYPES.WEEKLY, enabled: true },
- 'code-quality': { type: INTERVAL_TYPES.ROTATION, enabled: true },
- 'console-errors': { type: INTERVAL_TYPES.ROTATION, enabled: true },
- 'performance': { type: INTERVAL_TYPES.WEEKLY, enabled: true },
- 'cos-enhancement': { type: INTERVAL_TYPES.ROTATION, enabled: true },
- 'test-coverage': { type: INTERVAL_TYPES.WEEKLY, enabled: true },
- 'documentation': { type: INTERVAL_TYPES.WEEKLY, enabled: true },
- 'feature-ideas': { type: INTERVAL_TYPES.DAILY, enabled: true },
- 'accessibility': { type: INTERVAL_TYPES.WEEKLY, enabled: true },
- 'dependency-updates': { type: INTERVAL_TYPES.WEEKLY, enabled: true }
+ 'ui-bugs': { type: INTERVAL_TYPES.ROTATION, enabled: true, providerId: null, model: null, prompt: null },
+ 'mobile-responsive': { type: INTERVAL_TYPES.WEEKLY, enabled: true, providerId: null, model: null, prompt: null },
+ 'security': { type: INTERVAL_TYPES.WEEKLY, enabled: true, providerId: null, model: null, prompt: null },
+ 'code-quality': { type: INTERVAL_TYPES.ROTATION, enabled: true, providerId: null, model: null, prompt: null },
+ 'console-errors': { type: INTERVAL_TYPES.ROTATION, enabled: true, providerId: null, model: null, prompt: null },
+ 'performance': { type: INTERVAL_TYPES.WEEKLY, enabled: true, providerId: null, model: null, prompt: null },
+ 'cos-enhancement': { type: INTERVAL_TYPES.ROTATION, enabled: true, providerId: null, model: null, prompt: null },
+ 'test-coverage': { type: INTERVAL_TYPES.WEEKLY, enabled: true, providerId: null, model: null, prompt: null },
+ 'documentation': { type: INTERVAL_TYPES.WEEKLY, enabled: true, providerId: null, model: null, prompt: null },
+ 'feature-ideas': { type: INTERVAL_TYPES.DAILY, enabled: true, providerId: null, model: null, prompt: null },
+ 'accessibility': { type: INTERVAL_TYPES.WEEKLY, enabled: true, providerId: null, model: null, prompt: null },
+ 'dependency-updates': { type: INTERVAL_TYPES.WEEKLY, enabled: true, providerId: null, model: null, prompt: null }
+};
+
+// Default prompts for app improvement task types (templates with {appName} and {repoPath} variables)
+const DEFAULT_APP_IMPROVEMENT_PROMPTS = {
+ 'security-audit': `[App Improvement: {appName}] Security Audit
+
+Analyze the {appName} codebase for security vulnerabilities:
+
+Repository: {repoPath}
+
+1. Review routes/controllers for:
+ - Command injection in exec/spawn calls
+ - Path traversal in file operations
+ - Missing input validation
+ - XSS vulnerabilities
+ - SQL/NoSQL injection
+
+2. Review services for:
+ - Unsafe eval() or Function()
+ - Hardcoded credentials
+ - Insecure dependencies
+
+3. Review client code for:
+ - XSS vulnerabilities
+ - Sensitive data in localStorage
+ - CSRF protection
+
+4. Check authentication and authorization:
+ - Secure password handling
+ - Token management
+ - Access control
+
+Fix any vulnerabilities found and commit with security advisory notes.`,
+
+ 'code-quality': `[App Improvement: {appName}] Code Quality Review
+
+Analyze {appName} for maintainability improvements:
+
+Repository: {repoPath}
+
+1. Find DRY violations - similar code in multiple places
+2. Identify functions >50 lines that should be split
+3. Look for missing error handling
+4. Find dead code and unused imports
+5. Check for console.log that should be removed
+6. Look for TODO/FIXME that need addressing
+7. Identify magic numbers that should be constants
+
+Focus on the main source directories. Refactor issues found and commit improvements.`,
+
+ 'test-coverage': `[App Improvement: {appName}] Improve Test Coverage
+
+Analyze and improve test coverage for {appName}:
+
+Repository: {repoPath}
+
+1. Check existing tests and identify untested critical paths
+2. Look for:
+ - API routes without tests
+ - Services with complex logic
+ - Error handling paths
+ - Edge cases
+
+3. Add tests following existing patterns in the project
+4. Ensure tests:
+ - Use appropriate mocks
+ - Test edge cases
+ - Follow naming conventions
+
+5. Run tests to verify all pass
+6. Commit test additions with clear message describing coverage`,
+
+ 'performance': `[App Improvement: {appName}] Performance Analysis
+
+Analyze {appName} for performance issues:
+
+Repository: {repoPath}
+
+1. Review components/views for:
+ - Unnecessary re-renders
+ - Missing memoization
+ - Large files that should be split
+
+2. Review backend for:
+ - N+1 query patterns
+ - Missing caching opportunities
+ - Inefficient file operations
+ - Slow API endpoints
+
+3. Review build/bundle for:
+ - Missing code splitting
+ - Large dependencies that could be optimized
+
+4. Check for:
+ - Memory leaks
+ - Unnecessary broadcasts/events
+
+Optimize and commit improvements.`,
+
+ 'accessibility': `[App Improvement: {appName}] Accessibility Audit
+
+Audit {appName} for accessibility issues:
+
+Repository: {repoPath}
+
+If the app has a web UI:
+1. Navigate to the app's UI
+2. Check for:
+ - Missing ARIA labels
+ - Missing alt text on images
+ - Insufficient color contrast
+ - Keyboard navigation issues
+ - Focus indicators
+ - Semantic HTML usage
+
+3. Fix accessibility issues in components
+4. Add appropriate aria-* attributes
+5. Test and commit changes`,
+
+ 'console-errors': `[App Improvement: {appName}] Console Error Investigation
+
+Find and fix console errors in {appName}:
+
+Repository: {repoPath}
+
+1. If the app has a UI, check browser console for errors
+2. Check server logs for errors
+3. For each error:
+ - Identify the source file and line
+ - Understand the root cause
+ - Implement a fix
+
+4. Test fixes and commit changes`,
+
+ 'dependency-updates': `[App Improvement: {appName}] Dependency Updates
+
+Check {appName} dependencies for updates and security vulnerabilities:
+
+Repository: {repoPath}
+
+1. Run npm audit (or equivalent package manager)
+2. Check for outdated packages
+3. Review CRITICAL and HIGH severity vulnerabilities
+4. For each vulnerability:
+ - Assess actual risk
+ - Check if update available
+ - Test updates don't break functionality
+
+5. Update dependencies carefully:
+ - Patch versions first (safest)
+ - Then minor versions
+ - Major versions need careful review
+
+6. After updating:
+ - Run tests
+ - Verify the app starts correctly
+
+7. Commit with clear changelog
+
+IMPORTANT: Only update one major version bump at a time.`,
+
+ 'documentation': `[App Improvement: {appName}] Update Documentation
+
+Review and improve {appName} documentation:
+
+Repository: {repoPath}
+
+1. Check README.md:
+ - Installation instructions current?
+ - Quick start guide clear?
+ - Feature overview complete?
+
+2. Review inline documentation:
+ - Add JSDoc to exported functions
+ - Document complex algorithms
+ - Explain non-obvious code
+
+3. Check for docs/ folder:
+ - Are all features documented?
+ - Is information current?
+ - Add missing guides if needed
+
+4. Update PLAN.md or similar if present:
+ - Mark completed milestones
+ - Document architectural decisions
+
+Commit documentation improvements.`,
+
+ 'error-handling': `[App Improvement: {appName}] Improve Error Handling
+
+Enhance error handling in {appName}:
+
+Repository: {repoPath}
+
+1. Review code for:
+ - Missing try-catch blocks where needed
+ - Silent failures (empty catch blocks)
+ - Errors that should be logged
+ - User-facing error messages
+
+2. Add error handling for:
+ - Network requests
+ - File operations
+ - Database queries
+ - External API calls
+
+3. Ensure errors are:
+ - Logged appropriately
+ - Have clear messages
+ - Include relevant context
+ - Don't expose sensitive data
+
+4. Test error paths and commit improvements`,
+
+ 'typing': `[App Improvement: {appName}] TypeScript Type Improvements
+
+Improve TypeScript types in {appName}:
+
+Repository: {repoPath}
+
+1. Review TypeScript files for:
+ - 'any' types that should be specific
+ - Missing type annotations
+ - Type assertions that could be avoided
+ - Missing interfaces/types for objects
+
+2. Add types for:
+ - Function parameters and returns
+ - Component props
+ - API responses
+ - Configuration objects
+
+3. Ensure:
+ - Types are properly exported
+ - No implicit any
+ - Types are reusable
+
+4. Run type checking and commit improvements`
};
// Default interval settings for managed app improvement task types
const DEFAULT_APP_IMPROVEMENT_INTERVALS = {
- 'security-audit': { type: INTERVAL_TYPES.WEEKLY, enabled: true },
- 'code-quality': { type: INTERVAL_TYPES.ROTATION, enabled: true },
- 'test-coverage': { type: INTERVAL_TYPES.WEEKLY, enabled: true },
- 'performance': { type: INTERVAL_TYPES.WEEKLY, enabled: true },
- 'accessibility': { type: INTERVAL_TYPES.ONCE, enabled: true },
- 'console-errors': { type: INTERVAL_TYPES.ROTATION, enabled: true },
- 'dependency-updates': { type: INTERVAL_TYPES.WEEKLY, enabled: true },
- 'documentation': { type: INTERVAL_TYPES.ONCE, enabled: true },
- 'error-handling': { type: INTERVAL_TYPES.ROTATION, enabled: true },
- 'typing': { type: INTERVAL_TYPES.ONCE, enabled: true }
+ 'security-audit': { type: INTERVAL_TYPES.WEEKLY, enabled: true, providerId: null, model: null, prompt: null },
+ 'code-quality': { type: INTERVAL_TYPES.ROTATION, enabled: true, providerId: null, model: null, prompt: null },
+ 'test-coverage': { type: INTERVAL_TYPES.WEEKLY, enabled: true, providerId: null, model: null, prompt: null },
+ 'performance': { type: INTERVAL_TYPES.WEEKLY, enabled: true, providerId: null, model: null, prompt: null },
+ 'accessibility': { type: INTERVAL_TYPES.ONCE, enabled: true, providerId: null, model: null, prompt: null },
+ 'console-errors': { type: INTERVAL_TYPES.ROTATION, enabled: true, providerId: null, model: null, prompt: null },
+ 'dependency-updates': { type: INTERVAL_TYPES.WEEKLY, enabled: true, providerId: null, model: null, prompt: null },
+ 'documentation': { type: INTERVAL_TYPES.ONCE, enabled: true, providerId: null, model: null, prompt: null },
+ 'error-handling': { type: INTERVAL_TYPES.ROTATION, enabled: true, providerId: null, model: null, prompt: null },
+ 'typing': { type: INTERVAL_TYPES.ONCE, enabled: true, providerId: null, model: null, prompt: null }
};
/**
@@ -113,15 +617,13 @@ async function ensureDir() {
export async function loadSchedule() {
await ensureDir();
- if (!existsSync(SCHEDULE_FILE)) {
+ const loaded = await readJSONFile(SCHEDULE_FILE, null);
+ if (!loaded) {
return { ...DEFAULT_SCHEDULE };
}
- const content = await readFile(SCHEDULE_FILE, 'utf-8');
- const loaded = JSON.parse(content);
-
// Merge with defaults to ensure all task types have settings
- return {
+ const schedule = {
...DEFAULT_SCHEDULE,
...loaded,
selfImprovement: {
@@ -135,6 +637,29 @@ export async function loadSchedule() {
executions: loaded.executions || {},
templates: loaded.templates || []
};
+
+ // Populate prompts from defaults if they don't exist
+ let needsSave = false;
+ for (const [taskType, config] of Object.entries(schedule.selfImprovement)) {
+ if (!config.prompt && DEFAULT_SELF_IMPROVEMENT_PROMPTS[taskType]) {
+ config.prompt = DEFAULT_SELF_IMPROVEMENT_PROMPTS[taskType];
+ needsSave = true;
+ }
+ }
+
+ for (const [taskType, config] of Object.entries(schedule.appImprovement)) {
+ if (!config.prompt && DEFAULT_APP_IMPROVEMENT_PROMPTS[taskType]) {
+ config.prompt = DEFAULT_APP_IMPROVEMENT_PROMPTS[taskType];
+ needsSave = true;
+ }
+ }
+
+ // Save if we populated any prompts
+ if (needsSave) {
+ await saveSchedule(schedule);
+ }
+
+ return schedule;
}
/**
@@ -151,7 +676,7 @@ async function saveSchedule(schedule) {
*/
export async function getSelfImprovementInterval(taskType) {
const schedule = await loadSchedule();
- return schedule.selfImprovement[taskType] || { type: INTERVAL_TYPES.ROTATION, enabled: true };
+ return schedule.selfImprovement[taskType] || { type: INTERVAL_TYPES.ROTATION, enabled: true, providerId: null, model: null };
}
/**
@@ -159,7 +684,7 @@ export async function getSelfImprovementInterval(taskType) {
*/
export async function getAppImprovementInterval(taskType) {
const schedule = await loadSchedule();
- return schedule.appImprovement[taskType] || { type: INTERVAL_TYPES.ROTATION, enabled: true };
+ return schedule.appImprovement[taskType] || { type: INTERVAL_TYPES.ROTATION, enabled: true, providerId: null, model: null };
}
/**
@@ -169,7 +694,7 @@ export async function updateSelfImprovementInterval(taskType, settings) {
const schedule = await loadSchedule();
if (!schedule.selfImprovement[taskType]) {
- schedule.selfImprovement[taskType] = { type: INTERVAL_TYPES.ROTATION, enabled: true };
+ schedule.selfImprovement[taskType] = { type: INTERVAL_TYPES.ROTATION, enabled: true, providerId: null, model: null };
}
schedule.selfImprovement[taskType] = {
@@ -178,7 +703,7 @@ export async function updateSelfImprovementInterval(taskType, settings) {
};
await saveSchedule(schedule);
- emitLog('info', `Updated self-improvement interval for ${taskType}`, { taskType, settings }, '[TaskSchedule]');
+ emitLog('info', `Updated self-improvement interval for ${taskType}`, { taskType, settings }, '๐
TaskSchedule');
cosEvents.emit('schedule:changed', { category: 'selfImprovement', taskType, settings });
return schedule.selfImprovement[taskType];
@@ -191,7 +716,7 @@ export async function updateAppImprovementInterval(taskType, settings) {
const schedule = await loadSchedule();
if (!schedule.appImprovement[taskType]) {
- schedule.appImprovement[taskType] = { type: INTERVAL_TYPES.ROTATION, enabled: true };
+ schedule.appImprovement[taskType] = { type: INTERVAL_TYPES.ROTATION, enabled: true, providerId: null, model: null };
}
schedule.appImprovement[taskType] = {
@@ -200,7 +725,7 @@ export async function updateAppImprovementInterval(taskType, settings) {
};
await saveSchedule(schedule);
- emitLog('info', `Updated app improvement interval for ${taskType}`, { taskType, settings }, '[TaskSchedule]');
+ emitLog('info', `Updated app improvement interval for ${taskType}`, { taskType, settings }, '๐
TaskSchedule');
cosEvents.emit('schedule:changed', { category: 'appImprovement', taskType, settings });
return schedule.appImprovement[taskType];
@@ -525,7 +1050,7 @@ export async function addTemplateTask(template) {
schedule.templates.push(newTemplate);
await saveSchedule(schedule);
- emitLog('info', `Added template task: ${newTemplate.name}`, { templateId: newTemplate.id }, '[TaskSchedule]');
+ emitLog('info', `Added template task: ${newTemplate.name}`, { templateId: newTemplate.id }, '๐
TaskSchedule');
return newTemplate;
}
@@ -551,7 +1076,7 @@ export async function deleteTemplateTask(templateId) {
const deleted = schedule.templates.splice(index, 1)[0];
await saveSchedule(schedule);
- emitLog('info', `Deleted template task: ${deleted.name}`, { templateId }, '[TaskSchedule]');
+ emitLog('info', `Deleted template task: ${deleted.name}`, { templateId }, '๐
TaskSchedule');
return { success: true, deleted };
}
@@ -579,7 +1104,7 @@ export async function triggerOnDemandTask(taskType, category = 'selfImprovement'
schedule.onDemandRequests.push(request);
await saveSchedule(schedule);
- emitLog('info', `On-demand task requested: ${taskType}`, { category, appId }, '[TaskSchedule]');
+ emitLog('info', `On-demand task requested: ${taskType}`, { category, appId }, '๐
TaskSchedule');
cosEvents.emit('task:on-demand-requested', request);
return request;
@@ -673,7 +1198,42 @@ export async function resetExecutionHistory(taskType, category = 'selfImprovemen
}
await saveSchedule(schedule);
- emitLog('info', `Reset execution history for ${taskType}`, { category, appId }, '[TaskSchedule]');
+ emitLog('info', `Reset execution history for ${taskType}`, { category, appId }, '๐
TaskSchedule');
return { success: true, taskType, appId };
}
+
+/**
+ * Get default prompt for a self-improvement task type
+ */
+export function getDefaultSelfImprovementPrompt(taskType) {
+ return DEFAULT_SELF_IMPROVEMENT_PROMPTS[taskType] || null;
+}
+
+/**
+ * Get default prompt for an app improvement task type (with template variables)
+ */
+export function getDefaultAppImprovementPrompt(taskType) {
+ return DEFAULT_APP_IMPROVEMENT_PROMPTS[taskType] || null;
+}
+
+/**
+ * Get the prompt for a self-improvement task type
+ */
+export async function getSelfImprovementPrompt(taskType) {
+ const interval = await getSelfImprovementInterval(taskType);
+ return interval.prompt || `[Self-Improvement] ${taskType} analysis`;
+}
+
+/**
+ * Get the prompt for an app improvement task type
+ */
+export async function getAppImprovementPrompt(taskType) {
+ const interval = await getAppImprovementInterval(taskType);
+ return interval.prompt || `[App Improvement] ${taskType} analysis
+
+Repository: {repoPath}
+
+Perform ${taskType} analysis on {appName}.
+Analyze the codebase and make improvements. Commit changes with clear descriptions.`;
+}
diff --git a/server/services/thinkingLevels.js b/server/services/thinkingLevels.js
new file mode 100644
index 0000000..c40490c
--- /dev/null
+++ b/server/services/thinkingLevels.js
@@ -0,0 +1,344 @@
+/**
+ * Thinking Levels Service
+ *
+ * Dynamic model selection based on thinking levels.
+ * Hierarchy: task โ hooks โ agent โ provider defaults
+ */
+
+import { cosEvents } from './cosEvents.js'
+
+// Thinking level definitions
+const THINKING_LEVELS = {
+ off: {
+ name: 'off',
+ model: null,
+ maxTokens: 0,
+ description: 'No extended thinking, use defaults'
+ },
+ minimal: {
+ name: 'minimal',
+ model: 'local-small',
+ maxTokens: 256,
+ localPreferred: true,
+ description: 'Quick local analysis only'
+ },
+ low: {
+ name: 'low',
+ model: 'local-medium',
+ maxTokens: 1024,
+ localPreferred: true,
+ description: 'Basic reasoning with local model'
+ },
+ medium: {
+ name: 'medium',
+ model: 'provider-default',
+ maxTokens: 4096,
+ localPreferred: false,
+ description: 'Standard cloud model thinking'
+ },
+ high: {
+ name: 'high',
+ model: 'provider-heavy',
+ maxTokens: 8192,
+ localPreferred: false,
+ description: 'Advanced reasoning with heavy model'
+ },
+ xhigh: {
+ name: 'xhigh',
+ model: 'opus',
+ maxTokens: 16384,
+ localPreferred: false,
+ description: 'Maximum reasoning with Opus'
+ }
+}
+
+// Default thresholds for automatic level selection
+const AUTO_THRESHOLDS = {
+ contextLength: {
+ minimal: 500,
+ low: 1000,
+ medium: 3000,
+ high: 6000,
+ xhigh: 10000
+ },
+ complexity: {
+ minimal: 0.2,
+ low: 0.4,
+ medium: 0.6,
+ high: 0.8,
+ xhigh: 0.95
+ }
+}
+
+// Task type to default level mapping
+const TASK_TYPE_LEVELS = {
+ // Simple tasks
+ 'format': 'minimal',
+ 'rename': 'minimal',
+ 'typo': 'minimal',
+ 'comment': 'low',
+
+ // Medium tasks
+ 'fix-bug': 'medium',
+ 'implement': 'medium',
+ 'update': 'medium',
+
+ // Complex tasks
+ 'refactor': 'high',
+ 'security': 'high',
+ 'optimize': 'high',
+ 'architect': 'xhigh',
+ 'audit': 'xhigh',
+ 'migration': 'xhigh'
+}
+
+// Usage tracking
+const levelUsage = {
+ off: 0,
+ minimal: 0,
+ low: 0,
+ medium: 0,
+ high: 0,
+ xhigh: 0
+}
+
+/**
+ * Resolve thinking level for a task
+ * Checks hierarchy: task metadata โ hooks โ agent config โ provider defaults
+ *
+ * @param {Object} task - Task object
+ * @param {Object} agent - Agent configuration
+ * @param {Object} provider - Provider configuration
+ * @returns {Object} - Resolved thinking level configuration
+ */
+function resolveThinkingLevel(task, agent = {}, provider = {}) {
+ let level = 'medium' // Default
+
+ // 1. Check task metadata for explicit level
+ if (task?.metadata?.thinkingLevel) {
+ level = task.metadata.thinkingLevel
+ }
+ // 2. Check task priority
+ else if (task?.priority) {
+ const priority = task.priority.toUpperCase()
+ if (priority === 'URGENT' || priority === 'CRITICAL') {
+ level = 'high'
+ } else if (priority === 'LOW' || priority === 'IDLE') {
+ level = 'low'
+ }
+ }
+ // 3. Check task type
+ else if (task?.metadata?.taskType) {
+ level = TASK_TYPE_LEVELS[task.metadata.taskType] || level
+ }
+ // 4. Check agent default
+ else if (agent?.defaultThinkingLevel) {
+ level = agent.defaultThinkingLevel
+ }
+ // 5. Check provider default
+ else if (provider?.defaultThinkingLevel) {
+ level = provider.defaultThinkingLevel
+ }
+
+ // Validate level
+ if (!THINKING_LEVELS[level]) {
+ level = 'medium'
+ }
+
+ // Track usage
+ levelUsage[level]++
+
+ const config = THINKING_LEVELS[level]
+
+ cosEvents.emit('thinking:levelResolved', {
+ taskId: task?.id,
+ level,
+ model: config.model,
+ source: determineSource(task, agent, provider)
+ })
+
+ return {
+ level,
+ ...config,
+ resolvedFrom: determineSource(task, agent, provider)
+ }
+}
+
+/**
+ * Determine where the level was resolved from
+ */
+function determineSource(task, agent, provider) {
+ if (task?.metadata?.thinkingLevel) return 'task'
+ if (task?.priority) return 'priority'
+ if (task?.metadata?.taskType) return 'taskType'
+ if (agent?.defaultThinkingLevel) return 'agent'
+ if (provider?.defaultThinkingLevel) return 'provider'
+ return 'default'
+}
+
+/**
+ * Suggest thinking level based on task analysis
+ * @param {Object} analysis - Task analysis from localThinking
+ * @returns {string} - Suggested level
+ */
+function suggestLevel(analysis) {
+ const complexity = analysis.complexity || 0.5
+
+ // Find appropriate level based on complexity
+ for (const [level, threshold] of Object.entries(AUTO_THRESHOLDS.complexity).reverse()) {
+ if (complexity >= threshold) {
+ return level
+ }
+ }
+
+ return 'minimal'
+}
+
+/**
+ * Suggest thinking level based on context length
+ * @param {number} contextLength - Context length in characters
+ * @returns {string} - Suggested level
+ */
+function suggestLevelFromContext(contextLength) {
+ for (const [level, threshold] of Object.entries(AUTO_THRESHOLDS.contextLength).reverse()) {
+ if (contextLength >= threshold) {
+ return level
+ }
+ }
+
+ return 'minimal'
+}
+
+/**
+ * Get model for a thinking level
+ * @param {string} level - Thinking level name
+ * @param {Object} provider - Provider config for model mapping
+ * @returns {string|null} - Model identifier
+ */
+function getModelForLevel(level, provider = {}) {
+ const config = THINKING_LEVELS[level]
+ if (!config) return null
+
+ const modelKey = config.model
+
+ switch (modelKey) {
+ case null:
+ return provider.defaultModel || null
+ case 'local-small':
+ return 'lmstudio' // Will use LM Studio
+ case 'local-medium':
+ return 'lmstudio'
+ case 'provider-default':
+ return provider.defaultModel || 'claude-3-5-sonnet-20241022'
+ case 'provider-heavy':
+ return provider.heavyModel || 'claude-3-5-sonnet-20241022'
+ case 'opus':
+ return 'claude-opus-4-20250514'
+ default:
+ return modelKey
+ }
+}
+
+/**
+ * Check if level prefers local execution
+ * @param {string} level - Thinking level
+ * @returns {boolean} - True if local preferred
+ */
+function isLocalPreferred(level) {
+ const config = THINKING_LEVELS[level]
+ return config?.localPreferred || false
+}
+
+/**
+ * Upgrade thinking level by one step
+ * @param {string} currentLevel - Current level
+ * @returns {string} - Upgraded level
+ */
+function upgradeLevel(currentLevel) {
+ const levels = Object.keys(THINKING_LEVELS)
+ const currentIndex = levels.indexOf(currentLevel)
+
+ if (currentIndex === -1) return 'medium'
+ if (currentIndex >= levels.length - 1) return currentLevel
+
+ return levels[currentIndex + 1]
+}
+
+/**
+ * Downgrade thinking level by one step
+ * @param {string} currentLevel - Current level
+ * @returns {string} - Downgraded level
+ */
+function downgradeLevel(currentLevel) {
+ const levels = Object.keys(THINKING_LEVELS)
+ const currentIndex = levels.indexOf(currentLevel)
+
+ if (currentIndex === -1) return 'medium'
+ if (currentIndex <= 0) return currentLevel
+
+ return levels[currentIndex - 1]
+}
+
+/**
+ * Get thinking level statistics
+ * @returns {Object} - Usage statistics
+ */
+function getStats() {
+ const total = Object.values(levelUsage).reduce((a, b) => a + b, 0)
+
+ return {
+ usage: { ...levelUsage },
+ total,
+ distribution: Object.entries(levelUsage).reduce((acc, [level, count]) => {
+ acc[level] = total > 0 ? ((count / total) * 100).toFixed(1) + '%' : '0%'
+ return acc
+ }, {}),
+ levels: Object.keys(THINKING_LEVELS),
+ thresholds: AUTO_THRESHOLDS
+ }
+}
+
+/**
+ * Reset usage statistics
+ */
+function resetStats() {
+ for (const level of Object.keys(levelUsage)) {
+ levelUsage[level] = 0
+ }
+}
+
+/**
+ * Update auto thresholds
+ * @param {string} thresholdType - 'contextLength' or 'complexity'
+ * @param {Object} newThresholds - New threshold values
+ */
+function updateThresholds(thresholdType, newThresholds) {
+ if (AUTO_THRESHOLDS[thresholdType]) {
+ Object.assign(AUTO_THRESHOLDS[thresholdType], newThresholds)
+ }
+}
+
+/**
+ * Get all thinking levels
+ * @returns {Object} - All level configurations
+ */
+function getLevels() {
+ return { ...THINKING_LEVELS }
+}
+
+export {
+ THINKING_LEVELS,
+ AUTO_THRESHOLDS,
+ TASK_TYPE_LEVELS,
+ resolveThinkingLevel,
+ suggestLevel,
+ suggestLevelFromContext,
+ getModelForLevel,
+ isLocalPreferred,
+ upgradeLevel,
+ downgradeLevel,
+ getStats,
+ resetStats,
+ updateThresholds,
+ getLevels
+}
diff --git a/server/services/thinkingLevels.test.js b/server/services/thinkingLevels.test.js
new file mode 100644
index 0000000..2c3bf8d
--- /dev/null
+++ b/server/services/thinkingLevels.test.js
@@ -0,0 +1,326 @@
+import { describe, it, expect, beforeEach, vi } from 'vitest';
+import {
+ THINKING_LEVELS,
+ AUTO_THRESHOLDS,
+ TASK_TYPE_LEVELS,
+ resolveThinkingLevel,
+ suggestLevel,
+ suggestLevelFromContext,
+ getModelForLevel,
+ isLocalPreferred,
+ upgradeLevel,
+ downgradeLevel,
+ getStats,
+ resetStats,
+ getLevels
+} from './thinkingLevels.js';
+
+// Mock the cosEvents
+vi.mock('./cos.js', () => ({
+ cosEvents: {
+ emit: vi.fn()
+ }
+}));
+
+describe('Thinking Levels Service', () => {
+ beforeEach(() => {
+ resetStats();
+ });
+
+ describe('THINKING_LEVELS', () => {
+ it('should have all required levels', () => {
+ expect(THINKING_LEVELS.off).toBeDefined();
+ expect(THINKING_LEVELS.minimal).toBeDefined();
+ expect(THINKING_LEVELS.low).toBeDefined();
+ expect(THINKING_LEVELS.medium).toBeDefined();
+ expect(THINKING_LEVELS.high).toBeDefined();
+ expect(THINKING_LEVELS.xhigh).toBeDefined();
+ });
+
+ it('should have increasing maxTokens as level increases', () => {
+ expect(THINKING_LEVELS.minimal.maxTokens).toBeLessThan(THINKING_LEVELS.low.maxTokens);
+ expect(THINKING_LEVELS.low.maxTokens).toBeLessThan(THINKING_LEVELS.medium.maxTokens);
+ expect(THINKING_LEVELS.medium.maxTokens).toBeLessThan(THINKING_LEVELS.high.maxTokens);
+ expect(THINKING_LEVELS.high.maxTokens).toBeLessThan(THINKING_LEVELS.xhigh.maxTokens);
+ });
+
+ it('should mark minimal and low as localPreferred', () => {
+ expect(THINKING_LEVELS.minimal.localPreferred).toBe(true);
+ expect(THINKING_LEVELS.low.localPreferred).toBe(true);
+ expect(THINKING_LEVELS.medium.localPreferred).toBe(false);
+ });
+ });
+
+ describe('AUTO_THRESHOLDS', () => {
+ it('should have contextLength thresholds', () => {
+ expect(AUTO_THRESHOLDS.contextLength).toBeDefined();
+ expect(AUTO_THRESHOLDS.contextLength.minimal).toBeDefined();
+ expect(AUTO_THRESHOLDS.contextLength.xhigh).toBeDefined();
+ });
+
+ it('should have complexity thresholds', () => {
+ expect(AUTO_THRESHOLDS.complexity).toBeDefined();
+ expect(AUTO_THRESHOLDS.complexity.minimal).toBeLessThan(AUTO_THRESHOLDS.complexity.xhigh);
+ });
+ });
+
+ describe('TASK_TYPE_LEVELS', () => {
+ it('should map simple tasks to low levels', () => {
+ expect(TASK_TYPE_LEVELS.format).toBe('minimal');
+ expect(TASK_TYPE_LEVELS.typo).toBe('minimal');
+ });
+
+ it('should map complex tasks to high levels', () => {
+ expect(TASK_TYPE_LEVELS.architect).toBe('xhigh');
+ expect(TASK_TYPE_LEVELS.audit).toBe('xhigh');
+ });
+ });
+
+ describe('resolveThinkingLevel', () => {
+ it('should use task metadata thinkingLevel if present', () => {
+ const task = { id: 'task-1', metadata: { thinkingLevel: 'high' } };
+ const result = resolveThinkingLevel(task);
+
+ expect(result.level).toBe('high');
+ expect(result.resolvedFrom).toBe('task');
+ });
+
+ it('should use priority for CRITICAL/URGENT tasks', () => {
+ const task = { id: 'task-1', priority: 'CRITICAL' };
+ const result = resolveThinkingLevel(task);
+
+ expect(result.level).toBe('high');
+ expect(result.resolvedFrom).toBe('priority');
+ });
+
+ it('should use priority for LOW tasks', () => {
+ const task = { id: 'task-1', priority: 'LOW' };
+ const result = resolveThinkingLevel(task);
+
+ expect(result.level).toBe('low');
+ });
+
+ it('should use taskType mapping', () => {
+ const task = { id: 'task-1', metadata: { taskType: 'architect' } };
+ const result = resolveThinkingLevel(task);
+
+ expect(result.level).toBe('xhigh');
+ });
+
+ it('should use agent default if no task-level config', () => {
+ const task = { id: 'task-1' };
+ const agent = { defaultThinkingLevel: 'low' };
+ const result = resolveThinkingLevel(task, agent);
+
+ expect(result.level).toBe('low');
+ expect(result.resolvedFrom).toBe('agent');
+ });
+
+ it('should use provider default as fallback', () => {
+ const task = { id: 'task-1' };
+ const provider = { defaultThinkingLevel: 'minimal' };
+ const result = resolveThinkingLevel(task, {}, provider);
+
+ expect(result.level).toBe('minimal');
+ expect(result.resolvedFrom).toBe('provider');
+ });
+
+ it('should default to medium if nothing specified', () => {
+ const task = { id: 'task-1' };
+ const result = resolveThinkingLevel(task);
+
+ expect(result.level).toBe('medium');
+ expect(result.resolvedFrom).toBe('default');
+ });
+
+ it('should fallback to medium for invalid levels', () => {
+ const task = { id: 'task-1', metadata: { thinkingLevel: 'invalid-level' } };
+ const result = resolveThinkingLevel(task);
+
+ expect(result.level).toBe('medium');
+ });
+
+ it('should include level configuration in result', () => {
+ const task = { id: 'task-1', metadata: { thinkingLevel: 'high' } };
+ const result = resolveThinkingLevel(task);
+
+ expect(result.model).toBe('provider-heavy');
+ expect(result.maxTokens).toBe(8192);
+ });
+ });
+
+ describe('suggestLevel', () => {
+ it('should suggest higher levels for higher complexity', () => {
+ const lowComplexity = suggestLevel({ complexity: 0.2 });
+ const highComplexity = suggestLevel({ complexity: 0.9 });
+
+ expect(['minimal', 'low'].includes(lowComplexity)).toBe(true);
+ expect(['high', 'xhigh'].includes(highComplexity)).toBe(true);
+ });
+
+ it('should return minimal for very low complexity', () => {
+ const result = suggestLevel({ complexity: 0.1 });
+ expect(result).toBe('minimal');
+ });
+
+ it('should handle missing complexity', () => {
+ const result = suggestLevel({});
+ expect(result).toBeDefined();
+ });
+ });
+
+ describe('suggestLevelFromContext', () => {
+ it('should suggest minimal for short context', () => {
+ const result = suggestLevelFromContext(100);
+ expect(result).toBe('minimal');
+ });
+
+ it('should suggest higher levels for longer context', () => {
+ const short = suggestLevelFromContext(500);
+ const long = suggestLevelFromContext(10000);
+
+ expect(['minimal', 'low'].includes(short)).toBe(true);
+ expect(long).toBe('xhigh');
+ });
+ });
+
+ describe('getModelForLevel', () => {
+ it('should return lmstudio for local levels', () => {
+ expect(getModelForLevel('minimal')).toBe('lmstudio');
+ expect(getModelForLevel('low')).toBe('lmstudio');
+ });
+
+ it('should return provider default for medium', () => {
+ const provider = { defaultModel: 'custom-model' };
+ expect(getModelForLevel('medium', provider)).toBe('custom-model');
+ });
+
+ it('should return provider heavy for high', () => {
+ const provider = { heavyModel: 'heavy-model' };
+ expect(getModelForLevel('high', provider)).toBe('heavy-model');
+ });
+
+ it('should return opus for xhigh', () => {
+ expect(getModelForLevel('xhigh')).toBe('claude-opus-4-20250514');
+ });
+
+ it('should return null for invalid level', () => {
+ expect(getModelForLevel('invalid')).toBeNull();
+ });
+
+ it('should return provider default when level is off', () => {
+ const provider = { defaultModel: 'default' };
+ expect(getModelForLevel('off', provider)).toBe('default');
+ });
+ });
+
+ describe('isLocalPreferred', () => {
+ it('should return true for minimal and low', () => {
+ expect(isLocalPreferred('minimal')).toBe(true);
+ expect(isLocalPreferred('low')).toBe(true);
+ });
+
+ it('should return false for medium and above', () => {
+ expect(isLocalPreferred('medium')).toBe(false);
+ expect(isLocalPreferred('high')).toBe(false);
+ expect(isLocalPreferred('xhigh')).toBe(false);
+ });
+
+ it('should return false for invalid level', () => {
+ expect(isLocalPreferred('invalid')).toBe(false);
+ });
+ });
+
+ describe('upgradeLevel', () => {
+ it('should upgrade to next level', () => {
+ expect(upgradeLevel('minimal')).toBe('low');
+ expect(upgradeLevel('low')).toBe('medium');
+ expect(upgradeLevel('medium')).toBe('high');
+ expect(upgradeLevel('high')).toBe('xhigh');
+ });
+
+ it('should not upgrade beyond xhigh', () => {
+ expect(upgradeLevel('xhigh')).toBe('xhigh');
+ });
+
+ it('should return medium for invalid level', () => {
+ expect(upgradeLevel('invalid')).toBe('medium');
+ });
+ });
+
+ describe('downgradeLevel', () => {
+ it('should downgrade to previous level', () => {
+ expect(downgradeLevel('xhigh')).toBe('high');
+ expect(downgradeLevel('high')).toBe('medium');
+ expect(downgradeLevel('medium')).toBe('low');
+ expect(downgradeLevel('low')).toBe('minimal');
+ });
+
+ it('should not downgrade below off', () => {
+ expect(downgradeLevel('off')).toBe('off');
+ });
+
+ it('should return medium for invalid level', () => {
+ expect(downgradeLevel('invalid')).toBe('medium');
+ });
+ });
+
+ describe('getStats', () => {
+ it('should return usage statistics', () => {
+ resolveThinkingLevel({ id: 't1', metadata: { thinkingLevel: 'high' } });
+ resolveThinkingLevel({ id: 't2', metadata: { thinkingLevel: 'high' } });
+ resolveThinkingLevel({ id: 't3', metadata: { thinkingLevel: 'low' } });
+
+ const stats = getStats();
+
+ expect(stats.usage.high).toBe(2);
+ expect(stats.usage.low).toBe(1);
+ expect(stats.total).toBe(3);
+ });
+
+ it('should calculate distribution percentages', () => {
+ resolveThinkingLevel({ id: 't1', metadata: { thinkingLevel: 'medium' } });
+ resolveThinkingLevel({ id: 't2', metadata: { thinkingLevel: 'medium' } });
+
+ const stats = getStats();
+
+ expect(stats.distribution.medium).toBe('100.0%');
+ });
+
+ it('should return all levels', () => {
+ const stats = getStats();
+ expect(stats.levels).toContain('off');
+ expect(stats.levels).toContain('xhigh');
+ });
+ });
+
+ describe('resetStats', () => {
+ it('should reset all usage counters to 0', () => {
+ resolveThinkingLevel({ id: 't1', metadata: { thinkingLevel: 'high' } });
+ resetStats();
+
+ const stats = getStats();
+ expect(stats.total).toBe(0);
+ expect(stats.usage.high).toBe(0);
+ });
+ });
+
+ describe('getLevels', () => {
+ it('should return all level configurations', () => {
+ const levels = getLevels();
+
+ expect(levels.off).toBeDefined();
+ expect(levels.xhigh).toBeDefined();
+ expect(Object.keys(levels).length).toBe(6);
+ });
+
+ it('should return all level configurations', () => {
+ const levels = getLevels();
+
+ // Verify it returns a new object (shallow copy)
+ const levels2 = getLevels();
+ expect(levels).not.toBe(levels2); // Different object references
+ expect(levels.off).toEqual(levels2.off); // Same content
+ });
+ });
+});
diff --git a/server/services/toolStateMachine.js b/server/services/toolStateMachine.js
new file mode 100644
index 0000000..2ae6213
--- /dev/null
+++ b/server/services/toolStateMachine.js
@@ -0,0 +1,431 @@
+/**
+ * Tool State Machine
+ *
+ * Manages tool execution lifecycle with defined states and transitions.
+ * Provides structured error recovery and execution tracking.
+ */
+
+import { v4 as uuidv4 } from 'uuid'
+import { cosEvents } from './cosEvents.js'
+
+// Tool execution states
+const STATES = {
+ IDLE: 'idle',
+ START: 'start',
+ RUNNING: 'running',
+ UPDATE: 'update',
+ END: 'end',
+ ERROR: 'error',
+ RECOVERED: 'recovered'
+}
+
+// Valid state transitions
+const TRANSITIONS = {
+ [STATES.IDLE]: [STATES.START],
+ [STATES.START]: [STATES.RUNNING, STATES.ERROR],
+ [STATES.RUNNING]: [STATES.UPDATE, STATES.END, STATES.ERROR],
+ [STATES.UPDATE]: [STATES.RUNNING, STATES.END, STATES.ERROR],
+ [STATES.END]: [],
+ [STATES.ERROR]: [STATES.RECOVERED, STATES.END],
+ [STATES.RECOVERED]: [STATES.RUNNING, STATES.ERROR]
+}
+
+// In-memory execution storage
+const executions = new Map()
+
+// Execution history (limited to last 1000)
+const executionHistory = []
+const MAX_HISTORY = 1000
+
+/**
+ * Create a new tool execution
+ * @param {string} toolId - Tool identifier
+ * @param {string} agentId - Agent running the tool
+ * @param {Object} metadata - Additional execution metadata
+ * @returns {Object} - Execution object
+ */
+function createToolExecution(toolId, agentId, metadata = {}) {
+ const executionId = uuidv4()
+ const now = Date.now()
+
+ const execution = {
+ id: executionId,
+ toolId,
+ agentId,
+ state: STATES.IDLE,
+ stateHistory: [{ state: STATES.IDLE, timestamp: now }],
+ startedAt: null,
+ completedAt: null,
+ duration: null,
+ input: metadata.input || null,
+ output: null,
+ error: null,
+ recoveryAttempts: 0,
+ metadata,
+ createdAt: now
+ }
+
+ executions.set(executionId, execution)
+ return execution
+}
+
+/**
+ * Transition to a new state
+ * @param {string} executionId - Execution ID
+ * @param {string} newState - Target state
+ * @param {Object} data - State-specific data
+ * @returns {Object} - Updated execution or null if invalid
+ */
+function transitionState(executionId, newState, data = {}) {
+ const execution = executions.get(executionId)
+ if (!execution) return null
+
+ const currentState = execution.state
+ const validTransitions = TRANSITIONS[currentState] || []
+
+ if (!validTransitions.includes(newState)) {
+ console.error(`โ ๏ธ Invalid state transition: ${currentState} โ ${newState} for execution ${executionId}`)
+ return null
+ }
+
+ const now = Date.now()
+
+ // Update state
+ execution.state = newState
+ execution.stateHistory.push({ state: newState, timestamp: now, data })
+
+ // Handle state-specific logic
+ switch (newState) {
+ case STATES.START:
+ execution.startedAt = now
+ break
+
+ case STATES.RUNNING:
+ if (data.input) execution.input = data.input
+ break
+
+ case STATES.UPDATE:
+ if (data.progress) execution.progress = data.progress
+ if (data.partialOutput) execution.partialOutput = data.partialOutput
+ break
+
+ case STATES.END:
+ execution.completedAt = now
+ execution.duration = now - (execution.startedAt || execution.createdAt)
+ if (data.output !== undefined) execution.output = data.output
+ archiveExecution(execution)
+ break
+
+ case STATES.ERROR:
+ execution.error = {
+ message: data.error?.message || data.message || 'Unknown error',
+ code: data.error?.code || data.code,
+ stack: data.error?.stack,
+ timestamp: now
+ }
+ break
+
+ case STATES.RECOVERED:
+ execution.recoveryAttempts++
+ execution.error = null
+ break
+ }
+
+ // Emit state change event
+ cosEvents.emit('tool:stateChange', {
+ executionId,
+ toolId: execution.toolId,
+ agentId: execution.agentId,
+ fromState: currentState,
+ toState: newState,
+ timestamp: now
+ })
+
+ return execution
+}
+
+/**
+ * Start a tool execution
+ * @param {string} executionId - Execution ID
+ * @param {Object} input - Tool input
+ * @returns {Object} - Updated execution
+ */
+function startExecution(executionId, input = null) {
+ const started = transitionState(executionId, STATES.START)
+ if (!started) return null
+
+ return transitionState(executionId, STATES.RUNNING, { input })
+}
+
+/**
+ * Update execution progress
+ * @param {string} executionId - Execution ID
+ * @param {Object} data - Progress data
+ * @returns {Object} - Updated execution
+ */
+function updateExecution(executionId, data) {
+ const execution = executions.get(executionId)
+ if (!execution) return null
+
+ // Only update if running
+ if (execution.state !== STATES.RUNNING && execution.state !== STATES.UPDATE) {
+ return execution
+ }
+
+ return transitionState(executionId, STATES.UPDATE, data)
+}
+
+/**
+ * Complete a tool execution
+ * @param {string} executionId - Execution ID
+ * @param {*} output - Tool output
+ * @returns {Object} - Completed execution
+ */
+function completeExecution(executionId, output = null) {
+ const execution = executions.get(executionId)
+ if (!execution) return null
+
+ // Can complete from RUNNING, UPDATE, or ERROR states
+ if (execution.state === STATES.ERROR) {
+ // Completing from error state means we gave up
+ return transitionState(executionId, STATES.END, { output, wasError: true })
+ }
+
+ if (execution.state === STATES.UPDATE) {
+ // Go back to running first, then end
+ transitionState(executionId, STATES.RUNNING)
+ }
+
+ return transitionState(executionId, STATES.END, { output })
+}
+
+/**
+ * Mark execution as errored
+ * @param {string} executionId - Execution ID
+ * @param {Error|Object} error - Error details
+ * @returns {Object} - Updated execution
+ */
+function errorExecution(executionId, error) {
+ return transitionState(executionId, STATES.ERROR, { error })
+}
+
+/**
+ * Attempt recovery from error state
+ * @param {string} executionId - Execution ID
+ * @param {string} strategy - Recovery strategy name
+ * @returns {Object} - Recovered execution or null
+ */
+function recoverExecution(executionId, strategy) {
+ const execution = executions.get(executionId)
+ if (!execution || execution.state !== STATES.ERROR) return null
+
+ const MAX_RECOVERY_ATTEMPTS = 3
+ if (execution.recoveryAttempts >= MAX_RECOVERY_ATTEMPTS) {
+ console.log(`โ ๏ธ Max recovery attempts (${MAX_RECOVERY_ATTEMPTS}) reached for ${executionId}`)
+ return null
+ }
+
+ const recovered = transitionState(executionId, STATES.RECOVERED, { strategy })
+ if (recovered) {
+ transitionState(executionId, STATES.RUNNING)
+ }
+
+ return recovered
+}
+
+/**
+ * Archive completed execution to history
+ * @param {Object} execution - Completed execution
+ */
+function archiveExecution(execution) {
+ // Add to history
+ executionHistory.unshift({
+ id: execution.id,
+ toolId: execution.toolId,
+ agentId: execution.agentId,
+ startedAt: execution.startedAt,
+ completedAt: execution.completedAt,
+ duration: execution.duration,
+ success: !execution.error,
+ recoveryAttempts: execution.recoveryAttempts
+ })
+
+ // Trim history
+ while (executionHistory.length > MAX_HISTORY) {
+ executionHistory.pop()
+ }
+
+ // Remove from active executions after a delay
+ setTimeout(() => {
+ executions.delete(execution.id)
+ }, 60000) // Keep for 1 minute for debugging
+}
+
+/**
+ * Get execution by ID
+ * @param {string} executionId - Execution ID
+ * @returns {Object|null} - Execution object
+ */
+function getExecution(executionId) {
+ return executions.get(executionId) || null
+}
+
+/**
+ * Get all active executions for an agent
+ * @param {string} agentId - Agent ID
+ * @returns {Array} - Active executions
+ */
+function getAgentExecutions(agentId) {
+ const results = []
+ for (const execution of executions.values()) {
+ if (execution.agentId === agentId) {
+ results.push(execution)
+ }
+ }
+ return results
+}
+
+/**
+ * Get execution history
+ * @param {Object} options - Filter options
+ * @returns {Array} - Execution history
+ */
+function getExecutionHistory(options = {}) {
+ let history = [...executionHistory]
+
+ if (options.agentId) {
+ history = history.filter(e => e.agentId === options.agentId)
+ }
+
+ if (options.toolId) {
+ history = history.filter(e => e.toolId === options.toolId)
+ }
+
+ if (options.success !== undefined) {
+ history = history.filter(e => e.success === options.success)
+ }
+
+ const limit = options.limit || 100
+ return history.slice(0, limit)
+}
+
+/**
+ * Get execution statistics
+ * @returns {Object} - Statistics
+ */
+function getStats() {
+ const active = Array.from(executions.values())
+ const byState = {}
+
+ for (const execution of active) {
+ byState[execution.state] = (byState[execution.state] || 0) + 1
+ }
+
+ const recentHistory = executionHistory.slice(0, 100)
+ const successCount = recentHistory.filter(e => e.success).length
+ const avgDuration = recentHistory.length > 0
+ ? recentHistory.reduce((sum, e) => sum + (e.duration || 0), 0) / recentHistory.length
+ : 0
+
+ return {
+ activeExecutions: active.length,
+ byState,
+ historySize: executionHistory.length,
+ recentSuccessRate: recentHistory.length > 0 ? successCount / recentHistory.length : 1,
+ avgDurationMs: Math.round(avgDuration)
+ }
+}
+
+/**
+ * Clean up stale executions
+ * @param {number} maxAgeMs - Maximum age in milliseconds
+ * @returns {number} - Number of executions cleaned
+ */
+function cleanupStaleExecutions(maxAgeMs = 3600000) {
+ const now = Date.now()
+ let cleaned = 0
+
+ for (const [id, execution] of executions.entries()) {
+ const age = now - execution.createdAt
+ if (age > maxAgeMs && execution.state !== STATES.END) {
+ // Force complete stale executions
+ transitionState(id, STATES.ERROR, { error: { message: 'Execution timeout' } })
+ transitionState(id, STATES.END, { wasTimeout: true })
+ cleaned++
+ }
+ }
+
+ if (cleaned > 0) {
+ console.log(`๐งน Cleaned ${cleaned} stale tool executions`)
+ }
+
+ return cleaned
+}
+
+/**
+ * Create a wrapped tool executor with state machine
+ * @param {string} toolId - Tool identifier
+ * @param {Function} toolFn - Actual tool function
+ * @returns {Function} - Wrapped tool function
+ */
+function wrapToolWithStateMachine(toolId, toolFn) {
+ return async function wrappedTool(agentId, input, metadata = {}) {
+ const execution = createToolExecution(toolId, agentId, { ...metadata, input })
+
+ const started = startExecution(execution.id, input)
+ if (!started) {
+ return { success: false, error: 'Failed to start execution' }
+ }
+
+ let result
+ let lastError
+
+ // Retry loop with recovery
+ const maxAttempts = 3
+ for (let attempt = 1; attempt <= maxAttempts; attempt++) {
+ const current = getExecution(execution.id)
+ if (!current || current.state === STATES.END) break
+
+ try {
+ result = await toolFn(input)
+ completeExecution(execution.id, result)
+ return { success: true, output: result, executionId: execution.id }
+ } catch (error) {
+ lastError = error
+ errorExecution(execution.id, error)
+
+ if (attempt < maxAttempts) {
+ const recovered = recoverExecution(execution.id, 'retry')
+ if (!recovered) break
+ console.log(`๐ Tool ${toolId} retry attempt ${attempt + 1}/${maxAttempts}`)
+ }
+ }
+ }
+
+ // Final failure
+ completeExecution(execution.id, null)
+ return {
+ success: false,
+ error: lastError?.message || 'Tool execution failed',
+ executionId: execution.id
+ }
+ }
+}
+
+export {
+ STATES,
+ TRANSITIONS,
+ createToolExecution,
+ transitionState,
+ startExecution,
+ updateExecution,
+ completeExecution,
+ errorExecution,
+ recoverExecution,
+ getExecution,
+ getAgentExecutions,
+ getExecutionHistory,
+ getStats,
+ cleanupStaleExecutions,
+ wrapToolWithStateMachine
+}
diff --git a/server/services/toolStateMachine.test.js b/server/services/toolStateMachine.test.js
new file mode 100644
index 0000000..673a617
--- /dev/null
+++ b/server/services/toolStateMachine.test.js
@@ -0,0 +1,335 @@
+import { describe, it, expect, beforeEach, vi } from 'vitest';
+import {
+ STATES,
+ TRANSITIONS,
+ createToolExecution,
+ transitionState,
+ startExecution,
+ updateExecution,
+ completeExecution,
+ errorExecution,
+ recoverExecution,
+ getExecution,
+ getAgentExecutions,
+ getExecutionHistory,
+ getStats,
+ cleanupStaleExecutions,
+ wrapToolWithStateMachine
+} from './toolStateMachine.js';
+
+// Mock the cosEvents to prevent actual event emission
+vi.mock('./cos.js', () => ({
+ cosEvents: {
+ emit: vi.fn()
+ }
+}));
+
+describe('Tool State Machine', () => {
+ describe('STATES', () => {
+ it('should have all required states', () => {
+ expect(STATES.IDLE).toBe('idle');
+ expect(STATES.START).toBe('start');
+ expect(STATES.RUNNING).toBe('running');
+ expect(STATES.UPDATE).toBe('update');
+ expect(STATES.END).toBe('end');
+ expect(STATES.ERROR).toBe('error');
+ expect(STATES.RECOVERED).toBe('recovered');
+ });
+ });
+
+ describe('TRANSITIONS', () => {
+ it('should define valid transitions from IDLE', () => {
+ expect(TRANSITIONS[STATES.IDLE]).toContain(STATES.START);
+ });
+
+ it('should define valid transitions from START', () => {
+ expect(TRANSITIONS[STATES.START]).toContain(STATES.RUNNING);
+ expect(TRANSITIONS[STATES.START]).toContain(STATES.ERROR);
+ });
+
+ it('should define valid transitions from RUNNING', () => {
+ expect(TRANSITIONS[STATES.RUNNING]).toContain(STATES.UPDATE);
+ expect(TRANSITIONS[STATES.RUNNING]).toContain(STATES.END);
+ expect(TRANSITIONS[STATES.RUNNING]).toContain(STATES.ERROR);
+ });
+
+ it('should have no transitions from END (terminal state)', () => {
+ expect(TRANSITIONS[STATES.END]).toEqual([]);
+ });
+
+ it('should allow recovery from ERROR', () => {
+ expect(TRANSITIONS[STATES.ERROR]).toContain(STATES.RECOVERED);
+ expect(TRANSITIONS[STATES.ERROR]).toContain(STATES.END);
+ });
+ });
+
+ describe('createToolExecution', () => {
+ it('should create execution with correct initial state', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+
+ expect(execution.toolId).toBe('tool-1');
+ expect(execution.agentId).toBe('agent-1');
+ expect(execution.state).toBe(STATES.IDLE);
+ expect(execution.id).toBeDefined();
+ });
+
+ it('should include metadata when provided', () => {
+ const metadata = { input: 'test input', custom: 'value' };
+ const execution = createToolExecution('tool-1', 'agent-1', metadata);
+
+ expect(execution.input).toBe('test input');
+ expect(execution.metadata.custom).toBe('value');
+ });
+
+ it('should initialize state history', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+
+ expect(execution.stateHistory).toBeDefined();
+ expect(execution.stateHistory.length).toBe(1);
+ expect(execution.stateHistory[0].state).toBe(STATES.IDLE);
+ });
+ });
+
+ describe('transitionState', () => {
+ it('should transition to valid next state', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ const result = transitionState(execution.id, STATES.START);
+
+ expect(result).not.toBeNull();
+ expect(result.state).toBe(STATES.START);
+ });
+
+ it('should reject invalid transitions', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ // IDLE cannot go directly to END
+ const result = transitionState(execution.id, STATES.END);
+
+ expect(result).toBeNull();
+ });
+
+ it('should update state history', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ transitionState(execution.id, STATES.START);
+
+ const updated = getExecution(execution.id);
+ expect(updated.stateHistory.length).toBe(2);
+ });
+
+ it('should return null for non-existent execution', () => {
+ const result = transitionState('nonexistent', STATES.START);
+ expect(result).toBeNull();
+ });
+ });
+
+ describe('startExecution', () => {
+ it('should transition through START to RUNNING', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ const result = startExecution(execution.id, { data: 'test' });
+
+ expect(result).not.toBeNull();
+ expect(result.state).toBe(STATES.RUNNING);
+ expect(result.startedAt).toBeDefined();
+ });
+
+ it('should store input when provided', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ const result = startExecution(execution.id, { key: 'value' });
+
+ expect(result.input).toEqual({ key: 'value' });
+ });
+ });
+
+ describe('updateExecution', () => {
+ it('should update progress during execution', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ startExecution(execution.id);
+ const result = updateExecution(execution.id, { progress: 0.5 });
+
+ expect(result.state).toBe(STATES.UPDATE);
+ expect(result.progress).toBe(0.5);
+ });
+
+ it('should not update if not running', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ // Still in IDLE state
+ const result = updateExecution(execution.id, { progress: 0.5 });
+
+ expect(result.state).toBe(STATES.IDLE);
+ });
+ });
+
+ describe('completeExecution', () => {
+ it('should transition to END with output', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ startExecution(execution.id);
+ const result = completeExecution(execution.id, { result: 'success' });
+
+ expect(result.state).toBe(STATES.END);
+ expect(result.output).toEqual({ result: 'success' });
+ expect(result.completedAt).toBeDefined();
+ expect(result.duration).toBeDefined();
+ });
+
+ it('should calculate duration', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ startExecution(execution.id);
+
+ // Small delay to ensure measurable duration
+ const result = completeExecution(execution.id, 'done');
+
+ expect(result.duration).toBeGreaterThanOrEqual(0);
+ });
+ });
+
+ describe('errorExecution', () => {
+ it('should transition to ERROR state', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ startExecution(execution.id);
+ const result = errorExecution(execution.id, new Error('Test error'));
+
+ expect(result.state).toBe(STATES.ERROR);
+ expect(result.error.message).toBe('Test error');
+ });
+
+ it('should store error details', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ startExecution(execution.id);
+ const error = { message: 'Custom error', code: 'ERR_TEST' };
+ const result = errorExecution(execution.id, error);
+
+ expect(result.error.code).toBe('ERR_TEST');
+ });
+ });
+
+ describe('recoverExecution', () => {
+ it('should recover from ERROR state', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ startExecution(execution.id);
+ errorExecution(execution.id, new Error('Test'));
+ const result = recoverExecution(execution.id, 'retry');
+
+ expect(result).not.toBeNull();
+ expect(result.recoveryAttempts).toBe(1);
+ });
+
+ it('should track recovery attempts', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ startExecution(execution.id);
+ errorExecution(execution.id, new Error('Test'));
+ recoverExecution(execution.id, 'retry');
+
+ // Transition to running, then error again
+ errorExecution(execution.id, new Error('Test 2'));
+ const result = recoverExecution(execution.id, 'retry');
+
+ expect(result.recoveryAttempts).toBe(2);
+ });
+
+ it('should fail after max recovery attempts', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ startExecution(execution.id);
+
+ // Exhaust recovery attempts
+ for (let i = 0; i < 3; i++) {
+ errorExecution(execution.id, new Error('Test'));
+ recoverExecution(execution.id, 'retry');
+ }
+
+ errorExecution(execution.id, new Error('Final'));
+ const result = recoverExecution(execution.id, 'retry');
+
+ expect(result).toBeNull();
+ });
+
+ it('should not recover from non-ERROR state', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ startExecution(execution.id);
+ // Currently in RUNNING, not ERROR
+ const result = recoverExecution(execution.id, 'retry');
+
+ expect(result).toBeNull();
+ });
+ });
+
+ describe('getExecution', () => {
+ it('should return execution by id', () => {
+ const execution = createToolExecution('tool-1', 'agent-1');
+ const found = getExecution(execution.id);
+
+ expect(found).not.toBeNull();
+ expect(found.id).toBe(execution.id);
+ });
+
+ it('should return null for non-existent id', () => {
+ const result = getExecution('nonexistent-id');
+ expect(result).toBeNull();
+ });
+ });
+
+ describe('getAgentExecutions', () => {
+ it('should return all executions for an agent', () => {
+ createToolExecution('tool-1', 'agent-A');
+ createToolExecution('tool-2', 'agent-A');
+ createToolExecution('tool-3', 'agent-B');
+
+ const agentAExecutions = getAgentExecutions('agent-A');
+ expect(agentAExecutions.length).toBe(2);
+ });
+
+ it('should return empty array for unknown agent', () => {
+ const result = getAgentExecutions('unknown-agent');
+ expect(result).toEqual([]);
+ });
+ });
+
+ describe('getStats', () => {
+ it('should return execution statistics', () => {
+ const stats = getStats();
+
+ expect(stats.activeExecutions).toBeDefined();
+ expect(stats.byState).toBeDefined();
+ expect(stats.historySize).toBeDefined();
+ expect(stats.recentSuccessRate).toBeDefined();
+ expect(stats.avgDurationMs).toBeDefined();
+ });
+ });
+
+ describe('wrapToolWithStateMachine', () => {
+ it('should wrap a function with state machine', async () => {
+ const mockTool = vi.fn().mockResolvedValue('result');
+ const wrapped = wrapToolWithStateMachine('test-tool', mockTool);
+
+ const result = await wrapped('agent-1', 'input');
+
+ expect(result.success).toBe(true);
+ expect(result.output).toBe('result');
+ expect(result.executionId).toBeDefined();
+ expect(mockTool).toHaveBeenCalledWith('input');
+ });
+
+ it('should handle tool errors with retry', async () => {
+ let callCount = 0;
+ const mockTool = vi.fn().mockImplementation(() => {
+ callCount++;
+ if (callCount < 2) throw new Error('Retry me');
+ return Promise.resolve('success after retry');
+ });
+
+ const wrapped = wrapToolWithStateMachine('test-tool', mockTool);
+ const result = await wrapped('agent-1', 'input');
+
+ expect(result.success).toBe(true);
+ expect(callCount).toBe(2);
+ });
+
+ it('should return error after max retries', async () => {
+ const mockTool = vi.fn().mockRejectedValue(new Error('Always fails'));
+ const wrapped = wrapToolWithStateMachine('test-tool', mockTool);
+
+ const result = await wrapped('agent-1', 'input');
+
+ expect(result.success).toBe(false);
+ expect(result.error).toBe('Always fails');
+ });
+ });
+});
diff --git a/server/services/usage.js b/server/services/usage.js
index 49403f0..a2cf6fb 100644
--- a/server/services/usage.js
+++ b/server/services/usage.js
@@ -1,11 +1,8 @@
-import { readFile, writeFile, mkdir } from 'fs/promises';
-import { existsSync } from 'fs';
-import { join, dirname } from 'path';
-import { fileURLToPath } from 'url';
-
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = dirname(__filename);
-const DATA_DIR = join(__dirname, '../../data');
+import { writeFile } from 'fs/promises';
+import { join } from 'path';
+import { ensureDir, PATHS, readJSONFile } from '../lib/fileUtils.js';
+
+const DATA_DIR = PATHS.data;
const USAGE_FILE = join(DATA_DIR, 'usage.json');
let usageData = null;
@@ -34,13 +31,10 @@ function getEmptyUsage() {
* Load usage data from disk
*/
export async function loadUsage() {
- if (!existsSync(DATA_DIR)) {
- await mkdir(DATA_DIR, { recursive: true });
- }
+ await ensureDir(DATA_DIR);
- if (existsSync(USAGE_FILE)) {
- usageData = JSON.parse(await readFile(USAGE_FILE, 'utf-8'));
- } else {
+ usageData = await readJSONFile(USAGE_FILE, null);
+ if (!usageData) {
usageData = getEmptyUsage();
await saveUsage();
}
@@ -154,11 +148,104 @@ export async function recordTokens(inputTokens, outputTokens) {
await saveUsage();
}
+/**
+ * Calculate current activity streak (consecutive days with sessions)
+ */
+function calculateStreak(dailyActivity) {
+ const today = new Date();
+ let streak = 0;
+ let checkDate = new Date(today);
+
+ // Start from today and work backwards
+ while (true) {
+ const dateStr = checkDate.toISOString().split('T')[0];
+ const dayData = dailyActivity[dateStr];
+
+ if (dayData && dayData.sessions > 0) {
+ streak++;
+ checkDate.setDate(checkDate.getDate() - 1);
+ } else if (streak === 0) {
+ // If today has no activity, check if yesterday started a streak
+ checkDate.setDate(checkDate.getDate() - 1);
+ const yesterdayStr = checkDate.toISOString().split('T')[0];
+ const yesterdayData = dailyActivity[yesterdayStr];
+ if (!yesterdayData || yesterdayData.sessions === 0) {
+ break; // No streak
+ }
+ // Continue checking from yesterday
+ } else {
+ break; // Streak broken
+ }
+ }
+
+ return streak;
+}
+
+/**
+ * Find the longest streak in history
+ */
+function findLongestStreak(dailyActivity) {
+ const dates = Object.keys(dailyActivity).sort();
+ if (dates.length === 0) return 0;
+
+ let maxStreak = 0;
+ let currentStreak = 0;
+ let prevDate = null;
+
+ for (const dateStr of dates) {
+ const dayData = dailyActivity[dateStr];
+ if (!dayData || dayData.sessions === 0) continue;
+
+ if (prevDate) {
+ const prev = new Date(prevDate);
+ const curr = new Date(dateStr);
+ const diffDays = Math.round((curr - prev) / (1000 * 60 * 60 * 24));
+
+ if (diffDays === 1) {
+ currentStreak++;
+ } else {
+ currentStreak = 1;
+ }
+ } else {
+ currentStreak = 1;
+ }
+
+ maxStreak = Math.max(maxStreak, currentStreak);
+ prevDate = dateStr;
+ }
+
+ return maxStreak;
+}
+
/**
* Get usage summary
*/
export function getUsageSummary() {
- if (!usageData) return getEmptyUsage();
+ if (!usageData) {
+ const empty = getEmptyUsage();
+ // Generate empty last7Days
+ const last7Days = [];
+ for (let i = 6; i >= 0; i--) {
+ const date = new Date();
+ date.setDate(date.getDate() - i);
+ last7Days.push({
+ date: date.toISOString().split('T')[0],
+ label: date.toLocaleDateString('en-US', { weekday: 'short' }),
+ sessions: 0,
+ messages: 0,
+ tokens: 0
+ });
+ }
+ return {
+ ...empty,
+ currentStreak: 0,
+ longestStreak: 0,
+ last7Days,
+ estimatedCost: 0,
+ topProviders: [],
+ topModels: []
+ };
+ }
// Get last 7 days activity
const last7Days = [];
@@ -173,6 +260,10 @@ export function getUsageSummary() {
});
}
+ // Calculate streaks
+ const currentStreak = calculateStreak(usageData.dailyActivity);
+ const longestStreak = findLongestStreak(usageData.dailyActivity);
+
// Calculate totals
const totalCost = estimateCost(usageData.totalTokens.input, usageData.totalTokens.output);
@@ -182,6 +273,8 @@ export function getUsageSummary() {
totalToolCalls: usageData.totalToolCalls,
totalTokens: usageData.totalTokens,
estimatedCost: totalCost,
+ currentStreak,
+ longestStreak,
last7Days,
hourlyActivity: usageData.hourlyActivity,
topProviders: Object.entries(usageData.byProvider)
diff --git a/server/services/usage.test.js b/server/services/usage.test.js
new file mode 100644
index 0000000..0f211a7
--- /dev/null
+++ b/server/services/usage.test.js
@@ -0,0 +1,83 @@
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import { getUsageSummary, recordSession } from './usage.js';
+
+// Helper to generate date strings
+const dateStr = (daysAgo) => {
+ const d = new Date();
+ d.setDate(d.getDate() - daysAgo);
+ return d.toISOString().split('T')[0];
+};
+
+describe('Usage Service - Streak Calculation', () => {
+ describe('currentStreak', () => {
+ it('should return 0 when no activity', () => {
+ // Empty daily activity means no streak
+ const summary = getUsageSummary();
+ // With real data from file, we just verify streak is a number >= 0
+ expect(typeof summary.currentStreak).toBe('number');
+ expect(summary.currentStreak).toBeGreaterThanOrEqual(0);
+ });
+
+ it('should include currentStreak in summary', () => {
+ const summary = getUsageSummary();
+ expect(summary).toHaveProperty('currentStreak');
+ expect(typeof summary.currentStreak).toBe('number');
+ });
+
+ it('should include longestStreak in summary', () => {
+ const summary = getUsageSummary();
+ expect(summary).toHaveProperty('longestStreak');
+ expect(typeof summary.longestStreak).toBe('number');
+ });
+
+ it('should have currentStreak <= longestStreak', () => {
+ const summary = getUsageSummary();
+ expect(summary.currentStreak).toBeLessThanOrEqual(summary.longestStreak);
+ });
+ });
+
+ describe('streak logic validation', () => {
+ it('should include last7Days in summary', () => {
+ const summary = getUsageSummary();
+ expect(summary).toHaveProperty('last7Days');
+ expect(Array.isArray(summary.last7Days)).toBe(true);
+ expect(summary.last7Days.length).toBe(7);
+ });
+
+ it('last7Days should have correct structure', () => {
+ const summary = getUsageSummary();
+ summary.last7Days.forEach(day => {
+ expect(day).toHaveProperty('date');
+ expect(day).toHaveProperty('label');
+ expect(day).toHaveProperty('sessions');
+ expect(typeof day.sessions).toBe('number');
+ });
+ });
+
+ it('last7Days dates should be in chronological order', () => {
+ const summary = getUsageSummary();
+ const dates = summary.last7Days.map(d => d.date);
+ const sorted = [...dates].sort();
+ expect(dates).toEqual(sorted);
+ });
+ });
+
+ describe('summary structure', () => {
+ it('should have all expected fields', () => {
+ const summary = getUsageSummary();
+ expect(summary).toHaveProperty('totalSessions');
+ expect(summary).toHaveProperty('totalMessages');
+ expect(summary).toHaveProperty('currentStreak');
+ expect(summary).toHaveProperty('longestStreak');
+ expect(summary).toHaveProperty('last7Days');
+ expect(summary).toHaveProperty('hourlyActivity');
+ expect(summary).toHaveProperty('topProviders');
+ expect(summary).toHaveProperty('topModels');
+ });
+
+ it('hourlyActivity should have 24 entries', () => {
+ const summary = getUsageSummary();
+ expect(summary.hourlyActivity).toHaveLength(24);
+ });
+ });
+});
diff --git a/server/services/visionTest.integration.test.js b/server/services/visionTest.integration.test.js
new file mode 100644
index 0000000..c0da109
--- /dev/null
+++ b/server/services/visionTest.integration.test.js
@@ -0,0 +1,241 @@
+/**
+ * Vision Integration Test
+ *
+ * Tests LM Studio's vision capabilities end-to-end by:
+ * 1. Creating a run with an actual screenshot
+ * 2. Executing via the API provider
+ * 3. Verifying the model correctly interprets the image
+ *
+ * This test requires:
+ * - LM Studio running (localhost:1234 or remote via Tailscale)
+ * - A vision-capable model loaded (e.g., llava, bakllava)
+ * - Screenshots available in data/screenshots/
+ *
+ * Skip with: npm test -- --testPathIgnorePatterns=integration
+ */
+
+import { describe, it, expect, beforeAll } from 'vitest';
+import { existsSync, readdirSync } from 'fs';
+import { join, dirname, resolve } from 'path';
+import { fileURLToPath } from 'url';
+import { testVision, checkVisionHealth, runVisionTestSuite } from './visionTest.js';
+import { getProviderById } from './providers.js';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+const SCREENSHOTS_DIR = resolve(__dirname, '../../data/screenshots');
+
+// Helper to check if LM Studio is available
+async function isLmStudioAvailable() {
+ const health = await checkVisionHealth('lmstudio').catch(() => ({ available: false }));
+ return health.available;
+}
+
+// Helper to get an app screenshot (prefer .png files)
+function getAppScreenshot() {
+ if (!existsSync(SCREENSHOTS_DIR)) {
+ return null;
+ }
+
+ const files = readdirSync(SCREENSHOTS_DIR).filter(f =>
+ /\.(png|jpg|jpeg)$/i.test(f)
+ );
+
+ // Prefer PNG screenshots of the app (likely start with 'Screenshot')
+ const appScreenshot = files.find(f => f.toLowerCase().startsWith('screenshot'));
+ return appScreenshot || files[0] || null;
+}
+
+// Longer timeout for vision API calls (120 seconds)
+const VISION_TEST_TIMEOUT = 120000;
+
+describe('Vision Integration Tests', () => {
+ let lmStudioAvailable = false;
+ let testScreenshot = null;
+
+ beforeAll(async () => {
+ // Check prerequisites
+ lmStudioAvailable = await isLmStudioAvailable();
+ testScreenshot = getAppScreenshot();
+
+ if (!lmStudioAvailable) {
+ console.log('โญ๏ธ LM Studio not available - integration tests will be skipped');
+ }
+ if (!testScreenshot) {
+ console.log('โญ๏ธ No screenshots available - integration tests will be skipped');
+ }
+ }, VISION_TEST_TIMEOUT);
+
+ describe('LM Studio Vision Health', () => {
+ it('should check vision health status', async () => {
+ const health = await checkVisionHealth('lmstudio');
+
+ // This test should always pass - it just reports the status
+ expect(health).toBeDefined();
+ expect(typeof health.available).toBe('boolean');
+
+ if (health.available) {
+ expect(health.provider).toBe('lmstudio');
+ expect(health.endpoint).toBeDefined();
+ } else {
+ expect(health.error).toBeDefined();
+ }
+ });
+ });
+
+ describe('Vision Image Interpretation', () => {
+ it('should correctly interpret an app screenshot with basic description', async () => {
+ if (!lmStudioAvailable || !testScreenshot) {
+ console.log('โญ๏ธ Skipping: LM Studio or screenshots not available');
+ return;
+ }
+
+ console.log(`๐ธ Testing with screenshot: ${testScreenshot}`);
+
+ const result = await testVision({
+ imagePath: testScreenshot,
+ prompt: 'Describe what you see in this image. Focus on the main UI elements, layout, and any visible text or buttons.',
+ expectedContent: [], // No specific content required, just verify it responds
+ providerId: 'lmstudio'
+ });
+
+ expect(result).toBeDefined();
+ expect(result.imagePath).toBe(testScreenshot);
+ expect(result.provider).toBe('lmstudio');
+ expect(result.duration).toBeGreaterThan(0);
+
+ // Verify we got a meaningful response (not empty or error)
+ expect(result.response).toBeDefined();
+ expect(result.response.length).toBeGreaterThan(20);
+
+ console.log(`โ
Vision response (${result.response.length} chars): ${result.response.substring(0, 200)}...`);
+ }, VISION_TEST_TIMEOUT);
+
+ it('should identify UI elements in app screenshots', async () => {
+ if (!lmStudioAvailable || !testScreenshot) {
+ console.log('โญ๏ธ Skipping: LM Studio or screenshots not available');
+ return;
+ }
+
+ const result = await testVision({
+ imagePath: testScreenshot,
+ prompt: 'List the main UI components visible in this screenshot. Look for: navigation elements, buttons, forms, cards, tables, or any interactive elements. Be specific about what you see.',
+ expectedContent: [], // We'll verify response quality manually
+ providerId: 'lmstudio'
+ });
+
+ expect(result).toBeDefined();
+ expect(result.response).toBeDefined();
+ expect(result.response.length).toBeGreaterThan(50);
+
+ // Response should contain some UI-related terms for an app screenshot
+ const uiTerms = ['button', 'text', 'menu', 'navigation', 'card', 'input', 'form', 'table', 'list', 'header', 'sidebar', 'panel', 'icon', 'tab'];
+ const responseLower = result.response.toLowerCase();
+ const foundTerms = uiTerms.filter(term => responseLower.includes(term));
+
+ console.log(`๐ UI terms found: ${foundTerms.join(', ')}`);
+
+ // For an app screenshot, we expect at least one UI term to be identified
+ // This is a soft check since the model's response varies
+ if (foundTerms.length === 0) {
+ console.warn('โ ๏ธ No standard UI terms found in response - this may be expected for non-UI images');
+ }
+ }, VISION_TEST_TIMEOUT);
+
+ it('should handle expected content validation', async () => {
+ if (!lmStudioAvailable || !testScreenshot) {
+ console.log('โญ๏ธ Skipping: LM Studio or screenshots not available');
+ return;
+ }
+
+ // Test with expected content that should be found in any image description
+ const result = await testVision({
+ imagePath: testScreenshot,
+ prompt: 'Describe the colors and visual elements in this image.',
+ expectedContent: ['image', 'color'], // Very generic terms likely to appear
+ providerId: 'lmstudio'
+ });
+
+ expect(result).toBeDefined();
+ expect(result.expectedTerms).toContain('image');
+ expect(result.expectedTerms).toContain('color');
+ expect(result.foundTerms).toBeDefined();
+ expect(result.missingTerms).toBeDefined();
+
+ // At least one of our generic terms should be found
+ expect(result.foundTerms.length + result.missingTerms.length).toBe(2);
+ }, VISION_TEST_TIMEOUT);
+ });
+
+ describe('Vision Test Suite', () => {
+ it('should run the full vision test suite', async () => {
+ if (!lmStudioAvailable || !testScreenshot) {
+ console.log('โญ๏ธ Skipping: LM Studio or screenshots not available');
+ return;
+ }
+
+ const result = await runVisionTestSuite('lmstudio');
+
+ expect(result).toBeDefined();
+ expect(result.provider).toBe('lmstudio');
+ expect(result.totalTests).toBeGreaterThan(0);
+ expect(result.results).toBeInstanceOf(Array);
+ expect(result.results.length).toBe(result.totalTests);
+
+ console.log(`๐ Test suite results: ${result.passedTests}/${result.totalTests} passed`);
+
+ // Each result should have required fields
+ for (const testResult of result.results) {
+ expect(testResult.testName).toBeDefined();
+ // Either we have a successful response or an error
+ expect(testResult.response || testResult.error).toBeDefined();
+ }
+ }, VISION_TEST_TIMEOUT);
+ });
+
+ describe('Error Handling', () => {
+ it('should gracefully handle non-existent provider', async () => {
+ const result = await testVision({
+ imagePath: 'test.png',
+ prompt: 'Test',
+ expectedContent: [],
+ providerId: 'nonexistent-provider'
+ });
+
+ expect(result.success).toBe(false);
+ expect(result.error).toContain('not found');
+ });
+
+ it('should gracefully handle non-existent image', async () => {
+ if (!lmStudioAvailable) {
+ console.log('โญ๏ธ Skipping: LM Studio not available');
+ return;
+ }
+
+ // This should throw an error about missing image
+ await expect(testVision({
+ imagePath: 'definitely-does-not-exist.png',
+ prompt: 'Test',
+ expectedContent: [],
+ providerId: 'lmstudio'
+ })).rejects.toThrow('Failed to load image');
+ }, VISION_TEST_TIMEOUT);
+ });
+});
+
+describe('Provider Configuration Validation', () => {
+ it('should have lmstudio provider configured', async () => {
+ const provider = await getProviderById('lmstudio').catch(() => null);
+
+ // This test documents the expected configuration
+ // It passes even if provider doesn't exist (for CI environments)
+ if (provider) {
+ expect(provider.type).toBe('api');
+ // Endpoint can be localhost or remote (e.g., Tailscale IP)
+ expect(provider.endpoint).toContain(':1234');
+ console.log(`โ
LM Studio provider configured: ${provider.endpoint}`);
+ } else {
+ console.log('โน๏ธ LM Studio provider not configured - this is expected in some environments');
+ }
+ });
+});
diff --git a/server/services/visionTest.js b/server/services/visionTest.js
new file mode 100644
index 0000000..fdd2fb4
--- /dev/null
+++ b/server/services/visionTest.js
@@ -0,0 +1,313 @@
+/**
+ * Vision Test Service
+ *
+ * Tests LM Studio's vision capabilities by sending images to the API
+ * and verifying the model can correctly interpret them.
+ */
+
+import { readFile } from 'fs/promises';
+import { existsSync } from 'fs';
+import { join, dirname, resolve, extname } from 'path';
+import { fileURLToPath } from 'url';
+import { getProviderById } from './providers.js';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+const SCREENSHOTS_DIR = resolve(__dirname, '../../data/screenshots');
+
+/**
+ * Get MIME type from file extension
+ * @param {string} filepath - Path to the file
+ * @returns {string} - MIME type
+ */
+function getMimeType(filepath) {
+ const ext = extname(filepath).toLowerCase();
+ const mimeTypes = {
+ '.png': 'image/png',
+ '.jpg': 'image/jpeg',
+ '.jpeg': 'image/jpeg',
+ '.gif': 'image/gif',
+ '.webp': 'image/webp'
+ };
+ return mimeTypes[ext] || 'image/png';
+}
+
+/**
+ * Load an image as base64 data URL
+ * @param {string} imagePath - Path to the image file
+ * @returns {Promise} - Base64 data URL
+ */
+async function loadImageAsBase64(imagePath) {
+ const fullPath = imagePath.startsWith('/') ? imagePath : join(SCREENSHOTS_DIR, imagePath);
+
+ if (!existsSync(fullPath)) {
+ throw new Error(`Image not found: ${fullPath}`);
+ }
+
+ const buffer = await readFile(fullPath);
+ const mimeType = getMimeType(fullPath);
+ return `data:${mimeType};base64,${buffer.toString('base64')}`;
+}
+
+/**
+ * Call LM Studio API with vision request
+ * @param {Object} options - Request options
+ * @param {string} options.endpoint - API endpoint
+ * @param {string} options.apiKey - API key
+ * @param {string} options.model - Model to use
+ * @param {string} options.imageDataUrl - Base64 image data URL
+ * @param {string} options.prompt - Prompt to send with image
+ * @param {number} options.timeout - Request timeout in ms
+ * @returns {Promise} - API response
+ */
+async function callVisionAPI({ endpoint, apiKey, model, imageDataUrl, prompt, timeout = 60000 }) {
+ const controller = new AbortController();
+ const timeoutId = setTimeout(() => controller.abort(), timeout);
+
+ const response = await fetch(`${endpoint}/chat/completions`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': `Bearer ${apiKey}`
+ },
+ body: JSON.stringify({
+ model,
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ type: 'image_url',
+ image_url: {
+ url: imageDataUrl
+ }
+ },
+ {
+ type: 'text',
+ text: prompt
+ }
+ ]
+ }
+ ],
+ max_tokens: 500,
+ temperature: 0.1
+ }),
+ signal: controller.signal
+ }).finally(() => clearTimeout(timeoutId));
+
+ if (!response.ok) {
+ const errorText = await response.text().catch(() => 'Unknown error');
+ throw new Error(`Vision API error ${response.status}: ${errorText}`);
+ }
+
+ return response.json();
+}
+
+/**
+ * Test vision capability with a specific image and prompt
+ * @param {Object} options - Test options
+ * @param {string} options.imagePath - Path to test image
+ * @param {string} options.prompt - Test prompt
+ * @param {string} options.expectedContent - Keywords expected in response
+ * @param {string} [options.providerId='lmstudio'] - Provider to use
+ * @param {string} [options.model] - Model to use (defaults to provider's default)
+ * @returns {Promise} - Test result
+ */
+export async function testVision({ imagePath, prompt, expectedContent, providerId = 'lmstudio', model }) {
+ const startTime = Date.now();
+
+ // Get provider configuration
+ let provider;
+ try {
+ provider = await getProviderById(providerId);
+ } catch (err) {
+ return {
+ success: false,
+ error: err.message.includes('not initialized') ? `Provider '${providerId}' not found` : err.message,
+ duration: Date.now() - startTime
+ };
+ }
+ if (!provider) {
+ return {
+ success: false,
+ error: `Provider '${providerId}' not found`,
+ duration: Date.now() - startTime
+ };
+ }
+
+ if (provider.type !== 'api') {
+ return {
+ success: false,
+ error: `Provider '${providerId}' is not an API provider (type: ${provider.type})`,
+ duration: Date.now() - startTime
+ };
+ }
+
+ const testModel = model || provider.defaultModel;
+ if (!testModel) {
+ return {
+ success: false,
+ error: 'No model specified and provider has no default model',
+ duration: Date.now() - startTime
+ };
+ }
+
+ // Load image
+ const imageDataUrl = await loadImageAsBase64(imagePath).catch(err => {
+ throw new Error(`Failed to load image: ${err.message}`);
+ });
+
+ console.log(`๐ Testing vision with model: ${testModel}`);
+ console.log(`๐ธ Image: ${imagePath}`);
+
+ // Call vision API
+ const apiResponse = await callVisionAPI({
+ endpoint: provider.endpoint,
+ apiKey: provider.apiKey,
+ model: testModel,
+ imageDataUrl,
+ prompt,
+ timeout: provider.timeout || 60000
+ });
+
+ const responseContent = apiResponse.choices?.[0]?.message?.content || '';
+ const duration = Date.now() - startTime;
+
+ // Check if expected content is present
+ const expectedTerms = Array.isArray(expectedContent) ? expectedContent : [expectedContent];
+ const foundTerms = expectedTerms.filter(term =>
+ responseContent.toLowerCase().includes(term.toLowerCase())
+ );
+ const allFound = foundTerms.length === expectedTerms.length;
+
+ console.log(`โ
Vision test completed in ${duration}ms`);
+ console.log(`๐ Response: ${responseContent.substring(0, 200)}...`);
+
+ return {
+ success: allFound,
+ model: testModel,
+ provider: providerId,
+ imagePath,
+ prompt,
+ response: responseContent,
+ expectedTerms,
+ foundTerms,
+ missingTerms: expectedTerms.filter(t => !foundTerms.includes(t)),
+ duration,
+ usage: apiResponse.usage || null
+ };
+}
+
+/**
+ * Run a comprehensive vision test suite
+ * @param {string} [providerId='lmstudio'] - Provider to test
+ * @param {string} [model] - Specific model to test
+ * @returns {Promise} - Test suite results
+ */
+export async function runVisionTestSuite(providerId = 'lmstudio', model) {
+ const results = [];
+ const screenshotFiles = await import('fs').then(fs =>
+ fs.readdirSync(SCREENSHOTS_DIR).filter(f =>
+ /\.(png|jpg|jpeg|gif|webp)$/i.test(f)
+ )
+ );
+
+ if (screenshotFiles.length === 0) {
+ return {
+ success: false,
+ error: 'No screenshots available for testing',
+ results: []
+ };
+ }
+
+ // Use first available screenshot for basic vision test
+ const testImage = screenshotFiles[0];
+
+ // Test 1: Basic image description
+ const describeTest = await testVision({
+ imagePath: testImage,
+ prompt: 'Describe what you see in this image in 2-3 sentences. Focus on the main elements visible.',
+ expectedContent: [], // No specific content expected, just verify it responds
+ providerId,
+ model
+ }).catch(err => ({
+ success: false,
+ error: err.message,
+ testName: 'basic-description'
+ }));
+
+ results.push({ ...describeTest, testName: 'basic-description' });
+
+ // Test 2: UI element identification (if it's an app screenshot)
+ const uiTest = await testVision({
+ imagePath: testImage,
+ prompt: 'If this is a screenshot of an application or website, identify any visible UI elements like buttons, forms, navigation, or text. If not a UI screenshot, describe the main subject.',
+ expectedContent: [], // Just verify response quality
+ providerId,
+ model
+ }).catch(err => ({
+ success: false,
+ error: err.message,
+ testName: 'ui-identification'
+ }));
+
+ results.push({ ...uiTest, testName: 'ui-identification' });
+
+ // Calculate overall success
+ const successfulTests = results.filter(r => !r.error && r.response?.length > 20);
+
+ return {
+ success: successfulTests.length === results.length,
+ totalTests: results.length,
+ passedTests: successfulTests.length,
+ provider: providerId,
+ model: model || 'default',
+ results
+ };
+}
+
+/**
+ * Quick health check for vision capabilities
+ * @param {string} [providerId='lmstudio'] - Provider to check
+ * @returns {Promise} - Health check result
+ */
+export async function checkVisionHealth(providerId = 'lmstudio') {
+ let provider;
+ try {
+ provider = await getProviderById(providerId);
+ } catch (err) {
+ return { available: false, error: err.message };
+ }
+
+ if (!provider) {
+ return { available: false, error: 'Provider not found' };
+ }
+
+ if (!provider.enabled) {
+ return { available: false, error: 'Provider is disabled' };
+ }
+
+ if (provider.type !== 'api') {
+ return { available: false, error: 'Vision requires API provider' };
+ }
+
+ // Check if endpoint is reachable
+ const controller = new AbortController();
+ const timeoutId = setTimeout(() => controller.abort(), 5000);
+
+ const response = await fetch(`${provider.endpoint}/models`, {
+ headers: provider.apiKey ? { 'Authorization': `Bearer ${provider.apiKey}` } : {},
+ signal: controller.signal
+ }).catch(() => null).finally(() => clearTimeout(timeoutId));
+
+ if (!response?.ok) {
+ return { available: false, error: 'API endpoint not reachable' };
+ }
+
+ return {
+ available: true,
+ provider: providerId,
+ endpoint: provider.endpoint,
+ defaultModel: provider.defaultModel
+ };
+}
diff --git a/server/services/visionTest.test.js b/server/services/visionTest.test.js
new file mode 100644
index 0000000..ddb043c
--- /dev/null
+++ b/server/services/visionTest.test.js
@@ -0,0 +1,317 @@
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import { testVision, runVisionTestSuite, checkVisionHealth } from './visionTest.js';
+
+// Mock the providers module
+vi.mock('./providers.js', () => ({
+ getProviderById: vi.fn()
+}));
+
+// Mock fs/promises for image loading
+vi.mock('fs/promises', () => ({
+ readFile: vi.fn()
+}));
+
+// Mock fs for existsSync
+vi.mock('fs', () => ({
+ existsSync: vi.fn(),
+ readdirSync: vi.fn()
+}));
+
+// Import mocked modules
+import { getProviderById } from './providers.js';
+import { readFile } from 'fs/promises';
+import { existsSync, readdirSync } from 'fs';
+
+describe('Vision Test Service', () => {
+ const mockProvider = {
+ id: 'lmstudio',
+ name: 'LM Studio',
+ type: 'api',
+ endpoint: 'http://localhost:1234/v1',
+ apiKey: 'lm-studio',
+ models: ['test-vision-model'],
+ defaultModel: 'test-vision-model',
+ timeout: 60000,
+ enabled: true
+ };
+
+ const mockImageBuffer = Buffer.from('fake-image-data');
+
+ beforeEach(() => {
+ vi.clearAllMocks();
+ global.fetch = vi.fn();
+ });
+
+ afterEach(() => {
+ vi.restoreAllMocks();
+ });
+
+ describe('testVision', () => {
+ it('should return error when provider not found', async () => {
+ getProviderById.mockResolvedValue(null);
+
+ const result = await testVision({
+ imagePath: '/test/image.png',
+ prompt: 'Describe this image',
+ expectedContent: ['test'],
+ providerId: 'nonexistent'
+ });
+
+ expect(result.success).toBe(false);
+ expect(result.error).toContain('not found');
+ });
+
+ it('should return error when provider is not API type', async () => {
+ getProviderById.mockResolvedValue({
+ ...mockProvider,
+ type: 'cli'
+ });
+
+ const result = await testVision({
+ imagePath: '/test/image.png',
+ prompt: 'Describe this image',
+ expectedContent: ['test'],
+ providerId: 'lmstudio'
+ });
+
+ expect(result.success).toBe(false);
+ expect(result.error).toContain('not an API provider');
+ });
+
+ it('should return error when no model specified and no default', async () => {
+ getProviderById.mockResolvedValue({
+ ...mockProvider,
+ defaultModel: null
+ });
+
+ const result = await testVision({
+ imagePath: '/test/image.png',
+ prompt: 'Describe this image',
+ expectedContent: ['test']
+ });
+
+ expect(result.success).toBe(false);
+ expect(result.error).toContain('No model specified');
+ });
+
+ it('should successfully test vision when API returns expected content', async () => {
+ getProviderById.mockResolvedValue(mockProvider);
+ existsSync.mockReturnValue(true);
+ readFile.mockResolvedValue(mockImageBuffer);
+
+ const mockResponse = {
+ choices: [{
+ message: {
+ content: 'This is a screenshot of an application showing a button and text.'
+ }
+ }],
+ usage: { prompt_tokens: 100, completion_tokens: 50 }
+ };
+
+ global.fetch.mockResolvedValue({
+ ok: true,
+ json: () => Promise.resolve(mockResponse)
+ });
+
+ const result = await testVision({
+ imagePath: '/test/image.png',
+ prompt: 'Describe this image',
+ expectedContent: ['button', 'text']
+ });
+
+ expect(result.success).toBe(true);
+ expect(result.model).toBe('test-vision-model');
+ expect(result.foundTerms).toContain('button');
+ expect(result.foundTerms).toContain('text');
+ expect(result.missingTerms).toHaveLength(0);
+ });
+
+ it('should return success false when expected content not found', async () => {
+ getProviderById.mockResolvedValue(mockProvider);
+ existsSync.mockReturnValue(true);
+ readFile.mockResolvedValue(mockImageBuffer);
+
+ const mockResponse = {
+ choices: [{
+ message: {
+ content: 'This is a blank image.'
+ }
+ }]
+ };
+
+ global.fetch.mockResolvedValue({
+ ok: true,
+ json: () => Promise.resolve(mockResponse)
+ });
+
+ const result = await testVision({
+ imagePath: '/test/image.png',
+ prompt: 'Describe this image',
+ expectedContent: ['button', 'navigation']
+ });
+
+ expect(result.success).toBe(false);
+ expect(result.missingTerms).toContain('button');
+ expect(result.missingTerms).toContain('navigation');
+ });
+
+ it('should handle API errors gracefully', async () => {
+ getProviderById.mockResolvedValue(mockProvider);
+ existsSync.mockReturnValue(true);
+ readFile.mockResolvedValue(mockImageBuffer);
+
+ global.fetch.mockResolvedValue({
+ ok: false,
+ status: 500,
+ text: () => Promise.resolve('Internal Server Error')
+ });
+
+ await expect(testVision({
+ imagePath: '/test/image.png',
+ prompt: 'Describe this image',
+ expectedContent: ['test']
+ })).rejects.toThrow('Vision API error 500');
+ });
+
+ it('should use custom model when specified', async () => {
+ getProviderById.mockResolvedValue(mockProvider);
+ existsSync.mockReturnValue(true);
+ readFile.mockResolvedValue(mockImageBuffer);
+
+ const mockResponse = {
+ choices: [{
+ message: { content: 'Test response' }
+ }]
+ };
+
+ global.fetch.mockResolvedValue({
+ ok: true,
+ json: () => Promise.resolve(mockResponse)
+ });
+
+ const result = await testVision({
+ imagePath: '/test/image.png',
+ prompt: 'Describe',
+ expectedContent: [],
+ model: 'custom-model'
+ });
+
+ expect(result.model).toBe('custom-model');
+
+ // Verify the API was called with custom model
+ const fetchCall = global.fetch.mock.calls[0];
+ const body = JSON.parse(fetchCall[1].body);
+ expect(body.model).toBe('custom-model');
+ });
+
+ it('should handle image not found error', async () => {
+ getProviderById.mockResolvedValue(mockProvider);
+ existsSync.mockReturnValue(false);
+
+ await expect(testVision({
+ imagePath: '/nonexistent/image.png',
+ prompt: 'Describe',
+ expectedContent: []
+ })).rejects.toThrow('Failed to load image');
+ });
+ });
+
+ describe('runVisionTestSuite', () => {
+ it('should return error when no screenshots available', async () => {
+ readdirSync.mockReturnValue([]);
+
+ const result = await runVisionTestSuite('lmstudio');
+
+ expect(result.success).toBe(false);
+ expect(result.error).toContain('No screenshots available');
+ });
+
+ it('should run multiple tests on available screenshots', async () => {
+ readdirSync.mockReturnValue(['test1.png', 'test2.jpg']);
+ getProviderById.mockResolvedValue(mockProvider);
+ existsSync.mockReturnValue(true);
+ readFile.mockResolvedValue(mockImageBuffer);
+
+ const mockResponse = {
+ choices: [{
+ message: { content: 'This is a detailed description of what I see in the image.' }
+ }]
+ };
+
+ global.fetch.mockResolvedValue({
+ ok: true,
+ json: () => Promise.resolve(mockResponse)
+ });
+
+ const result = await runVisionTestSuite('lmstudio');
+
+ expect(result.totalTests).toBe(2);
+ expect(result.results).toHaveLength(2);
+ expect(result.results[0].testName).toBe('basic-description');
+ expect(result.results[1].testName).toBe('ui-identification');
+ });
+ });
+
+ describe('checkVisionHealth', () => {
+ it('should return unavailable when provider not found', async () => {
+ getProviderById.mockResolvedValue(null);
+
+ const result = await checkVisionHealth('nonexistent');
+
+ expect(result.available).toBe(false);
+ expect(result.error).toContain('not found');
+ });
+
+ it('should return unavailable when provider is disabled', async () => {
+ getProviderById.mockResolvedValue({
+ ...mockProvider,
+ enabled: false
+ });
+
+ const result = await checkVisionHealth('lmstudio');
+
+ expect(result.available).toBe(false);
+ expect(result.error).toContain('disabled');
+ });
+
+ it('should return unavailable when provider is not API type', async () => {
+ getProviderById.mockResolvedValue({
+ ...mockProvider,
+ type: 'cli'
+ });
+
+ const result = await checkVisionHealth('lmstudio');
+
+ expect(result.available).toBe(false);
+ expect(result.error).toContain('requires API provider');
+ });
+
+ it('should return available when endpoint is reachable', async () => {
+ getProviderById.mockResolvedValue(mockProvider);
+
+ global.fetch.mockResolvedValue({
+ ok: true,
+ json: () => Promise.resolve({ data: [] })
+ });
+
+ const result = await checkVisionHealth('lmstudio');
+
+ expect(result.available).toBe(true);
+ expect(result.provider).toBe('lmstudio');
+ expect(result.endpoint).toBe(mockProvider.endpoint);
+ });
+
+ it('should return unavailable when endpoint not reachable', async () => {
+ getProviderById.mockResolvedValue(mockProvider);
+
+ global.fetch.mockResolvedValue({
+ ok: false
+ });
+
+ const result = await checkVisionHealth('lmstudio');
+
+ expect(result.available).toBe(false);
+ expect(result.error).toContain('not reachable');
+ });
+ });
+});
diff --git a/server/services/weeklyDigest.js b/server/services/weeklyDigest.js
index e76e076..cb8428a 100644
--- a/server/services/weeklyDigest.js
+++ b/server/services/weeklyDigest.js
@@ -6,11 +6,12 @@
* error patterns, and accomplishments.
*/
-import { readFile, writeFile, mkdir, readdir } from 'fs/promises';
+import { writeFile, mkdir, readdir } from 'fs/promises';
import { existsSync } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import { cosEvents, emitLog, getAgents } from './cos.js';
+import { readJSONFile } from '../lib/fileUtils.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
@@ -68,11 +69,7 @@ function getDigestPath(weekId) {
*/
async function loadDigest(weekId) {
const path = getDigestPath(weekId);
- if (!existsSync(path)) {
- return null;
- }
- const content = await readFile(path, 'utf-8');
- return JSON.parse(content);
+ return readJSONFile(path, null);
}
/**
@@ -121,7 +118,7 @@ export async function generateWeeklyDigest(weekId = null) {
const weekStart = getWeekStart(new Date());
weekStart.setHours(0, 0, 0, 0);
- emitLog('info', `Generating weekly digest for ${targetWeekId}`, { weekId: targetWeekId }, '[WeeklyDigest]');
+ emitLog('info', `Generating weekly digest for ${targetWeekId}`, { weekId: targetWeekId }, '๐ WeeklyDigest');
// Get all agents
const agents = await getAgents();