diff --git a/packages/opencode/src/altimate/native/altimate-core.ts b/packages/opencode/src/altimate/native/altimate-core.ts index d44de33d2..958230b26 100644 --- a/packages/opencode/src/altimate/native/altimate-core.ts +++ b/packages/opencode/src/altimate/native/altimate-core.ts @@ -25,11 +25,15 @@ function toData(obj: unknown): Record { return JSON.parse(JSON.stringify(obj)) as Record } -/** Wrap a handler body into the standard AltimateCoreResult envelope. */ -function ok( - success: boolean, - data: Record, -): AltimateCoreResult { +/** + * Wrap a handler body into the standard AltimateCoreResult envelope. + * + * Contract: ok(true, data) means "the operation completed." Semantic results + * (e.g., SQL is invalid, queries are not equivalent) live in the data fields, + * NOT in the success flag. success=false only when the handler throws (fail()). + * This prevents semantic findings from being misreported as tool crashes. + */ +function ok(success: boolean, data: Record): AltimateCoreResult { return { success, data } } @@ -50,18 +54,14 @@ const IFF_PATTERN = /\bIFF\s*\(([^,()]+),\s*([^,()]+),\s*([^()]+)\)/gi export function preprocessIff(sql: string): string { let current = sql for (let i = 0; i < 10; i++) { - const next = current.replace( - IFF_PATTERN, - "CASE WHEN $1 THEN $2 ELSE $3 END", - ) + const next = current.replace(IFF_PATTERN, "CASE WHEN $1 THEN $2 ELSE $3 END") if (next === current) break current = next } return current } -const QUALIFY_PATTERN = - /\bQUALIFY\b\s+(.+?)(?=\s*(?:LIMIT\s+\d|ORDER\s+BY|;|$))/is +const QUALIFY_PATTERN = /\bQUALIFY\b\s+(.+?)(?=\s*(?:LIMIT\s+\d|ORDER\s+BY|;|$))/is /** * Wrap QUALIFY clause into outer SELECT for targets that lack native support. @@ -85,436 +85,415 @@ const QUALIFY_TARGETS = new Set(["bigquery", "databricks", "spark", "trino"]) /** Register all 34 altimate_core.* native handlers with the Dispatcher. * Exported so tests can re-register after Dispatcher.reset(). 
*/ export function registerAll(): void { - -// 1. altimate_core.validate -register("altimate_core.validate", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = await core.validate(params.sql, schema) - const data = toData(raw) - return ok(data.valid !== false, data) - } catch (e) { - return fail(e) - } -}) - -// 2. altimate_core.lint -register("altimate_core.lint", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = core.lint(params.sql, schema) - const data = toData(raw) - return ok(data.clean !== false, data) - } catch (e) { - return fail(e) - } -}) - -// 3. altimate_core.safety -register("altimate_core.safety", async (params) => { - try { - const raw = core.scanSql(params.sql) - const data = toData(raw) - return ok(data.safe !== false, data) - } catch (e) { - return fail(e) - } -}) - -// 4. altimate_core.transpile — with IFF/QUALIFY transforms -register("altimate_core.transpile", async (params) => { - try { - const processed = preprocessIff(params.sql) - const raw = core.transpile(processed, params.from_dialect, params.to_dialect) - const data = toData(raw) - - // Post-process QUALIFY for targets that lack native support - const targetLower = params.to_dialect.toLowerCase() - if (QUALIFY_TARGETS.has(targetLower)) { - // Rust returns transpiled_sql as string[] — use first element - const transpiled = Array.isArray(data.transpiled_sql) - ? (data.transpiled_sql as string[])[0] - : (data.transpiled_sql as string) || (data.sql as string) || (data.translated_sql as string) || "" - if (transpiled && transpiled.toUpperCase().includes("QUALIFY")) { - const fixed = postprocessQualify(transpiled) - if (Array.isArray(data.transpiled_sql)) { - ;(data.transpiled_sql as string[])[0] = fixed - } else if ("sql" in data) { - data.sql = fixed - } else { - data.translated_sql = fixed + // 1. 
altimate_core.validate + register("altimate_core.validate", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = await core.validate(params.sql, schema) + const data = toData(raw) + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 2. altimate_core.lint + register("altimate_core.lint", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = core.lint(params.sql, schema) + const data = toData(raw) + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 3. altimate_core.safety + register("altimate_core.safety", async (params) => { + try { + const raw = core.scanSql(params.sql) + const data = toData(raw) + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 4. altimate_core.transpile — with IFF/QUALIFY transforms + register("altimate_core.transpile", async (params) => { + try { + const processed = preprocessIff(params.sql) + const raw = core.transpile(processed, params.from_dialect, params.to_dialect) + const data = toData(raw) + + // Post-process QUALIFY for targets that lack native support + const targetLower = params.to_dialect.toLowerCase() + if (QUALIFY_TARGETS.has(targetLower)) { + // Rust returns transpiled_sql as string[] — use first element + const transpiled = Array.isArray(data.transpiled_sql) + ? (data.transpiled_sql as string[])[0] + : (data.transpiled_sql as string) || (data.sql as string) || (data.translated_sql as string) || "" + if (transpiled && transpiled.toUpperCase().includes("QUALIFY")) { + const fixed = postprocessQualify(transpiled) + if (Array.isArray(data.transpiled_sql)) { + ;(data.transpiled_sql as string[])[0] = fixed + } else if ("sql" in data) { + data.sql = fixed + } else { + data.translated_sql = fixed + } } } - } - - return ok(data.success !== false, data) - } catch (e) { - return fail(e) - } -}) - -// 5. 
altimate_core.explain -register("altimate_core.explain", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = await core.explain(params.sql, schema) - const data = toData(raw) - return ok(data.valid !== false, data) - } catch (e) { - return fail(e) - } -}) - -// 6. altimate_core.check — composite: validate + lint + scan_sql -register("altimate_core.check", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const validation = await core.validate(params.sql, schema) - const lintResult = core.lint(params.sql, schema) - const safety = core.scanSql(params.sql) - const data: Record = { - validation: toData(validation), - lint: toData(lintResult), - safety: toData(safety), - } - return ok(true, data) - } catch (e) { - return fail(e) - } -}) - -// 7. altimate_core.fix -register("altimate_core.fix", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = await core.fix( - params.sql, - schema, - params.max_iterations ?? undefined, - ) - const data = toData(raw) - return ok(data.fixed !== false, data) - } catch (e) { - return fail(e) - } -}) - -// 8. altimate_core.policy -register("altimate_core.policy", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = await core.checkPolicy(params.sql, schema, params.policy_json) - const data = toData(raw) - return ok(data.allowed !== false, data) - } catch (e) { - return fail(e) - } -}) - -// 9. altimate_core.semantics -register("altimate_core.semantics", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = await core.checkSemantics(params.sql, schema) - const data = toData(raw) - return ok(data.valid !== false, data) - } catch (e) { - return fail(e) - } -}) - -// 10. 
altimate_core.testgen -register("altimate_core.testgen", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = core.generateTests(params.sql, schema) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 11. altimate_core.equivalence -register("altimate_core.equivalence", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = await core.checkEquivalence(params.sql1, params.sql2, schema) - const data = toData(raw) - return ok(data.equivalent !== false, data) - } catch (e) { - return fail(e) - } -}) - -// 12. altimate_core.migration -register("altimate_core.migration", async (params) => { - try { - // Build schema from old_ddl, analyze new_ddl against it - const schema = core.Schema.fromDdl( - params.old_ddl, - params.dialect || undefined, - ) - const raw = core.analyzeMigration(params.new_ddl, schema) - const data = toData(raw) - return ok(data.safe !== false, data) - } catch (e) { - return fail(e) - } -}) - -// 13. altimate_core.schema_diff -register("altimate_core.schema_diff", async (params) => { - try { - const s1 = schemaOrEmpty(params.schema1_path, params.schema1_context) - const s2 = schemaOrEmpty(params.schema2_path, params.schema2_context) - const raw = core.diffSchemas(s1, s2) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 14. altimate_core.rewrite -register("altimate_core.rewrite", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = core.rewrite(params.sql, schema) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 15. 
altimate_core.correct -register("altimate_core.correct", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = await core.correct(params.sql, schema) - const data = toData(raw) - return ok(data.status !== "unfixable", data) - } catch (e) { - return fail(e) - } -}) - -// 16. altimate_core.grade -register("altimate_core.grade", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = await core.evaluate(params.sql, schema) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 17. altimate_core.classify_pii -register("altimate_core.classify_pii", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = core.classifyPii(schema) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 18. altimate_core.query_pii -register("altimate_core.query_pii", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = core.checkQueryPii(params.sql, schema) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 19. altimate_core.resolve_term — returns array, must wrap -register("altimate_core.resolve_term", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = core.resolveTerm(params.term, schema) - // Rust returns an array of matches — wrap for consistent object shape - const matches = Array.isArray(raw) ? JSON.parse(JSON.stringify(raw)) : [] - return ok(matches.length > 0, { matches }) - } catch (e) { - return fail(e) - } -}) - -// 20. altimate_core.column_lineage -register("altimate_core.column_lineage", async (params) => { - try { - const schema = resolveSchema(params.schema_path, params.schema_context) - const raw = core.columnLineage( - params.sql, - params.dialect || undefined, - schema ?? 
undefined, - ) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 21. altimate_core.track_lineage -register("altimate_core.track_lineage", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = core.trackLineage(params.queries, schema) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 22. altimate_core.format -register("altimate_core.format", async (params) => { - try { - const raw = core.formatSql(params.sql, params.dialect || undefined) - const data = toData(raw) - return ok(data.success !== false, data) - } catch (e) { - return fail(e) - } -}) - -// 23. altimate_core.metadata -register("altimate_core.metadata", async (params) => { - try { - const raw = core.extractMetadata(params.sql, params.dialect || undefined) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 24. altimate_core.compare -register("altimate_core.compare", async (params) => { - try { - const raw = core.compareQueries( - params.left_sql, - params.right_sql, - params.dialect || undefined, - ) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 25. altimate_core.complete -register("altimate_core.complete", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = core.complete(params.sql, params.cursor_pos, schema) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 26. altimate_core.optimize_context -register("altimate_core.optimize_context", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = core.optimizeContext(schema) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 27. 
altimate_core.optimize_for_query -register("altimate_core.optimize_for_query", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = core.optimizeForQuery(params.sql, schema) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 28. altimate_core.prune_schema -register("altimate_core.prune_schema", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = core.pruneSchema(params.sql, schema) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 29. altimate_core.import_ddl — returns Schema, must serialize -register("altimate_core.import_ddl", async (params) => { - try { - const schema = core.importDdl(params.ddl, params.dialect || undefined) - const jsonObj = schema.toJson() - return ok(true, { success: true, schema: toData(jsonObj) }) - } catch (e) { - return fail(e) - } -}) - -// 30. altimate_core.export_ddl — returns string -register("altimate_core.export_ddl", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const ddl = core.exportDdl(schema) - return ok(true, { success: true, ddl }) - } catch (e) { - return fail(e) - } -}) - -// 31. altimate_core.fingerprint — returns string hash -register("altimate_core.fingerprint", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const fingerprint = core.schemaFingerprint(schema) - return ok(true, { success: true, fingerprint }) - } catch (e) { - return fail(e) - } -}) - -// 32. altimate_core.introspection_sql -register("altimate_core.introspection_sql", async (params) => { - try { - const raw = core.introspectionSql( - params.db_type, - params.database, - params.schema_name ?? undefined, - ) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 33. 
altimate_core.parse_dbt -register("altimate_core.parse_dbt", async (params) => { - try { - const raw = core.parseDbtProject(params.project_dir) - return ok(true, toData(raw)) - } catch (e) { - return fail(e) - } -}) - -// 34. altimate_core.is_safe — returns boolean -register("altimate_core.is_safe", async (params) => { - try { - const safe = core.isSafe(params.sql) - return ok(true, { safe }) - } catch (e) { - return fail(e) - } -}) + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 5. altimate_core.explain + register("altimate_core.explain", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = await core.explain(params.sql, schema) + const data = toData(raw) + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 6. altimate_core.check — composite: validate + lint + scan_sql + register("altimate_core.check", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const validation = await core.validate(params.sql, schema) + const lintResult = core.lint(params.sql, schema) + const safety = core.scanSql(params.sql) + const data: Record = { + validation: toData(validation), + lint: toData(lintResult), + safety: toData(safety), + } + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 7. altimate_core.fix + register("altimate_core.fix", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = await core.fix(params.sql, schema, params.max_iterations ?? undefined) + const data = toData(raw) + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 8. 
altimate_core.policy + register("altimate_core.policy", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = await core.checkPolicy(params.sql, schema, params.policy_json) + const data = toData(raw) + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 9. altimate_core.semantics + register("altimate_core.semantics", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = await core.checkSemantics(params.sql, schema) + const data = toData(raw) + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 10. altimate_core.testgen + register("altimate_core.testgen", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = core.generateTests(params.sql, schema) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 11. altimate_core.equivalence + register("altimate_core.equivalence", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = await core.checkEquivalence(params.sql1, params.sql2, schema) + const data = toData(raw) + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 12. altimate_core.migration + register("altimate_core.migration", async (params) => { + try { + // Build schema from old_ddl, analyze new_ddl against it + const schema = core.Schema.fromDdl(params.old_ddl, params.dialect || undefined) + const raw = core.analyzeMigration(params.new_ddl, schema) + const data = toData(raw) + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 13. 
altimate_core.schema_diff + register("altimate_core.schema_diff", async (params) => { + try { + const s1 = schemaOrEmpty(params.schema1_path, params.schema1_context) + const s2 = schemaOrEmpty(params.schema2_path, params.schema2_context) + const raw = core.diffSchemas(s1, s2) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 14. altimate_core.rewrite + register("altimate_core.rewrite", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = core.rewrite(params.sql, schema) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 15. altimate_core.correct + register("altimate_core.correct", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = await core.correct(params.sql, schema) + const data = toData(raw) + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 16. altimate_core.grade + register("altimate_core.grade", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = await core.evaluate(params.sql, schema) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 17. altimate_core.classify_pii + register("altimate_core.classify_pii", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = core.classifyPii(schema) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 18. altimate_core.query_pii + register("altimate_core.query_pii", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = core.checkQueryPii(params.sql, schema) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 19. 
altimate_core.resolve_term — returns array, must wrap + register("altimate_core.resolve_term", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = core.resolveTerm(params.term, schema) + // Rust returns an array of matches — wrap for consistent object shape + const matches = Array.isArray(raw) ? JSON.parse(JSON.stringify(raw)) : [] + return ok(true, { matches }) + } catch (e) { + return fail(e) + } + }) + + // 20. altimate_core.column_lineage + register("altimate_core.column_lineage", async (params) => { + try { + const schema = resolveSchema(params.schema_path, params.schema_context) + const raw = core.columnLineage(params.sql, params.dialect || undefined, schema ?? undefined) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 21. altimate_core.track_lineage + register("altimate_core.track_lineage", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = core.trackLineage(params.queries, schema) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 22. altimate_core.format + register("altimate_core.format", async (params) => { + try { + const raw = core.formatSql(params.sql, params.dialect || undefined) + const data = toData(raw) + return ok(true, data) + } catch (e) { + return fail(e) + } + }) + + // 23. altimate_core.metadata + register("altimate_core.metadata", async (params) => { + try { + const raw = core.extractMetadata(params.sql, params.dialect || undefined) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 24. altimate_core.compare + register("altimate_core.compare", async (params) => { + try { + const raw = core.compareQueries(params.left_sql, params.right_sql, params.dialect || undefined) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 25. 
altimate_core.complete + register("altimate_core.complete", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = core.complete(params.sql, params.cursor_pos, schema) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 26. altimate_core.optimize_context + register("altimate_core.optimize_context", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = core.optimizeContext(schema) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 27. altimate_core.optimize_for_query + register("altimate_core.optimize_for_query", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = core.optimizeForQuery(params.sql, schema) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 28. altimate_core.prune_schema + register("altimate_core.prune_schema", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = core.pruneSchema(params.sql, schema) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 29. altimate_core.import_ddl — returns Schema, must serialize + register("altimate_core.import_ddl", async (params) => { + try { + const schema = core.importDdl(params.ddl, params.dialect || undefined) + const jsonObj = schema.toJson() + return ok(true, { success: true, schema: toData(jsonObj) }) + } catch (e) { + return fail(e) + } + }) + + // 30. altimate_core.export_ddl — returns string + register("altimate_core.export_ddl", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const ddl = core.exportDdl(schema) + return ok(true, { success: true, ddl }) + } catch (e) { + return fail(e) + } + }) + + // 31. 
altimate_core.fingerprint — returns string hash + register("altimate_core.fingerprint", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const fingerprint = core.schemaFingerprint(schema) + return ok(true, { success: true, fingerprint }) + } catch (e) { + return fail(e) + } + }) + + // 32. altimate_core.introspection_sql + register("altimate_core.introspection_sql", async (params) => { + try { + const raw = core.introspectionSql(params.db_type, params.database, params.schema_name ?? undefined) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 33. altimate_core.parse_dbt + register("altimate_core.parse_dbt", async (params) => { + try { + const raw = core.parseDbtProject(params.project_dir) + return ok(true, toData(raw)) + } catch (e) { + return fail(e) + } + }) + + // 34. altimate_core.is_safe — returns boolean + register("altimate_core.is_safe", async (params) => { + try { + const safe = core.isSafe(params.sql) + return ok(true, { safe }) + } catch (e) { + return fail(e) + } + }) } // end registerAll // Auto-register on module load diff --git a/packages/opencode/src/altimate/native/sql/register.ts b/packages/opencode/src/altimate/native/sql/register.ts index f992a8048..8a337bdcf 100644 --- a/packages/opencode/src/altimate/native/sql/register.ts +++ b/packages/opencode/src/altimate/native/sql/register.ts @@ -23,420 +23,457 @@ import type { /** Register all composite SQL handlers with the Dispatcher. * Exported so tests can re-register after Dispatcher.reset(). 
*/ export function registerAllSql(): void { + // --------------------------------------------------------------------------- + // sql.analyze — lint + semantics + safety + // --------------------------------------------------------------------------- + register("sql.analyze", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const [lintRaw, semanticsRaw, safetyRaw] = await Promise.all([ + core.lint(params.sql, schema), + core.checkSemantics(params.sql, schema), + core.scanSql(params.sql), + ]) + + const lint = JSON.parse(JSON.stringify(lintRaw)) + const semantics = JSON.parse(JSON.stringify(semanticsRaw)) + const safety = JSON.parse(JSON.stringify(safetyRaw)) + + const issues: SqlAnalyzeIssue[] = [] + + for (const f of lint.findings ?? []) { + issues.push({ + type: "lint", + rule: f.rule, + severity: f.severity ?? "warning", + message: f.message ?? f.rule ?? "", + recommendation: f.suggestion ?? "", + location: f.line ? `line ${f.line}` : undefined, + confidence: "high", + }) + } -// --------------------------------------------------------------------------- -// sql.analyze — lint + semantics + safety -// --------------------------------------------------------------------------- -register("sql.analyze", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const [lintRaw, semanticsRaw, safetyRaw] = await Promise.all([ - core.lint(params.sql, schema), - core.checkSemantics(params.sql, schema), - core.scanSql(params.sql), - ]) - - const lint = JSON.parse(JSON.stringify(lintRaw)) - const semantics = JSON.parse(JSON.stringify(semanticsRaw)) - const safety = JSON.parse(JSON.stringify(safetyRaw)) - - const issues: SqlAnalyzeIssue[] = [] - - for (const f of lint.findings ?? []) { - issues.push({ - type: "lint", - severity: f.severity ?? "warning", - message: f.message ?? f.rule ?? "", - recommendation: f.suggestion ?? "", - location: f.line ? 
`line ${f.line}` : undefined, + for (const f of semantics.findings ?? []) { + issues.push({ + type: "semantic", + severity: f.severity ?? "warning", + message: f.message ?? "", + recommendation: f.suggestion ?? f.explanation ?? "", + confidence: String(f.confidence ?? "medium"), + }) + } + + for (const t of safety.threats ?? []) { + issues.push({ + type: "safety", + severity: t.severity ?? "high", + message: t.message ?? "", + recommendation: t.detail ?? "", + location: t.location ? `chars ${t.location[0]}-${t.location[1]}` : undefined, + confidence: "high", + }) + } + + return { + success: true, + issues, + issue_count: issues.length, confidence: "high", - }) + confidence_factors: ["lint", "semantics", "safety"], + } satisfies SqlAnalyzeResult + } catch (e) { + return { + success: false, + issues: [], + issue_count: 0, + confidence: "low", + confidence_factors: [], + error: String(e), + } satisfies SqlAnalyzeResult } + }) + + // --------------------------------------------------------------------------- + // sql.translate — transpile with IFF/QUALIFY transforms + // --------------------------------------------------------------------------- + register("sql.translate", async (params) => { + try { + const processed = preprocessIff(params.sql) + const raw = core.transpile(processed, params.source_dialect, params.target_dialect) + const result = JSON.parse(JSON.stringify(raw)) + + let translatedSql = result.transpiled_sql?.[0] ?? "" + const target = params.target_dialect.toLowerCase() + if (["bigquery", "databricks", "spark", "trino"].includes(target)) { + if (translatedSql.toUpperCase().includes("QUALIFY")) { + translatedSql = postprocessQualify(translatedSql) + } + } - for (const f of semantics.findings ?? []) { - issues.push({ - type: "semantic", + return { + success: result.success ?? true, + translated_sql: translatedSql, + source_dialect: params.source_dialect, + target_dialect: params.target_dialect, + warnings: result.error ? 
[result.error] : [], + } satisfies SqlTranslateResult + } catch (e) { + return { + success: false, + source_dialect: params.source_dialect, + target_dialect: params.target_dialect, + warnings: [], + error: String(e), + } satisfies SqlTranslateResult + } + }) + + // --------------------------------------------------------------------------- + // sql.optimize — rewrite + lint + // --------------------------------------------------------------------------- + register("sql.optimize", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const [rewriteRaw, lintRaw] = await Promise.all([core.rewrite(params.sql, schema), core.lint(params.sql, schema)]) + + const rewrite = JSON.parse(JSON.stringify(rewriteRaw)) + const lint = JSON.parse(JSON.stringify(lintRaw)) + + const suggestions: SqlOptimizeSuggestion[] = (rewrite.suggestions ?? []).map((s: any) => ({ + type: "REWRITE", + description: s.explanation ?? s.rule ?? "", + before: params.sql, + after: s.rewritten_sql, + impact: s.confidence > 0.7 ? "high" : s.confidence > 0.4 ? "medium" : "low", + })) + + const antiPatterns = (lint.findings ?? []).map((f: any) => ({ + type: f.rule ?? "lint", severity: f.severity ?? "warning", message: f.message ?? "", - recommendation: f.suggestion ?? f.explanation ?? "", - confidence: String(f.confidence ?? "medium"), - }) - } - - for (const t of safety.threats ?? []) { - issues.push({ - type: "safety", - severity: t.severity ?? "high", - message: t.message ?? "", - recommendation: t.detail ?? "", - location: t.location ? `chars ${t.location[0]}-${t.location[1]}` : undefined, + recommendation: f.suggestion ?? "", + location: f.line ? `line ${f.line}` : undefined, confidence: "high", - }) + })) + + const bestRewrite = rewrite.suggestions?.[0]?.rewritten_sql + + return { + success: true, + original_sql: params.sql, + optimized_sql: bestRewrite ?? params.sql, + suggestions, + anti_patterns: antiPatterns, + confidence: suggestions.length > 0 ? 
"high" : "medium", + } satisfies SqlOptimizeResult + } catch (e) { + return { + success: false, + original_sql: params.sql, + suggestions: [], + anti_patterns: [], + confidence: "low", + error: String(e), + } satisfies SqlOptimizeResult } - - return { - success: true, - issues, - issue_count: issues.length, - confidence: "high", - confidence_factors: ["lint", "semantics", "safety"], - } satisfies SqlAnalyzeResult - } catch (e) { - return { - success: false, - issues: [], - issue_count: 0, - confidence: "low", - confidence_factors: [], - error: String(e), - } satisfies SqlAnalyzeResult - } -}) - -// --------------------------------------------------------------------------- -// sql.translate — transpile with IFF/QUALIFY transforms -// --------------------------------------------------------------------------- -register("sql.translate", async (params) => { - try { - const processed = preprocessIff(params.sql) - const raw = core.transpile(processed, params.source_dialect, params.target_dialect) - const result = JSON.parse(JSON.stringify(raw)) - - let translatedSql = result.transpiled_sql?.[0] ?? "" - const target = params.target_dialect.toLowerCase() - if (["bigquery", "databricks", "spark", "trino"].includes(target)) { - if (translatedSql.toUpperCase().includes("QUALIFY")) { - translatedSql = postprocessQualify(translatedSql) + }) + + // --------------------------------------------------------------------------- + // sql.format + // --------------------------------------------------------------------------- + register("sql.format", async (params) => { + try { + const raw = core.formatSql(params.sql, params.dialect) + const result = JSON.parse(JSON.stringify(raw)) + return { + success: result.success ?? true, + formatted_sql: result.formatted_sql ?? params.sql, + dialect: params.dialect ?? "generic", + error: result.error, } + } catch (e) { + return { success: false, formatted_sql: params.sql, dialect: params.dialect ?? 
"generic", error: String(e) } } - - return { - success: result.success ?? true, - translated_sql: translatedSql, - source_dialect: params.source_dialect, - target_dialect: params.target_dialect, - warnings: result.error ? [result.error] : [], - } satisfies SqlTranslateResult - } catch (e) { - return { - success: false, - source_dialect: params.source_dialect, - target_dialect: params.target_dialect, - warnings: [], - error: String(e), - } satisfies SqlTranslateResult - } -}) - -// --------------------------------------------------------------------------- -// sql.optimize — rewrite + lint -// --------------------------------------------------------------------------- -register("sql.optimize", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const [rewriteRaw, lintRaw] = await Promise.all([ - core.rewrite(params.sql, schema), - core.lint(params.sql, schema), - ]) - - const rewrite = JSON.parse(JSON.stringify(rewriteRaw)) - const lint = JSON.parse(JSON.stringify(lintRaw)) - - const suggestions: SqlOptimizeSuggestion[] = (rewrite.suggestions ?? []).map((s: any) => ({ - type: "REWRITE", - description: s.explanation ?? s.rule ?? "", - before: params.sql, - after: s.rewritten_sql, - impact: s.confidence > 0.7 ? "high" : s.confidence > 0.4 ? "medium" : "low", - })) - - const antiPatterns = (lint.findings ?? []).map((f: any) => ({ - type: f.rule ?? "lint", - severity: f.severity ?? "warning", - message: f.message ?? "", - recommendation: f.suggestion ?? "", - location: f.line ? `line ${f.line}` : undefined, - confidence: "high", - })) - - const bestRewrite = rewrite.suggestions?.[0]?.rewritten_sql - - return { - success: true, - original_sql: params.sql, - optimized_sql: bestRewrite ?? params.sql, - suggestions, - anti_patterns: antiPatterns, - confidence: suggestions.length > 0 ? 
"high" : "medium", - } satisfies SqlOptimizeResult - } catch (e) { - return { - success: false, - original_sql: params.sql, - suggestions: [], - anti_patterns: [], - confidence: "low", - error: String(e), - } satisfies SqlOptimizeResult - } -}) - -// --------------------------------------------------------------------------- -// sql.format -// --------------------------------------------------------------------------- -register("sql.format", async (params) => { - try { - const raw = core.formatSql(params.sql, params.dialect) - const result = JSON.parse(JSON.stringify(raw)) - return { - success: result.success ?? true, - formatted_sql: result.formatted_sql ?? params.sql, - dialect: params.dialect ?? "generic", - error: result.error, - } - } catch (e) { - return { success: false, formatted_sql: params.sql, dialect: params.dialect ?? "generic", error: String(e) } - } -}) - -// --------------------------------------------------------------------------- -// sql.fix -// --------------------------------------------------------------------------- -register("sql.fix", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = await core.fix(params.sql, schema) - const result = JSON.parse(JSON.stringify(raw)) - - const suggestions = (result.fixes_applied ?? []).map((f: any) => ({ - type: f.type ?? f.rule ?? "fix", - message: f.message ?? f.description ?? "", - confidence: f.confidence ?? "medium", - fixed_sql: f.fixed_sql ?? f.rewritten_sql, - })) - - return { - success: result.fixed ?? true, - original_sql: result.original_sql ?? params.sql, - fixed_sql: result.fixed_sql ?? params.sql, - error_message: params.error_message ?? "", - suggestions, - suggestion_count: suggestions.length, - } - } catch (e) { - return { - success: false, - original_sql: params.sql, - fixed_sql: params.sql, - error_message: params.error_message ?? 
"", - suggestions: [], - suggestion_count: 0, - error: String(e), + }) + + // --------------------------------------------------------------------------- + // sql.fix + // --------------------------------------------------------------------------- + register("sql.fix", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = await core.fix(params.sql, schema) + const result = JSON.parse(JSON.stringify(raw)) + + const suggestions = (result.fixes_applied ?? []).map((f: any) => ({ + type: f.type ?? f.rule ?? "fix", + message: f.message ?? f.description ?? "", + confidence: f.confidence ?? "medium", + fixed_sql: f.fixed_sql ?? f.rewritten_sql, + })) + + const unfixableMessages = Array.isArray(result.unfixable_errors) + ? result.unfixable_errors + .map((e: any) => e.error?.message ?? e.message ?? e.reason ?? String(e)) + .filter((msg: any) => (typeof msg === "string" ? msg.trim().length > 0 : Boolean(msg))) + : [] + const unfixableError = !result.fixed && unfixableMessages.length > 0 ? unfixableMessages.join("; ") : undefined + + return { + success: result.fixed ?? true, + original_sql: result.original_sql ?? params.sql, + fixed_sql: result.fixed_sql ?? params.sql, + error_message: params.error_message ?? "", + suggestions, + suggestion_count: suggestions.length, + ...(unfixableError && { error: unfixableError }), + } + } catch (e) { + return { + success: false, + original_sql: params.sql, + fixed_sql: params.sql, + error_message: params.error_message ?? 
"", + suggestions: [], + suggestion_count: 0, + error: String(e), + } } - } -}) - -// --------------------------------------------------------------------------- -// sql.autocomplete — uses altimate-core complete() + schema cache search -// --------------------------------------------------------------------------- -register("sql.autocomplete", async (params) => { - try { - const suggestions: Array<{ - name: string - type: string - detail?: string - fqn?: string - table?: string - warehouse?: string - in_context: boolean - }> = [] - - // Try altimate-core completion if we have a schema context - if (params.table_context?.length) { - try { - const ddl = params.table_context - .map((t: string) => `CREATE TABLE ${t} (id INT);`) - .join("\n") - const schema = core.Schema.fromDdl(ddl) - const raw = core.complete(params.prefix, params.prefix.length, schema) - const result = JSON.parse(JSON.stringify(raw)) - for (const item of result.items ?? []) { - suggestions.push({ - name: item.label, - type: item.kind ?? "keyword", - detail: item.detail, - in_context: true, - }) + }) + + // --------------------------------------------------------------------------- + // sql.autocomplete — uses altimate-core complete() + schema cache search + // --------------------------------------------------------------------------- + register("sql.autocomplete", async (params) => { + try { + const suggestions: Array<{ + name: string + type: string + detail?: string + fqn?: string + table?: string + warehouse?: string + in_context: boolean + }> = [] + + // Try altimate-core completion if we have a schema context + if (params.table_context?.length) { + try { + const ddl = params.table_context.map((t: string) => `CREATE TABLE ${t} (id INT);`).join("\n") + const schema = core.Schema.fromDdl(ddl) + const raw = core.complete(params.prefix, params.prefix.length, schema) + const result = JSON.parse(JSON.stringify(raw)) + for (const item of result.items ?? 
[]) { + suggestions.push({ + name: item.label, + type: item.kind ?? "keyword", + detail: item.detail, + in_context: true, + }) + } + } catch { + // Fallback to simple keyword suggestions below } - } catch { - // Fallback to simple keyword suggestions below } - } - // SQL keyword suggestions as fallback - if (suggestions.length === 0 && params.prefix) { - const prefix = params.prefix.toUpperCase() - const keywords = [ - "SELECT", "FROM", "WHERE", "JOIN", "LEFT JOIN", "RIGHT JOIN", - "INNER JOIN", "GROUP BY", "ORDER BY", "HAVING", "LIMIT", - "INSERT", "UPDATE", "DELETE", "CREATE", "ALTER", "DROP", - "UNION", "UNION ALL", "DISTINCT", "AS", "ON", "AND", "OR", - "NOT", "IN", "BETWEEN", "LIKE", "IS NULL", "IS NOT NULL", - "COUNT", "SUM", "AVG", "MIN", "MAX", "CASE", "WHEN", "THEN", - "ELSE", "END", "EXISTS", "WITH", "OVER", "PARTITION BY", - ] - for (const kw of keywords) { - if (kw.startsWith(prefix)) { - suggestions.push({ name: kw, type: "keyword", in_context: false }) + // SQL keyword suggestions as fallback + if (suggestions.length === 0 && params.prefix) { + const prefix = params.prefix.toUpperCase() + const keywords = [ + "SELECT", + "FROM", + "WHERE", + "JOIN", + "LEFT JOIN", + "RIGHT JOIN", + "INNER JOIN", + "GROUP BY", + "ORDER BY", + "HAVING", + "LIMIT", + "INSERT", + "UPDATE", + "DELETE", + "CREATE", + "ALTER", + "DROP", + "UNION", + "UNION ALL", + "DISTINCT", + "AS", + "ON", + "AND", + "OR", + "NOT", + "IN", + "BETWEEN", + "LIKE", + "IS NULL", + "IS NOT NULL", + "COUNT", + "SUM", + "AVG", + "MIN", + "MAX", + "CASE", + "WHEN", + "THEN", + "ELSE", + "END", + "EXISTS", + "WITH", + "OVER", + "PARTITION BY", + ] + for (const kw of keywords) { + if (kw.startsWith(prefix)) { + suggestions.push({ name: kw, type: "keyword", in_context: false }) + } } } - } - const limit = params.limit ?? 50 - return { - suggestions: suggestions.slice(0, limit), - prefix: params.prefix, - position: params.position ?? 
"", - suggestion_count: Math.min(suggestions.length, limit), + const limit = params.limit ?? 50 + return { + suggestions: suggestions.slice(0, limit), + prefix: params.prefix, + position: params.position ?? "", + suggestion_count: Math.min(suggestions.length, limit), + } + } catch (e) { + return { + suggestions: [], + prefix: params.prefix ?? "", + position: params.position ?? "", + suggestion_count: 0, + } } - } catch (e) { - return { - suggestions: [], - prefix: params.prefix ?? "", - position: params.position ?? "", - suggestion_count: 0, + }) + + // --------------------------------------------------------------------------- + // sql.diff — text diff + equivalence check + // --------------------------------------------------------------------------- + register("sql.diff", async (params) => { + try { + const schema = params.schema_context ? (resolveSchema(undefined, params.schema_context) ?? undefined) : undefined + + const sqlA = params.original ?? params.sql_a + const sqlB = params.modified ?? params.sql_b + + const compareRaw = schema ? await core.checkEquivalence(sqlA, sqlB, schema) : null + const compare = compareRaw ? JSON.parse(JSON.stringify(compareRaw)) : null + + // Simple line-based diff + const linesA = sqlA.split("\n") + const linesB = sqlB.split("\n") + const diffLines: string[] = [] + const maxLen = Math.max(linesA.length, linesB.length) + for (let i = 0; i < maxLen; i++) { + const a = linesA[i] ?? "" + const b = linesB[i] ?? "" + if (a !== b) { + if (a) diffLines.push(`- ${a}`) + if (b) diffLines.push(`+ ${b}`) + } + } + + return { + success: true, + diff: diffLines.join("\n"), + equivalent: compare?.equivalent ?? false, + equivalence_confidence: compare?.confidence ?? 0, + differences: compare?.differences ?? 
[], + } + } catch (e) { + return { + success: false, + diff: "", + equivalent: false, + equivalence_confidence: 0, + differences: [], + error: String(e), + } } - } -}) - -// --------------------------------------------------------------------------- -// sql.diff — text diff + equivalence check -// --------------------------------------------------------------------------- -register("sql.diff", async (params) => { - try { - const schema = params.schema_context - ? resolveSchema(undefined, params.schema_context) ?? undefined - : undefined - - const sqlA = params.original ?? params.sql_a - const sqlB = params.modified ?? params.sql_b - - const compareRaw = schema - ? await core.checkEquivalence(sqlA, sqlB, schema) - : null - const compare = compareRaw ? JSON.parse(JSON.stringify(compareRaw)) : null - - // Simple line-based diff - const linesA = sqlA.split("\n") - const linesB = sqlB.split("\n") - const diffLines: string[] = [] - const maxLen = Math.max(linesA.length, linesB.length) - for (let i = 0; i < maxLen; i++) { - const a = linesA[i] ?? "" - const b = linesB[i] ?? "" - if (a !== b) { - if (a) diffLines.push(`- ${a}`) - if (b) diffLines.push(`+ ${b}`) + }) + + // --------------------------------------------------------------------------- + // sql.rewrite + // --------------------------------------------------------------------------- + register("sql.rewrite", async (params) => { + try { + const schema = schemaOrEmpty(params.schema_path, params.schema_context) + const raw = core.rewrite(params.sql, schema) + const result = JSON.parse(JSON.stringify(raw)) + return { + success: true, + original_sql: params.sql, + rewritten_sql: result.suggestions?.[0]?.rewritten_sql ?? null, + rewrites_applied: + result.suggestions?.map((s: any) => ({ + rule: s.rule, + original_fragment: params.sql, + rewritten_fragment: s.rewritten_sql ?? params.sql, + explanation: s.explanation ?? s.improvement ?? "", + can_auto_apply: (s.confidence ?? 0) >= 0.7, + })) ?? 
[], } + } catch (e) { + return { success: false, original_sql: params.sql, rewritten_sql: null, rewrites_applied: [], error: String(e) } } - - return { - success: true, - diff: diffLines.join("\n"), - equivalent: compare?.equivalent ?? false, - equivalence_confidence: compare?.confidence ?? 0, - differences: compare?.differences ?? [], + }) + + // --------------------------------------------------------------------------- + // sql.schema_diff + // --------------------------------------------------------------------------- + register("sql.schema_diff", async (params) => { + try { + const oldDdl = params.old_sql + const newDdl = params.new_sql + const oldSchema = core.Schema.fromDdl(oldDdl, params.dialect || undefined) + const newSchema = core.Schema.fromDdl(newDdl, params.dialect || undefined) + const raw = core.diffSchemas(oldSchema, newSchema) + const result = JSON.parse(JSON.stringify(raw)) + + const changes = result.changes ?? [] + const hasBreaking = changes.some((c: any) => c.severity === "breaking") + + return { + success: true, + changes, + has_breaking_changes: hasBreaking, + summary: result.summary ?? 
{}, + error: undefined, + } satisfies SchemaDiffResult + } catch (e) { + return { + success: false, + changes: [], + has_breaking_changes: false, + summary: {}, + error: String(e), + } satisfies SchemaDiffResult } - } catch (e) { - return { success: false, diff: "", equivalent: false, equivalence_confidence: 0, differences: [], error: String(e) } - } -}) - -// --------------------------------------------------------------------------- -// sql.rewrite -// --------------------------------------------------------------------------- -register("sql.rewrite", async (params) => { - try { - const schema = schemaOrEmpty(params.schema_path, params.schema_context) - const raw = core.rewrite(params.sql, schema) - const result = JSON.parse(JSON.stringify(raw)) - return { - success: true, - original_sql: params.sql, - rewritten_sql: result.suggestions?.[0]?.rewritten_sql ?? null, - rewrites_applied: result.suggestions?.map((s: any) => ({ - rule: s.rule, - original_fragment: params.sql, - rewritten_fragment: s.rewritten_sql ?? params.sql, - explanation: s.explanation ?? s.improvement ?? "", - can_auto_apply: (s.confidence ?? 0) >= 0.7, - })) ?? [], + }) + + // --------------------------------------------------------------------------- + // lineage.check + // --------------------------------------------------------------------------- + register("lineage.check", async (params) => { + try { + const schema = params.schema_context ? (resolveSchema(undefined, params.schema_context) ?? undefined) : undefined + const raw = core.columnLineage(params.sql, params.dialect ?? undefined, schema ?? 
undefined) + const result = JSON.parse(JSON.stringify(raw)) + return { + success: true, + data: result, + } satisfies LineageCheckResult + } catch (e) { + return { + success: false, + data: {}, + error: String(e), + } satisfies LineageCheckResult } - } catch (e) { - return { success: false, original_sql: params.sql, rewritten_sql: null, rewrites_applied: [], error: String(e) } - } -}) - -// --------------------------------------------------------------------------- -// sql.schema_diff -// --------------------------------------------------------------------------- -register("sql.schema_diff", async (params) => { - try { - const oldDdl = params.old_sql - const newDdl = params.new_sql - const oldSchema = core.Schema.fromDdl(oldDdl, params.dialect || undefined) - const newSchema = core.Schema.fromDdl(newDdl, params.dialect || undefined) - const raw = core.diffSchemas(oldSchema, newSchema) - const result = JSON.parse(JSON.stringify(raw)) - - const changes = result.changes ?? [] - const hasBreaking = changes.some((c: any) => c.severity === "breaking") - - return { - success: true, - changes, - has_breaking_changes: hasBreaking, - summary: result.summary ?? {}, - error: undefined, - } satisfies SchemaDiffResult - } catch (e) { - return { - success: false, - changes: [], - has_breaking_changes: false, - summary: {}, - error: String(e), - } satisfies SchemaDiffResult - } -}) - -// --------------------------------------------------------------------------- -// lineage.check -// --------------------------------------------------------------------------- -register("lineage.check", async (params) => { - try { - const schema = params.schema_context - ? resolveSchema(undefined, params.schema_context) ?? undefined - : undefined - const raw = core.columnLineage( - params.sql, - params.dialect ?? undefined, - schema ?? 
undefined, - ) - const result = JSON.parse(JSON.stringify(raw)) - return { - success: true, - data: result, - } satisfies LineageCheckResult - } catch (e) { - return { - success: false, - data: {}, - error: String(e), - } satisfies LineageCheckResult - } -}) - + }) } // end registerAllSql // Auto-register on module load diff --git a/packages/opencode/src/altimate/native/types.ts b/packages/opencode/src/altimate/native/types.ts index b8c87dddf..8d0f3978f 100644 --- a/packages/opencode/src/altimate/native/types.ts +++ b/packages/opencode/src/altimate/native/types.ts @@ -24,11 +24,13 @@ export interface SqlExecuteResult { export interface SqlAnalyzeParams { sql: string dialect?: string + schema_path?: string schema_context?: Record } export interface SqlAnalyzeIssue { type: string + rule?: string severity: string message: string recommendation: string @@ -385,6 +387,7 @@ export interface SqlFixResult { error_message: string suggestions: SqlFixSuggestion[] suggestion_count: number + error?: string } // --- SQL Autocomplete --- diff --git a/packages/opencode/src/altimate/telemetry/index.ts b/packages/opencode/src/altimate/telemetry/index.ts index 659cf70d8..0767b9210 100644 --- a/packages/opencode/src/altimate/telemetry/index.ts +++ b/packages/opencode/src/altimate/telemetry/index.ts @@ -48,8 +48,8 @@ export namespace Telemetry { // No nested objects: Azure App Insights custom measures must be top-level numbers. 
tokens_input: number tokens_output: number - tokens_reasoning?: number // only for reasoning models - tokens_cache_read?: number // only when a cached prompt was reused + tokens_reasoning?: number // only for reasoning models + tokens_cache_read?: number // only when a cached prompt was reused tokens_cache_write?: number // only when a new cache entry was written } | { @@ -401,19 +401,27 @@ export namespace Telemetry { session_id: string tool_name: string tool_category: string - error_class: - | "parse_error" - | "connection" - | "timeout" - | "validation" - | "internal" - | "permission" - | "unknown" + error_class: "parse_error" | "connection" | "timeout" | "validation" | "internal" | "permission" | "unknown" error_message: string input_signature: string masked_args?: string duration_ms: number } + // altimate_change start — sql quality telemetry for issue prevention metrics + | { + type: "sql_quality" + timestamp: number + session_id: string + tool_name: string + tool_category: string + finding_count: number + /** JSON-encoded Record — count per issue category */ + by_category: string + has_schema: boolean + dialect?: string + duration_ms: number + } + // altimate_change end const ERROR_PATTERNS: Array<{ class: Telemetry.Event & { type: "core_failure" } extends { error_class: infer C } ? C : never @@ -476,20 +484,40 @@ export namespace Telemetry { // Mirrors altimate-sdk (Rust) SENSITIVE_KEYS — keep in sync. 
const SENSITIVE_KEYS: string[] = [ - "key", "api_key", "apikey", "apiKey", "token", "access_token", "refresh_token", - "secret", "secret_key", "password", "passwd", "pwd", - "credential", "credentials", "authorization", "auth", - "signature", "sig", "private_key", "connection_string", + "key", + "api_key", + "apikey", + "apiKey", + "token", + "access_token", + "refresh_token", + "secret", + "secret_key", + "password", + "passwd", + "pwd", + "credential", + "credentials", + "authorization", + "auth", + "signature", + "sig", + "private_key", + "connection_string", // camelCase variants not caught by prefix/suffix matching - "authtoken", "accesstoken", "refreshtoken", "bearertoken", "jwttoken", - "jwtsecret", "clientsecret", "appsecret", + "authtoken", + "accesstoken", + "refreshtoken", + "bearertoken", + "jwttoken", + "jwtsecret", + "clientsecret", + "appsecret", ] function isSensitiveKey(key: string): boolean { const lower = key.toLowerCase() - return SENSITIVE_KEYS.some( - (k) => lower === k || lower.endsWith(`_${k}`) || lower.startsWith(`${k}_`), - ) + return SENSITIVE_KEYS.some((k) => lower === k || lower.endsWith(`_${k}`) || lower.startsWith(`${k}_`)) } export function maskString(s: string): string { @@ -674,7 +702,7 @@ export namespace Telemetry { // before Instance.provide()). Treat config failures as "not disabled" — // the env var check above is the early-init escape hatch. try { - const userConfig = await Config.get() as any + const userConfig = (await Config.get()) as any if (userConfig.telemetry?.disabled) { buffer = [] return @@ -789,6 +817,22 @@ export namespace Telemetry { } } + // altimate_change start — sql quality telemetry types + /** Lightweight finding record for quality telemetry. Only category — never SQL content. */ + export interface Finding { + category: string + } + + /** Aggregate an array of findings into category counts suitable for the sql_quality event. 
*/ + export function aggregateFindings(findings: Finding[]): Record { + const by_category: Record = {} + for (const f of findings) { + by_category[f.category] = (by_category[f.category] ?? 0) + 1 + } + return by_category + } + // altimate_change end + export async function shutdown() { // Wait for init to complete so we know whether telemetry is enabled // and have a valid endpoint to flush to. init() is fire-and-forget diff --git a/packages/opencode/src/altimate/tools/altimate-core-check.ts b/packages/opencode/src/altimate/tools/altimate-core-check.ts index 63b36fd58..803861487 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-check.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-check.ts @@ -1,31 +1,58 @@ import z from "zod" import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" +import type { Telemetry } from "../telemetry" export const AltimateCoreCheckTool = Tool.define("altimate_core_check", { description: - "Run full analysis pipeline: validate + lint + safety scan + PII check using the Rust-based altimate-core engine. Single call for comprehensive SQL analysis.", + "Run full analysis pipeline: validate + lint + safety scan + PII check. Single call for comprehensive SQL analysis. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ sql: z.string().describe("SQL query to analyze"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), schema_context: z.record(z.string(), z.any()).optional().describe("Inline schema definition"), }), async execute(args, ctx) { + const hasSchema = !!(args.schema_path || (args.schema_context && Object.keys(args.schema_context).length > 0)) try { const result = await Dispatcher.call("altimate_core.check", { sql: args.sql, schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? 
data.error + // altimate_change start — sql quality findings for telemetry + const findings: Telemetry.Finding[] = [] + for (const err of data.validation?.errors ?? []) { + findings.push({ category: "validation_error" }) + } + for (const f of data.lint?.findings ?? []) { + findings.push({ category: f.rule ?? "lint" }) + } + for (const t of data.safety?.threats ?? []) { + findings.push({ category: t.type ?? "safety_threat" }) + } + for (const p of data.pii?.findings ?? []) { + findings.push({ category: "pii_detected" }) + } + // altimate_change end return { title: `Check: ${formatCheckTitle(data)}`, - metadata: { success: result.success }, + metadata: { + success: result.success, + has_schema: hasSchema, + ...(error && { error }), + ...(findings.length > 0 && { findings }), + }, output: formatCheck(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Check: ERROR", metadata: { success: false }, output: `Failed: ${msg}` } + return { + title: "Check: ERROR", + metadata: { success: false, has_schema: hasSchema, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-classify-pii.ts b/packages/opencode/src/altimate/tools/altimate-core-classify-pii.ts index 8a5bbb099..56ba3a130 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-classify-pii.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-classify-pii.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreClassifyPiiTool = Tool.define("altimate_core_classify_pii", { description: - "Classify PII columns in a schema using the Rust-based altimate-core engine. Identifies columns likely containing personal identifiable information by name patterns and data types.", + "Classify PII columns in a schema. Identifies columns likely containing personal identifiable information by name patterns and data types. 
Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), schema_context: z.record(z.string(), z.any()).optional().describe("Inline schema definition"), @@ -15,17 +15,22 @@ export const AltimateCoreClassifyPiiTool = Tool.define("altimate_core_classify_p schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const piiColumns = data.columns ?? data.findings ?? [] const findingCount = piiColumns.length + const error = result.error ?? data.error return { title: `PII Classification: ${findingCount} finding(s)`, - metadata: { success: result.success, finding_count: findingCount }, + metadata: { success: result.success, finding_count: findingCount, ...(error && { error }) }, output: formatClassifyPii(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "PII Classification: ERROR", metadata: { success: false, finding_count: 0 }, output: `Failed: ${msg}` } + return { + title: "PII Classification: ERROR", + metadata: { success: false, finding_count: 0, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-column-lineage.ts b/packages/opencode/src/altimate/tools/altimate-core-column-lineage.ts index d19e412d6..3481d424c 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-column-lineage.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-column-lineage.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreColumnLineageTool = Tool.define("altimate_core_column_lineage", { description: - "Trace schema-aware column lineage using the Rust-based altimate-core engine. Maps how columns flow through a query from source tables to output. 
Requires altimate_core.init() with API key.", + "Trace schema-aware column lineage. Maps how columns flow through a query from source tables to output. Requires altimate_core.init() with API key. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ sql: z.string().describe("SQL query to trace lineage for"), dialect: z.string().optional().describe("SQL dialect (e.g. snowflake, bigquery)"), @@ -19,16 +19,21 @@ export const AltimateCoreColumnLineageTool = Tool.define("altimate_core_column_l schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const edgeCount = data.column_lineage?.length ?? 0 + const error = result.error ?? data.error return { title: `Column Lineage: ${edgeCount} edge(s)`, - metadata: { success: result.success, edge_count: edgeCount }, + metadata: { success: result.success, edge_count: edgeCount, ...(error && { error }) }, output: formatColumnLineage(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Column Lineage: ERROR", metadata: { success: false, edge_count: 0 }, output: `Failed: ${msg}` } + return { + title: "Column Lineage: ERROR", + metadata: { success: false, edge_count: 0, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-compare.ts b/packages/opencode/src/altimate/tools/altimate-core-compare.ts index d877eee60..21c4b0bcc 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-compare.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-compare.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreCompareTool = Tool.define("altimate_core_compare", { description: - "Structurally compare two SQL queries using the Rust-based altimate-core engine. 
Identifies differences in table references, join conditions, filters, projections, and aggregations.", + "Structurally compare two SQL queries. Identifies differences in table references, join conditions, filters, projections, and aggregations.", parameters: z.object({ left_sql: z.string().describe("First SQL query"), right_sql: z.string().describe("Second SQL query"), @@ -17,16 +17,21 @@ export const AltimateCoreCompareTool = Tool.define("altimate_core_compare", { right_sql: args.right_sql, dialect: args.dialect ?? "", }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const diffCount = data.differences?.length ?? 0 + const error = result.error ?? data.error return { title: `Compare: ${diffCount === 0 ? "IDENTICAL" : `${diffCount} difference(s)`}`, - metadata: { success: result.success, difference_count: diffCount }, + metadata: { success: result.success, difference_count: diffCount, ...(error && { error }) }, output: formatCompare(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Compare: ERROR", metadata: { success: false, difference_count: 0 }, output: `Failed: ${msg}` } + return { + title: "Compare: ERROR", + metadata: { success: false, difference_count: 0, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-complete.ts b/packages/opencode/src/altimate/tools/altimate-core-complete.ts index 758414617..a95cedbcc 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-complete.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-complete.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreCompleteTool = Tool.define("altimate_core_complete", { description: - "Get cursor-aware SQL completion suggestions using the Rust-based altimate-core engine. 
Returns table names, column names, functions, and keywords relevant to the cursor position.", + "Get cursor-aware SQL completion suggestions. Returns table names, column names, functions, and keywords relevant to the cursor position. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ sql: z.string().describe("Partial SQL query"), cursor_pos: z.number().describe("Cursor position (0-indexed character offset)"), @@ -19,16 +19,21 @@ export const AltimateCoreCompleteTool = Tool.define("altimate_core_complete", { schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const count = data.items?.length ?? data.suggestions?.length ?? 0 + const error = result.error ?? (data as any).error return { title: `Complete: ${count} suggestion(s)`, - metadata: { success: result.success, suggestion_count: count }, + metadata: { success: result.success, suggestion_count: count, ...(error && { error }) }, output: formatComplete(data), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) - return { title: "Complete: ERROR", metadata: { success: false, suggestion_count: 0 }, output: `Failed: ${msg}` } + return { + title: "Complete: ERROR", + metadata: { success: false, suggestion_count: 0, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-correct.ts b/packages/opencode/src/altimate/tools/altimate-core-correct.ts index d2ef172f1..62ece364c 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-correct.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-correct.ts @@ -1,35 +1,66 @@ import z from "zod" import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" +import type { Telemetry } from "../telemetry" export const AltimateCoreCorrectTool = Tool.define("altimate_core_correct", { description: - "Iteratively correct SQL using a propose-verify-refine loop via the Rust-based altimate-core engine. More thorough than fix — applies multiple correction rounds to produce valid SQL.", + "Iteratively correct SQL using a propose-verify-refine loop. More thorough than fix — applies multiple correction rounds to produce valid SQL. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ sql: z.string().describe("SQL query to correct"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), schema_context: z.record(z.string(), z.any()).optional().describe("Inline schema definition"), }), async execute(args, ctx) { + const hasSchema = !!(args.schema_path || (args.schema_context && Object.keys(args.schema_context).length > 0)) try { const result = await Dispatcher.call("altimate_core.correct", { sql: args.sql, schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? data.error ?? 
extractCorrectErrors(data) + // altimate_change start — sql quality findings for telemetry + const changes = Array.isArray(data.changes) ? data.changes : [] + const findings: Telemetry.Finding[] = changes.map(() => ({ + category: "correction_applied", + })) + // altimate_change end return { title: `Correct: ${data.success ? "CORRECTED" : "COULD NOT CORRECT"}`, - metadata: { success: result.success, iterations: data.iterations }, + metadata: { + success: result.success, + iterations: data.iterations, + has_schema: hasSchema, + ...(error && { error }), + ...(findings.length > 0 && { findings }), + }, output: formatCorrect(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Correct: ERROR", metadata: { success: false, iterations: 0 }, output: `Failed: ${msg}` } + return { + title: "Correct: ERROR", + metadata: { success: false, iterations: 0, has_schema: hasSchema, error: msg }, + output: `Failed: ${msg}`, + } } }, }) +function extractCorrectErrors(data: Record): string | undefined { + if (data.final_validation?.errors?.length > 0) { + const msgs = data.final_validation.errors.map((e: any) => e.message ?? String(e)).filter(Boolean) + if (msgs.length > 0) return msgs.join("; ") + } + if (Array.isArray(data.errors) && data.errors.length > 0) { + const msgs = data.errors.map((e: any) => e.message ?? 
String(e)).filter(Boolean) + if (msgs.length > 0) return msgs.join("; ") + } + return undefined +} + function formatCorrect(data: Record): string { if (data.error) return `Error: ${data.error}` const lines: string[] = [] diff --git a/packages/opencode/src/altimate/tools/altimate-core-equivalence.ts b/packages/opencode/src/altimate/tools/altimate-core-equivalence.ts index 4d1589672..11be1d0d7 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-equivalence.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-equivalence.ts @@ -1,10 +1,11 @@ import z from "zod" import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" +import type { Telemetry } from "../telemetry" export const AltimateCoreEquivalenceTool = Tool.define("altimate_core_equivalence", { description: - "Check semantic equivalence of two SQL queries using the Rust-based altimate-core engine. Determines if two queries produce the same result set regardless of syntactic differences.", + "Check semantic equivalence of two SQL queries. Determines if two queries produce the same result set regardless of syntactic differences. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ sql1: z.string().describe("First SQL query"), sql2: z.string().describe("Second SQL query"), @@ -12,6 +13,16 @@ export const AltimateCoreEquivalenceTool = Tool.define("altimate_core_equivalenc schema_context: z.record(z.string(), z.any()).optional().describe("Inline schema definition"), }), async execute(args, ctx) { + const hasSchema = !!(args.schema_path || (args.schema_context && Object.keys(args.schema_context).length > 0)) + if (!hasSchema) { + const error = + "No schema provided. Provide schema_context or schema_path so table/column references can be resolved." 
+ return { + title: "Equivalence: NO SCHEMA", + metadata: { success: false, equivalent: false, has_schema: false, error }, + output: `Error: ${error}`, + } + } try { const result = await Dispatcher.call("altimate_core.equivalence", { sql1: args.sql1, @@ -19,19 +30,51 @@ export const AltimateCoreEquivalenceTool = Tool.define("altimate_core_equivalenc schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? data.error ?? extractEquivalenceErrors(data) + // "Not equivalent" is a valid analysis result, not a failure. + // Only treat it as failure when there's an actual error. + const isRealFailure = !!error + // altimate_change start — sql quality findings for telemetry + const findings: Telemetry.Finding[] = [] + if (!data.equivalent && data.differences?.length) { + for (const d of data.differences) { + findings.push({ category: "equivalence_difference" }) + } + } + // altimate_change end return { - title: `Equivalence: ${data.equivalent ? "EQUIVALENT" : "DIFFERENT"}`, - metadata: { success: result.success, equivalent: data.equivalent }, + title: isRealFailure ? "Equivalence: ERROR" : `Equivalence: ${data.equivalent ? "EQUIVALENT" : "DIFFERENT"}`, + metadata: { + success: !isRealFailure, + equivalent: data.equivalent, + has_schema: hasSchema, + ...(error && { error }), + ...(findings.length > 0 && { findings }), + }, output: formatEquivalence(data), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) - return { title: "Equivalence: ERROR", metadata: { success: false, equivalent: false }, output: `Failed: ${msg}` } + return { + title: "Equivalence: ERROR", + metadata: { success: false, equivalent: false, has_schema: hasSchema, error: msg }, + output: `Failed: ${msg}`, + } } }, }) +function extractEquivalenceErrors(data: Record): string | undefined { + if (Array.isArray(data.validation_errors) && data.validation_errors.length > 0) { + const msgs = data.validation_errors + .map((e: any) => (typeof e === "string" ? e : (e.message ?? String(e)))) + .filter(Boolean) + return msgs.length > 0 ? msgs.join("; ") : undefined + } + return undefined +} + function formatEquivalence(data: Record): string { if (data.error) return `Error: ${data.error}` const lines: string[] = [] diff --git a/packages/opencode/src/altimate/tools/altimate-core-export-ddl.ts b/packages/opencode/src/altimate/tools/altimate-core-export-ddl.ts index af71567aa..efdc96755 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-export-ddl.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-export-ddl.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreExportDdlTool = Tool.define("altimate_core_export_ddl", { description: - "Export a YAML/JSON schema as CREATE TABLE DDL statements using the Rust-based altimate-core engine.", + "Export a YAML/JSON schema as CREATE TABLE DDL statements. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), schema_context: z.record(z.string(), z.any()).optional().describe("Inline schema definition"), @@ -15,15 +15,16 @@ export const AltimateCoreExportDdlTool = Tool.define("altimate_core_export_ddl", schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? 
{}) as Record + const error = result.error ?? data.error return { title: "Export DDL: done", - metadata: { success: result.success }, + metadata: { success: result.success, ...(error && { error }) }, output: data.ddl ?? JSON.stringify(data, null, 2), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Export DDL: ERROR", metadata: { success: false }, output: `Failed: ${msg}` } + return { title: "Export DDL: ERROR", metadata: { success: false, error: msg }, output: `Failed: ${msg}` } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-extract-metadata.ts b/packages/opencode/src/altimate/tools/altimate-core-extract-metadata.ts index bd842b81c..fb498ef33 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-extract-metadata.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-extract-metadata.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreExtractMetadataTool = Tool.define("altimate_core_extract_metadata", { description: - "Extract metadata from SQL using the Rust-based altimate-core engine. Identifies tables, columns, functions, CTEs, and other structural elements referenced in a query.", + "Extract metadata from SQL. Identifies tables, columns, functions, CTEs, and other structural elements referenced in a query.", parameters: z.object({ sql: z.string().describe("SQL query to extract metadata from"), dialect: z.string().optional().describe("SQL dialect (e.g. snowflake, bigquery, postgres)"), @@ -15,15 +15,16 @@ export const AltimateCoreExtractMetadataTool = Tool.define("altimate_core_extrac sql: args.sql, dialect: args.dialect ?? "", }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? data.error return { title: `Metadata: ${data.tables?.length ?? 0} tables, ${data.columns?.length ?? 
0} columns`, - metadata: { success: result.success }, + metadata: { success: result.success, ...(error && { error }) }, output: formatMetadata(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Metadata: ERROR", metadata: { success: false }, output: `Failed: ${msg}` } + return { title: "Metadata: ERROR", metadata: { success: false, error: msg }, output: `Failed: ${msg}` } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-fingerprint.ts b/packages/opencode/src/altimate/tools/altimate-core-fingerprint.ts index d73124459..5555b6307 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-fingerprint.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-fingerprint.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreFingerprintTool = Tool.define("altimate_core_fingerprint", { description: - "Compute a SHA-256 fingerprint of a schema using the Rust-based altimate-core engine. Useful for cache invalidation and change detection.", + "Compute a SHA-256 fingerprint of a schema. Useful for cache invalidation and change detection. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), schema_context: z.record(z.string(), z.any()).optional().describe("Inline schema definition"), @@ -15,15 +15,20 @@ export const AltimateCoreFingerprintTool = Tool.define("altimate_core_fingerprin schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? data.error return { title: `Fingerprint: ${data.fingerprint?.substring(0, 12) ?? 
"computed"}...`, - metadata: { success: result.success, fingerprint: data.fingerprint }, + metadata: { success: result.success, fingerprint: data.fingerprint, ...(error && { error }) }, output: `SHA-256: ${data.fingerprint ?? "unknown"}`, } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Fingerprint: ERROR", metadata: { success: false, fingerprint: null }, output: `Failed: ${msg}` } + return { + title: "Fingerprint: ERROR", + metadata: { success: false, fingerprint: null, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-fix.ts b/packages/opencode/src/altimate/tools/altimate-core-fix.ts index 4e9dddb20..b01110f39 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-fix.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-fix.ts @@ -1,10 +1,11 @@ import z from "zod" import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" +import type { Telemetry } from "../telemetry" export const AltimateCoreFixTool = Tool.define("altimate_core_fix", { description: - "Auto-fix SQL errors using the Rust-based altimate-core engine. Uses fuzzy matching and iterative re-validation to correct syntax errors, typos, and schema reference issues.", + "Auto-fix SQL errors using fuzzy matching and iterative re-validation. Corrects syntax errors, typos, and schema reference issues. 
IMPORTANT: Provide schema_context or schema_path — without schema, table/column references cannot be resolved or fixed.", parameters: z.object({ sql: z.string().describe("SQL query to fix"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), @@ -12,6 +13,7 @@ export const AltimateCoreFixTool = Tool.define("altimate_core_fix", { max_iterations: z.number().optional().describe("Maximum fix iterations (default: 5)"), }), async execute(args, ctx) { + const hasSchema = !!(args.schema_path || (args.schema_context && Object.keys(args.schema_context).length > 0)) try { const result = await Dispatcher.call("altimate_core.fix", { sql: args.sql, @@ -19,19 +21,56 @@ export const AltimateCoreFixTool = Tool.define("altimate_core_fix", { schema_context: args.schema_context, max_iterations: args.max_iterations ?? 5, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? data.error ?? extractFixErrors(data) + // post_fix_valid=true with no errors means SQL was already valid (nothing to fix) + const alreadyValid = data.post_fix_valid && !error + const success = result.success || alreadyValid + // altimate_change start — sql quality findings for telemetry + const findings: Telemetry.Finding[] = [] + for (const fix of data.fixes_applied ?? data.changes ?? []) { + findings.push({ category: "fix_applied" }) + } + for (const err of data.unfixable_errors ?? []) { + findings.push({ category: "unfixable_error" }) + } + // altimate_change end return { - title: `Fix: ${data.success ? "FIXED" : "COULD NOT FIX"}`, - metadata: { success: result.success, fixed: !!data.fixed_sql }, + title: `Fix: ${alreadyValid ? "ALREADY VALID" : data.fixed ? "FIXED" : "COULD NOT FIX"}`, + metadata: { + success, + fixed: !!data.fixed_sql, + has_schema: hasSchema, + ...(error && { error }), + ...(findings.length > 0 && { findings }), + }, output: formatFix(data), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) - return { title: "Fix: ERROR", metadata: { success: false, fixed: false }, output: `Failed: ${msg}` } + return { + title: "Fix: ERROR", + metadata: { success: false, fixed: false, has_schema: hasSchema, error: msg }, + output: `Failed: ${msg}`, + } } }, }) +// Safety net: the native handler (register.ts) also extracts unfixable_errors into +// result.error, but we extract here too in case the handler is updated without setting it. +function extractFixErrors(data: Record): string | undefined { + if (Array.isArray(data.unfixable_errors) && data.unfixable_errors.length > 0) { + const msgs = data.unfixable_errors.map((e: any) => e.error?.message ?? e.reason ?? String(e)).filter(Boolean) + if (msgs.length > 0) return msgs.join("; ") + } + if (Array.isArray(data.errors) && data.errors.length > 0) { + const msgs = data.errors.map((e: any) => e.message ?? String(e)).filter(Boolean) + if (msgs.length > 0) return msgs.join("; ") + } + return undefined +} + function formatFix(data: Record): string { if (data.error) return `Error: ${data.error}` const lines: string[] = [] diff --git a/packages/opencode/src/altimate/tools/altimate-core-grade.ts b/packages/opencode/src/altimate/tools/altimate-core-grade.ts index 1a1adf155..f67f0d4d7 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-grade.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-grade.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreGradeTool = Tool.define("altimate_core_grade", { description: - "Grade SQL quality on an A-F scale using the Rust-based altimate-core engine. Evaluates readability, performance, correctness, and best practices to produce an overall quality grade.", + "Grade SQL quality on an A-F scale. Evaluates readability, performance, correctness, and best practices to produce an overall quality grade. 
Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ sql: z.string().describe("SQL query to grade"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), @@ -17,17 +17,22 @@ export const AltimateCoreGradeTool = Tool.define("altimate_core_grade", { schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const grade = data.overall_grade ?? data.grade const score = data.scores?.overall != null ? Math.round(data.scores.overall * 100) : data.score + const error = result.error ?? data.error return { title: `Grade: ${grade ?? "?"}`, - metadata: { success: result.success, grade, score }, + metadata: { success: result.success, grade, score, ...(error && { error }) }, output: formatGrade(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Grade: ERROR", metadata: { success: false, grade: null, score: null }, output: `Failed: ${msg}` } + return { + title: "Grade: ERROR", + metadata: { success: false, grade: null, score: null, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-import-ddl.ts b/packages/opencode/src/altimate/tools/altimate-core-import-ddl.ts index b4436a4bd..2aa89379d 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-import-ddl.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-import-ddl.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreImportDdlTool = Tool.define("altimate_core_import_ddl", { description: - "Convert CREATE TABLE DDL into YAML schema definition using the Rust-based altimate-core engine. Parses DDL statements and produces a structured schema that other altimate-core tools can consume.", + "Convert CREATE TABLE DDL into YAML schema definition. 
Parses DDL statements and produces a structured schema that other altimate-core tools can consume.", parameters: z.object({ ddl: z.string().describe("CREATE TABLE DDL statements to parse"), dialect: z.string().optional().describe("SQL dialect of the DDL"), @@ -15,15 +15,16 @@ export const AltimateCoreImportDdlTool = Tool.define("altimate_core_import_ddl", ddl: args.ddl, dialect: args.dialect ?? "", }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? data.error return { title: "Import DDL: done", - metadata: { success: result.success }, + metadata: { success: result.success, ...(error && { error }) }, output: formatImportDdl(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Import DDL: ERROR", metadata: { success: false }, output: `Failed: ${msg}` } + return { title: "Import DDL: ERROR", metadata: { success: false, error: msg }, output: `Failed: ${msg}` } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-introspection-sql.ts b/packages/opencode/src/altimate/tools/altimate-core-introspection-sql.ts index bf2959de1..1bb591a05 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-introspection-sql.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-introspection-sql.ts @@ -17,15 +17,20 @@ export const AltimateCoreIntrospectionSqlTool = Tool.define("altimate_core_intro database: args.database, schema_name: args.schema_name, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? data.error return { title: `Introspection SQL: ${args.db_type}`, - metadata: { success: result.success, db_type: args.db_type }, + metadata: { success: result.success, db_type: args.db_type, ...(error && { error }) }, output: formatIntrospectionSql(data), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) - return { title: "Introspection SQL: ERROR", metadata: { success: false, db_type: args.db_type }, output: `Failed: ${msg}` } + return { + title: "Introspection SQL: ERROR", + metadata: { success: false, db_type: args.db_type, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-migration.ts b/packages/opencode/src/altimate/tools/altimate-core-migration.ts index 0fae1f80d..694cd300c 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-migration.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-migration.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreMigrationTool = Tool.define("altimate_core_migration", { description: - "Analyze DDL migration safety using the Rust-based altimate-core engine. Detects potential data loss, type narrowing, missing defaults, and other risks in schema migration statements.", + "Analyze DDL migration safety. Detects potential data loss, type narrowing, missing defaults, and other risks in schema migration statements.", parameters: z.object({ old_ddl: z.string().describe("Original DDL (before migration)"), new_ddl: z.string().describe("New DDL (after migration)"), @@ -17,16 +17,21 @@ export const AltimateCoreMigrationTool = Tool.define("altimate_core_migration", new_ddl: args.new_ddl, dialect: args.dialect ?? "", }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const riskCount = data.risks?.length ?? 0 + const error = result.error ?? data.error return { title: `Migration: ${riskCount === 0 ? "SAFE" : `${riskCount} risk(s)`}`, - metadata: { success: result.success, risk_count: riskCount }, + metadata: { success: result.success, risk_count: riskCount, ...(error && { error }) }, output: formatMigration(data), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) - return { title: "Migration: ERROR", metadata: { success: false, risk_count: 0 }, output: `Failed: ${msg}` } + return { + title: "Migration: ERROR", + metadata: { success: false, risk_count: 0, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-optimize-context.ts b/packages/opencode/src/altimate/tools/altimate-core-optimize-context.ts index f9b348396..1907889ec 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-optimize-context.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-optimize-context.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreOptimizeContextTool = Tool.define("altimate_core_optimize_context", { description: - "Optimize schema for LLM context window using the Rust-based altimate-core engine. Applies 5-level progressive disclosure to reduce schema size while preserving essential information.", + "Optimize schema for LLM context window. Applies 5-level progressive disclosure to reduce schema size while preserving essential information. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), schema_context: z.record(z.string(), z.any()).optional().describe("Inline schema definition"), @@ -15,15 +15,16 @@ export const AltimateCoreOptimizeContextTool = Tool.define("altimate_core_optimi schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? data.error return { title: `Optimize Context: ${data.levels?.length ?? 0} level(s)`, - metadata: { success: result.success }, + metadata: { success: result.success, ...(error && { error }) }, output: formatOptimizeContext(data), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) - return { title: "Optimize Context: ERROR", metadata: { success: false }, output: `Failed: ${msg}` } + return { title: "Optimize Context: ERROR", metadata: { success: false, error: msg }, output: `Failed: ${msg}` } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-parse-dbt.ts b/packages/opencode/src/altimate/tools/altimate-core-parse-dbt.ts index eff8a7ed5..24e48533b 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-parse-dbt.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-parse-dbt.ts @@ -3,8 +3,7 @@ import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" export const AltimateCoreParseDbtTool = Tool.define("altimate_core_parse_dbt", { - description: - "Parse a dbt project directory using the Rust-based altimate-core engine. Extracts models, sources, tests, and project structure for analysis.", + description: "Parse a dbt project directory. Extracts models, sources, tests, and project structure for analysis.", parameters: z.object({ project_dir: z.string().describe("Path to the dbt project directory"), }), @@ -13,15 +12,16 @@ export const AltimateCoreParseDbtTool = Tool.define("altimate_core_parse_dbt", { const result = await Dispatcher.call("altimate_core.parse_dbt", { project_dir: args.project_dir, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? data.error return { title: `Parse dbt: ${data.models?.length ?? 0} models`, - metadata: { success: result.success }, + metadata: { success: result.success, ...(error && { error }) }, output: formatParseDbt(data), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) - return { title: "Parse dbt: ERROR", metadata: { success: false }, output: `Failed: ${msg}` } + return { title: "Parse dbt: ERROR", metadata: { success: false, error: msg }, output: `Failed: ${msg}` } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-policy.ts b/packages/opencode/src/altimate/tools/altimate-core-policy.ts index b1e2abbdf..e55c02d29 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-policy.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-policy.ts @@ -1,10 +1,11 @@ import z from "zod" import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" +import type { Telemetry } from "../telemetry" export const AltimateCorePolicyTool = Tool.define("altimate_core_policy", { description: - "Check SQL against YAML-based governance policy guardrails using the Rust-based altimate-core engine. Validates compliance with custom rules like allowed tables, forbidden operations, and data access restrictions.", + "Check SQL against YAML-based governance policy guardrails. Validates compliance with custom rules like allowed tables, forbidden operations, and data access restrictions. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ sql: z.string().describe("SQL query to check against policy"), policy_json: z.string().describe("JSON string defining the policy rules"), @@ -12,6 +13,7 @@ export const AltimateCorePolicyTool = Tool.define("altimate_core_policy", { schema_context: z.record(z.string(), z.any()).optional().describe("Inline schema definition"), }), async execute(args, ctx) { + const hasSchema = !!(args.schema_path || (args.schema_context && Object.keys(args.schema_context).length > 0)) try { const result = await Dispatcher.call("altimate_core.policy", { sql: args.sql, @@ -19,15 +21,32 @@ export const AltimateCorePolicyTool = Tool.define("altimate_core_policy", { schema_path: args.schema_path ?? 
"", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? data.error + // altimate_change start — sql quality findings for telemetry + const violations = Array.isArray(data.violations) ? data.violations : [] + const findings: Telemetry.Finding[] = violations.map((v: any) => ({ + category: v.rule ?? "policy_violation", + })) + // altimate_change end return { title: `Policy: ${data.pass ? "PASS" : "VIOLATIONS FOUND"}`, - metadata: { success: result.success, pass: data.pass }, + metadata: { + success: true, // engine ran — violations are findings, not failures + pass: data.pass, + has_schema: hasSchema, + ...(error && { error }), + ...(findings.length > 0 && { findings }), + }, output: formatPolicy(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Policy: ERROR", metadata: { success: false, pass: false }, output: `Failed: ${msg}` } + return { + title: "Policy: ERROR", + metadata: { success: false, pass: false, has_schema: hasSchema, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-prune-schema.ts b/packages/opencode/src/altimate/tools/altimate-core-prune-schema.ts index f6d7ffa91..8fb69ba67 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-prune-schema.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-prune-schema.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCorePruneSchemaTool = Tool.define("altimate_core_prune_schema", { description: - "Filter schema to only tables and columns referenced by a SQL query using the Rust-based altimate-core engine. Progressive schema disclosure for minimal context.", + "Filter schema to only tables and columns referenced by a SQL query. Progressive schema disclosure for minimal context. 
Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ sql: z.string().describe("SQL query to determine relevant schema for"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), @@ -17,15 +17,16 @@ export const AltimateCorePruneSchemaTool = Tool.define("altimate_core_prune_sche schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? data.error return { title: "Prune Schema: done", - metadata: { success: result.success }, + metadata: { success: result.success, ...(error && { error }) }, output: formatPruneSchema(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Prune Schema: ERROR", metadata: { success: false }, output: `Failed: ${msg}` } + return { title: "Prune Schema: ERROR", metadata: { success: false, error: msg }, output: `Failed: ${msg}` } } }, }) @@ -34,7 +35,9 @@ function formatPruneSchema(data: Record): string { if (data.error) return `Error: ${data.error}` const lines: string[] = [] if (data.tables_pruned != null) { - lines.push(`Pruned ${data.tables_pruned} of ${data.total_tables} tables to ${data.relevant_tables?.length ?? "?"} relevant.`) + lines.push( + `Pruned ${data.tables_pruned} of ${data.total_tables} tables to ${data.relevant_tables?.length ?? 
"?"} relevant.`, + ) } if (data.relevant_tables?.length) { lines.push(`Relevant tables: ${data.relevant_tables.join(", ")}`) diff --git a/packages/opencode/src/altimate/tools/altimate-core-query-pii.ts b/packages/opencode/src/altimate/tools/altimate-core-query-pii.ts index 30a839b9b..463b9ba44 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-query-pii.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-query-pii.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreQueryPiiTool = Tool.define("altimate_core_query_pii", { description: - "Analyze query-level PII exposure using the Rust-based altimate-core engine. Checks if a SQL query accesses columns classified as PII and reports the exposure risk.", + "Analyze query-level PII exposure. Checks if a SQL query accesses columns classified as PII and reports the exposure risk. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ sql: z.string().describe("SQL query to check for PII access"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), @@ -17,17 +17,22 @@ export const AltimateCoreQueryPiiTool = Tool.define("altimate_core_query_pii", { schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const piiCols = data.pii_columns ?? data.exposures ?? [] const exposureCount = piiCols.length + const error = result.error ?? data.error return { title: `Query PII: ${exposureCount === 0 ? "CLEAN" : `${exposureCount} exposure(s)`}`, - metadata: { success: result.success, exposure_count: exposureCount }, + metadata: { success: result.success, exposure_count: exposureCount, ...(error && { error }) }, output: formatQueryPii(data), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) - return { title: "Query PII: ERROR", metadata: { success: false, exposure_count: 0 }, output: `Failed: ${msg}` } + return { + title: "Query PII: ERROR", + metadata: { success: false, exposure_count: 0, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-resolve-term.ts b/packages/opencode/src/altimate/tools/altimate-core-resolve-term.ts index 1f215f88f..c711d110a 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-resolve-term.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-resolve-term.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreResolveTermTool = Tool.define("altimate_core_resolve_term", { description: - "Resolve a business glossary term to schema elements using fuzzy matching via the Rust-based altimate-core engine. Maps human-readable terms like 'revenue' or 'customer' to actual table/column names.", + "Resolve a business glossary term to schema elements using fuzzy matching. Maps human-readable terms like 'revenue' or 'customer' to actual table/column names. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ term: z.string().describe("Business term to resolve (e.g. 'revenue', 'customer email')"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), @@ -17,16 +17,21 @@ export const AltimateCoreResolveTermTool = Tool.define("altimate_core_resolve_te schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const matchCount = data.matches?.length ?? 0 + const error = result.error ?? 
data.error return { title: `Resolve: ${matchCount} match(es) for "${args.term}"`, - metadata: { success: result.success, match_count: matchCount }, + metadata: { success: result.success, match_count: matchCount, ...(error && { error }) }, output: formatResolveTerm(data, args.term), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Resolve: ERROR", metadata: { success: false, match_count: 0 }, output: `Failed: ${msg}` } + return { + title: "Resolve: ERROR", + metadata: { success: false, match_count: 0, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-rewrite.ts b/packages/opencode/src/altimate/tools/altimate-core-rewrite.ts index 91626cb4f..02dae25e8 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-rewrite.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-rewrite.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreRewriteTool = Tool.define("altimate_core_rewrite", { description: - "Suggest query optimization rewrites using the Rust-based altimate-core engine. Analyzes SQL and proposes concrete rewrites for better performance.", + "Suggest query optimization rewrites. Analyzes SQL and proposes concrete rewrites for better performance. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ sql: z.string().describe("SQL query to optimize"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), @@ -17,17 +17,22 @@ export const AltimateCoreRewriteTool = Tool.define("altimate_core_rewrite", { schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const suggestions = data.suggestions ?? data.rewrites ?? [] const rewriteCount = suggestions.length || (data.rewritten_sql && data.rewritten_sql !== args.sql ? 
1 : 0) + const error = result.error ?? data.error return { title: `Rewrite: ${rewriteCount} suggestion(s)`, - metadata: { success: result.success, rewrite_count: rewriteCount }, + metadata: { success: result.success, rewrite_count: rewriteCount, ...(error && { error }) }, output: formatRewrite(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Rewrite: ERROR", metadata: { success: false, rewrite_count: 0 }, output: `Failed: ${msg}` } + return { + title: "Rewrite: ERROR", + metadata: { success: false, rewrite_count: 0, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-schema-diff.ts b/packages/opencode/src/altimate/tools/altimate-core-schema-diff.ts index bda0a0737..98234e635 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-schema-diff.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-schema-diff.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreSchemaDiffTool = Tool.define("altimate_core_schema_diff", { description: - "Diff two schemas and detect breaking changes using the Rust-based altimate-core engine. Compares old vs new schema files and identifies added, removed, and modified tables/columns.", + "Diff two schemas and detect breaking changes. Compares old vs new schema files and identifies added, removed, and modified tables/columns.", parameters: z.object({ schema1_path: z.string().optional().describe("Path to the old/baseline schema file"), schema2_path: z.string().optional().describe("Path to the new/changed schema file"), @@ -19,17 +19,27 @@ export const AltimateCoreSchemaDiffTool = Tool.define("altimate_core_schema_diff schema1_context: args.schema1_context, schema2_context: args.schema2_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const changeCount = data.changes?.length ?? 0 const hasBreaking = data.has_breaking_changes ?? 
data.has_breaking ?? false + const error = result.error ?? data.error return { title: `Schema Diff: ${changeCount} change(s)${hasBreaking ? " (BREAKING)" : ""}`, - metadata: { success: result.success, change_count: changeCount, has_breaking: hasBreaking }, + metadata: { + success: result.success, + change_count: changeCount, + has_breaking: hasBreaking, + ...(error && { error }), + }, output: formatSchemaDiff(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Schema Diff: ERROR", metadata: { success: false, change_count: 0, has_breaking: false }, output: `Failed: ${msg}` } + return { + title: "Schema Diff: ERROR", + metadata: { success: false, change_count: 0, has_breaking: false, error: msg }, + output: `Failed: ${msg}`, + } } }, }) @@ -44,8 +54,8 @@ function formatSchemaDiff(data: Record): string { // Rust SchemaChange uses tagged enum: { type: "column_added", table: "...", ... } const breakingTypes = new Set(["table_removed", "column_removed", "column_type_changed"]) for (const c of data.changes) { - const isBreaking = breakingTypes.has(c.type) || - (c.type === "nullability_changed" && c.old_nullable && !c.new_nullable) + const isBreaking = + breakingTypes.has(c.type) || (c.type === "nullability_changed" && c.old_nullable && !c.new_nullable) const marker = isBreaking ? 
"BREAKING" : "info" const desc = formatChange(c) lines.push(` [${marker}] ${desc}`) @@ -57,12 +67,19 @@ function formatSchemaDiff(data: Record): string { function formatChange(c: Record): string { switch (c.type) { - case "table_added": return `Table '${c.table}' added` - case "table_removed": return `Table '${c.table}' removed` - case "column_added": return `Column '${c.table}.${c.column}' added (${c.data_type})` - case "column_removed": return `Column '${c.table}.${c.column}' removed` - case "column_type_changed": return `Column '${c.table}.${c.column}' type changed: ${c.old_type} → ${c.new_type}` - case "nullability_changed": return `Column '${c.table}.${c.column}' nullability: ${c.old_nullable ? "nullable" : "not null"} → ${c.new_nullable ? "nullable" : "not null"}` - default: return `${c.type}: ${c.description ?? c.message ?? JSON.stringify(c)}` + case "table_added": + return `Table '${c.table}' added` + case "table_removed": + return `Table '${c.table}' removed` + case "column_added": + return `Column '${c.table}.${c.column}' added (${c.data_type})` + case "column_removed": + return `Column '${c.table}.${c.column}' removed` + case "column_type_changed": + return `Column '${c.table}.${c.column}' type changed: ${c.old_type} → ${c.new_type}` + case "nullability_changed": + return `Column '${c.table}.${c.column}' nullability: ${c.old_nullable ? "nullable" : "not null"} → ${c.new_nullable ? "nullable" : "not null"}` + default: + return `${c.type}: ${c.description ?? c.message ?? 
JSON.stringify(c)}` } } diff --git a/packages/opencode/src/altimate/tools/altimate-core-semantics.ts b/packages/opencode/src/altimate/tools/altimate-core-semantics.ts index 74ccfe0d4..8fe2dec0c 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-semantics.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-semantics.ts @@ -1,36 +1,76 @@ import z from "zod" import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" +import type { Telemetry } from "../telemetry" export const AltimateCoreSemanticsTool = Tool.define("altimate_core_semantics", { description: - "Run semantic validation rules against SQL using the Rust-based altimate-core engine. Detects logical issues like cartesian products, wrong JOIN conditions, NULL misuse, and type mismatches that syntax checking alone misses.", + "Run semantic validation rules against SQL. Detects logical issues like cartesian products, wrong JOIN conditions, NULL misuse, and type mismatches that syntax checking alone misses. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ sql: z.string().describe("SQL query to validate semantically"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), schema_context: z.record(z.string(), z.any()).optional().describe("Inline schema definition"), }), async execute(args, ctx) { + const hasSchema = !!(args.schema_path || (args.schema_context && Object.keys(args.schema_context).length > 0)) + if (!hasSchema) { + const error = + "No schema provided. Provide schema_context or schema_path so table/column references can be resolved." + return { + title: "Semantics: NO SCHEMA", + metadata: { success: false, valid: false, issue_count: 0, has_schema: false, error }, + output: `Error: ${error}`, + } + } try { const result = await Dispatcher.call("altimate_core.semantics", { sql: args.sql, schema_path: args.schema_path ?? 
"", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const issueCount = data.issues?.length ?? 0 + const error = result.error ?? data.error ?? extractSemanticsErrors(data) + const hasError = Boolean(error) + // altimate_change start — sql quality findings for telemetry + const issues = Array.isArray(data.issues) ? data.issues : [] + const findings: Telemetry.Finding[] = issues.map(() => ({ + category: "semantic_issue", + })) + // altimate_change end return { - title: `Semantics: ${data.valid ? "VALID" : `${issueCount} issues`}`, - metadata: { success: result.success, valid: data.valid, issue_count: issueCount }, - output: formatSemantics(data), + title: hasError ? "Semantics: ERROR" : `Semantics: ${data.valid ? "VALID" : `${issueCount} issues`}`, + metadata: { + success: true, // engine ran — semantic issues are findings, not failures + valid: data.valid, + issue_count: issueCount, + has_schema: hasSchema, + ...(error && { error }), + ...(findings.length > 0 && { findings }), + }, + output: formatSemantics(hasError ? { ...data, error } : data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Semantics: ERROR", metadata: { success: false, valid: false, issue_count: 0 }, output: `Failed: ${msg}` } + return { + title: "Semantics: ERROR", + metadata: { success: false, valid: false, issue_count: 0, has_schema: hasSchema, error: msg }, + output: `Failed: ${msg}`, + } } }, }) +function extractSemanticsErrors(data: Record): string | undefined { + if (Array.isArray(data.validation_errors) && data.validation_errors.length > 0) { + const msgs = data.validation_errors + .map((e: any) => (typeof e === "string" ? e : (e.message ?? String(e)))) + .filter(Boolean) + return msgs.length > 0 ? 
msgs.join("; ") : undefined + } + return undefined +} + function formatSemantics(data: Record): string { if (data.error) return `Error: ${data.error}` if (data.valid) return "No semantic issues found." diff --git a/packages/opencode/src/altimate/tools/altimate-core-testgen.ts b/packages/opencode/src/altimate/tools/altimate-core-testgen.ts index 462edab9a..5a85eb2f7 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-testgen.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-testgen.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreTestgenTool = Tool.define("altimate_core_testgen", { description: - "Generate automated SQL test cases using the Rust-based altimate-core engine. Produces boundary value tests, NULL handling tests, edge cases, and expected result assertions for a given SQL query.", + "Generate automated SQL test cases. Produces boundary value tests, NULL handling tests, edge cases, and expected result assertions for a given SQL query. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ sql: z.string().describe("SQL query to generate tests for"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), @@ -17,17 +17,22 @@ export const AltimateCoreTestgenTool = Tool.define("altimate_core_testgen", { schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const tests = data.tests ?? data.test_cases ?? data.generated_tests ?? [] const testCount = tests.length + const error = result.error ?? data.error return { title: `TestGen: ${testCount} test(s) generated`, - metadata: { success: result.success, test_count: testCount }, + metadata: { success: result.success, test_count: testCount, ...(error && { error }) }, output: formatTestgen(data), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) - return { title: "TestGen: ERROR", metadata: { success: false, test_count: 0 }, output: `Failed: ${msg}` } + return { + title: "TestGen: ERROR", + metadata: { success: false, test_count: 0, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-track-lineage.ts b/packages/opencode/src/altimate/tools/altimate-core-track-lineage.ts index 9e961b0b9..13b6dda9c 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-track-lineage.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-track-lineage.ts @@ -4,7 +4,7 @@ import { Dispatcher } from "../native" export const AltimateCoreTrackLineageTool = Tool.define("altimate_core_track_lineage", { description: - "Track lineage across multiple SQL queries using the Rust-based altimate-core engine. Builds a combined lineage graph from a sequence of queries. Requires altimate_core.init() with API key.", + "Track lineage across multiple SQL queries. Builds a combined lineage graph from a sequence of queries. Requires altimate_core.init() with API key. Provide schema_context or schema_path for accurate table/column resolution.", parameters: z.object({ queries: z.array(z.string()).describe("List of SQL queries to track lineage across"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), @@ -17,16 +17,21 @@ export const AltimateCoreTrackLineageTool = Tool.define("altimate_core_track_lin schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record const edgeCount = data.edges?.length ?? 0 + const error = result.error ?? 
data.error return { title: `Track Lineage: ${edgeCount} edge(s) across ${args.queries.length} queries`, - metadata: { success: result.success, edge_count: edgeCount }, + metadata: { success: result.success, edge_count: edgeCount, ...(error && { error }) }, output: formatTrackLineage(data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) - return { title: "Track Lineage: ERROR", metadata: { success: false, edge_count: 0 }, output: `Failed: ${msg}` } + return { + title: "Track Lineage: ERROR", + metadata: { success: false, edge_count: 0, error: msg }, + output: `Failed: ${msg}`, + } } }, }) diff --git a/packages/opencode/src/altimate/tools/altimate-core-validate.ts b/packages/opencode/src/altimate/tools/altimate-core-validate.ts index d35836d13..384b5faed 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-validate.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-validate.ts @@ -1,35 +1,82 @@ import z from "zod" import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" +import type { Telemetry } from "../telemetry" export const AltimateCoreValidateTool = Tool.define("altimate_core_validate", { description: - "Validate SQL syntax and schema references using the Rust-based altimate-core engine. Checks if tables/columns exist in the schema and if SQL is valid for the target dialect.", + "Validate SQL syntax and schema references. Checks if tables/columns exist in the schema and if SQL is valid for the target dialect. 
IMPORTANT: Provide schema_context or schema_path — without schema, all table/column references will report as 'not found'.", parameters: z.object({ sql: z.string().describe("SQL query to validate"), schema_path: z.string().optional().describe("Path to YAML/JSON schema file"), schema_context: z.record(z.string(), z.any()).optional().describe("Inline schema definition"), }), async execute(args, ctx) { + const hasSchema = !!(args.schema_path || (args.schema_context && Object.keys(args.schema_context).length > 0)) + const noSchema = !hasSchema + if (noSchema) { + const error = + "No schema provided. Provide schema_context or schema_path so table/column references can be resolved." + return { + title: "Validate: NO SCHEMA", + metadata: { success: false, valid: false, has_schema: false, error }, + output: `Error: ${error}`, + } + } try { const result = await Dispatcher.call("altimate_core.validate", { sql: args.sql, schema_path: args.schema_path ?? "", schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record + const error = result.error ?? data.error ?? extractValidationErrors(data) + // altimate_change start — sql quality findings for telemetry + const errors = Array.isArray(data.errors) ? data.errors : [] + const findings: Telemetry.Finding[] = errors.map((err: any) => ({ + category: classifyValidationError(err.message ?? ""), + })) + // altimate_change end return { title: `Validate: ${data.valid ? "VALID" : "INVALID"}`, - metadata: { success: result.success, valid: data.valid }, + metadata: { + success: true, // engine ran — validation errors are findings, not failures + valid: data.valid, + has_schema: hasSchema, + ...(error && { error }), + ...(findings.length > 0 && { findings }), + }, output: formatValidate(data), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) - return { title: "Validate: ERROR", metadata: { success: false, valid: false }, output: `Failed: ${msg}` } + return { + title: "Validate: ERROR", + metadata: { success: false, valid: false, has_schema: hasSchema, error: msg }, + output: `Failed: ${msg}`, + } } }, }) +function extractValidationErrors(data: Record): string | undefined { + if (Array.isArray(data.errors) && data.errors.length > 0) { + const msgs = data.errors.map((e: any) => e.message ?? String(e)).filter(Boolean) + return msgs.length > 0 ? msgs.join("; ") : undefined + } + return undefined +} + +function classifyValidationError(message: string): string { + const lower = message.toLowerCase() + // Column check before table — "column not found in table" would match both + if (lower.includes("column") && lower.includes("not found")) return "missing_column" + if (lower.includes("table") && lower.includes("not found")) return "missing_table" + if (lower.includes("syntax")) return "syntax_error" + if (lower.includes("type")) return "type_mismatch" + return "validation_error" +} + function formatValidate(data: Record): string { if (data.error) return `Error: ${data.error}` if (data.valid) return "SQL is valid." diff --git a/packages/opencode/src/altimate/tools/dbt-lineage.ts b/packages/opencode/src/altimate/tools/dbt-lineage.ts index 4bcf5ab8d..0aeb93568 100644 --- a/packages/opencode/src/altimate/tools/dbt-lineage.ts +++ b/packages/opencode/src/altimate/tools/dbt-lineage.ts @@ -33,7 +33,7 @@ export const DbtLineageTool = Tool.define("dbt_lineage", { const msg = e instanceof Error ? 
e.message : String(e) return { title: "dbt Lineage: ERROR", - metadata: { model_name: args.model, confidence: "unknown" }, + metadata: { model_name: args.model, confidence: "unknown", error: msg }, output: `Failed: ${msg}`, } } diff --git a/packages/opencode/src/altimate/tools/finops-analyze-credits.ts b/packages/opencode/src/altimate/tools/finops-analyze-credits.ts index 0fd01417a..4b2c74e02 100644 --- a/packages/opencode/src/altimate/tools/finops-analyze-credits.ts +++ b/packages/opencode/src/altimate/tools/finops-analyze-credits.ts @@ -80,10 +80,11 @@ export const FinopsAnalyzeCreditsTool = Tool.define("finops_analyze_credits", { }) if (!result.success) { + const error = result.error ?? "Unknown error" return { title: "Credit Analysis: FAILED", - metadata: { success: false, total_credits: 0 }, - output: `Failed to analyze credits: ${result.error ?? "Unknown error"}`, + metadata: { success: false, total_credits: 0, error }, + output: `Failed to analyze credits: ${error}`, } } @@ -101,7 +102,7 @@ export const FinopsAnalyzeCreditsTool = Tool.define("finops_analyze_credits", { const msg = e instanceof Error ? e.message : String(e) return { title: "Credit Analysis: ERROR", - metadata: { success: false, total_credits: 0 }, + metadata: { success: false, total_credits: 0, error: msg }, output: `Failed to analyze credits: ${msg}`, } } diff --git a/packages/opencode/src/altimate/tools/finops-expensive-queries.ts b/packages/opencode/src/altimate/tools/finops-expensive-queries.ts index 16dbb17bb..63e86f024 100644 --- a/packages/opencode/src/altimate/tools/finops-expensive-queries.ts +++ b/packages/opencode/src/altimate/tools/finops-expensive-queries.ts @@ -47,10 +47,11 @@ export const FinopsExpensiveQueriesTool = Tool.define("finops_expensive_queries" }) if (!result.success) { + const error = result.error ?? 
"Unknown error" return { title: "Expensive Queries: FAILED", - metadata: { success: false, query_count: 0 }, - output: `Failed to find expensive queries: ${result.error ?? "Unknown error"}`, + metadata: { success: false, query_count: 0, error }, + output: `Failed to find expensive queries: ${error}`, } } @@ -63,7 +64,7 @@ export const FinopsExpensiveQueriesTool = Tool.define("finops_expensive_queries" const msg = e instanceof Error ? e.message : String(e) return { title: "Expensive Queries: ERROR", - metadata: { success: false, query_count: 0 }, + metadata: { success: false, query_count: 0, error: msg }, output: `Failed to find expensive queries: ${msg}`, } } diff --git a/packages/opencode/src/altimate/tools/finops-query-history.ts b/packages/opencode/src/altimate/tools/finops-query-history.ts index cf298ab31..e2e112545 100644 --- a/packages/opencode/src/altimate/tools/finops-query-history.ts +++ b/packages/opencode/src/altimate/tools/finops-query-history.ts @@ -13,8 +13,7 @@ function formatQueryHistory(summary: Record, queries: unknown[] lines.push(`Avg execution time: ${Number(summary.avg_execution_time).toFixed(2)}s`) if (summary.total_bytes_scanned !== undefined) lines.push(`Total bytes scanned: ${formatBytes(Number(summary.total_bytes_scanned))}`) - if (summary.period_days !== undefined) - lines.push(`Period: ${summary.period_days} days`) + if (summary.period_days !== undefined) lines.push(`Period: ${summary.period_days} days`) const arr = Array.isArray(queries) ? queries : [] if (arr.length === 0) { @@ -65,10 +64,11 @@ export const FinopsQueryHistoryTool = Tool.define("finops_query_history", { }) if (!result.success) { + const error = result.error ?? "Unknown error" return { title: "Query History: FAILED", - metadata: { success: false, query_count: 0 }, - output: `Failed to fetch query history: ${result.error ?? 
"Unknown error"}`, + metadata: { success: false, query_count: 0, error }, + output: `Failed to fetch query history: ${error}`, } } @@ -82,7 +82,7 @@ export const FinopsQueryHistoryTool = Tool.define("finops_query_history", { const msg = e instanceof Error ? e.message : String(e) return { title: "Query History: ERROR", - metadata: { success: false, query_count: 0 }, + metadata: { success: false, query_count: 0, error: msg }, output: `Failed to fetch query history: ${msg}`, } } diff --git a/packages/opencode/src/altimate/tools/finops-role-access.ts b/packages/opencode/src/altimate/tools/finops-role-access.ts index 76c964af0..d963ae41e 100644 --- a/packages/opencode/src/altimate/tools/finops-role-access.ts +++ b/packages/opencode/src/altimate/tools/finops-role-access.ts @@ -98,8 +98,7 @@ function formatUserRoles(assignments: unknown[]): string { } export const FinopsRoleGrantsTool = Tool.define("finops_role_grants", { - description: - "Query RBAC grants — see what permissions are granted to roles and on which objects. Snowflake only.", + description: "Query RBAC grants — see what permissions are granted to roles and on which objects. Snowflake only.", parameters: z.object({ warehouse: z.string().describe("Warehouse connection name"), role: z.string().optional().describe("Filter to grants for a specific role"), @@ -132,7 +131,7 @@ export const FinopsRoleGrantsTool = Tool.define("finops_role_grants", { const msg = e instanceof Error ? e.message : String(e) return { title: "Role Grants: ERROR", - metadata: { success: false, grant_count: 0 }, + metadata: { success: false, grant_count: 0, error: msg }, output: `Failed to query grants: ${msg}`, } } @@ -165,7 +164,7 @@ export const FinopsRoleHierarchyTool = Tool.define("finops_role_hierarchy", { const msg = e instanceof Error ? 
e.message : String(e) return { title: "Role Hierarchy: ERROR", - metadata: { success: false, role_count: 0 }, + metadata: { success: false, role_count: 0, error: msg }, output: `Failed to query role hierarchy: ${msg}`, } } @@ -204,7 +203,7 @@ export const FinopsUserRolesTool = Tool.define("finops_user_roles", { const msg = e instanceof Error ? e.message : String(e) return { title: "User Roles: ERROR", - metadata: { success: false, assignment_count: 0 }, + metadata: { success: false, assignment_count: 0, error: msg }, output: `Failed to query user roles: ${msg}`, } } diff --git a/packages/opencode/src/altimate/tools/finops-unused-resources.ts b/packages/opencode/src/altimate/tools/finops-unused-resources.ts index c0c91c39a..9911c10a4 100644 --- a/packages/opencode/src/altimate/tools/finops-unused-resources.ts +++ b/packages/opencode/src/altimate/tools/finops-unused-resources.ts @@ -75,10 +75,11 @@ export const FinopsUnusedResourcesTool = Tool.define("finops_unused_resources", }) if (!result.success) { + const error = result.error ?? "Unknown error" return { title: "Unused Resources: FAILED", - metadata: { success: false, unused_count: 0 }, - output: `Failed to find unused resources: ${result.error ?? "Unknown error"}`, + metadata: { success: false, unused_count: 0, error }, + output: `Failed to find unused resources: ${error}`, } } @@ -88,17 +89,13 @@ export const FinopsUnusedResourcesTool = Tool.define("finops_unused_resources", return { title: `Unused Resources: ${total} found`, metadata: { success: true, unused_count: total }, - output: formatUnusedResources( - summary, - result.unused_tables as unknown[], - result.idle_warehouses as unknown[], - ), + output: formatUnusedResources(summary, result.unused_tables as unknown[], result.idle_warehouses as unknown[]), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) return { title: "Unused Resources: ERROR", - metadata: { success: false, unused_count: 0 }, + metadata: { success: false, unused_count: 0, error: msg }, output: `Failed to find unused resources: ${msg}`, } } diff --git a/packages/opencode/src/altimate/tools/finops-warehouse-advice.ts b/packages/opencode/src/altimate/tools/finops-warehouse-advice.ts index 7b9415fe7..cd9d22617 100644 --- a/packages/opencode/src/altimate/tools/finops-warehouse-advice.ts +++ b/packages/opencode/src/altimate/tools/finops-warehouse-advice.ts @@ -83,10 +83,11 @@ export const FinopsWarehouseAdviceTool = Tool.define("finops_warehouse_advice", }) if (!result.success) { + const error = result.error ?? "Unknown error" return { title: "Warehouse Advice: FAILED", - metadata: { success: false, recommendation_count: 0 }, - output: `Failed to analyze warehouses: ${result.error ?? "Unknown error"}`, + metadata: { success: false, recommendation_count: 0, error }, + output: `Failed to analyze warehouses: ${error}`, } } @@ -103,7 +104,7 @@ export const FinopsWarehouseAdviceTool = Tool.define("finops_warehouse_advice", const msg = e instanceof Error ? 
e.message : String(e) return { title: "Warehouse Advice: ERROR", - metadata: { success: false, recommendation_count: 0 }, + metadata: { success: false, recommendation_count: 0, error: msg }, output: `Failed to analyze warehouses: ${msg}`, } } diff --git a/packages/opencode/src/altimate/tools/impact-analysis.ts b/packages/opencode/src/altimate/tools/impact-analysis.ts index 39cc6f96b..25c559bdd 100644 --- a/packages/opencode/src/altimate/tools/impact-analysis.ts +++ b/packages/opencode/src/altimate/tools/impact-analysis.ts @@ -5,6 +5,7 @@ import z from "zod" import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" +import type { Telemetry } from "../telemetry" export const ImpactAnalysisTool = Tool.define("impact_analysis", { description: [ @@ -19,28 +20,15 @@ export const ImpactAnalysisTool = Tool.define("impact_analysis", { '- impact_analysis({ manifest_path: "target/manifest.json", model: "dim_customers", change_type: "retype" })', ].join("\n"), parameters: z.object({ - model: z - .string() - .describe("dbt model name to analyze impact for (e.g., 'stg_orders', 'dim_customers')"), + model: z.string().describe("dbt model name to analyze impact for (e.g., 'stg_orders', 'dim_customers')"), column: z .string() .optional() .describe("Specific column to trace impact for. 
If omitted, analyzes model-level impact."), - change_type: z - .enum(["remove", "rename", "retype", "add", "modify"]) - .describe("Type of change being considered"), - manifest_path: z - .string() - .optional() - .default("target/manifest.json") - .describe("Path to dbt manifest.json file"), - dialect: z - .string() - .optional() - .default("snowflake") - .describe("SQL dialect for lineage analysis"), + change_type: z.enum(["remove", "rename", "retype", "add", "modify"]).describe("Type of change being considered"), + manifest_path: z.string().optional().default("target/manifest.json").describe("Path to dbt manifest.json file"), + dialect: z.string().optional().default("snowflake").describe("SQL dialect for lineage analysis"), }), - // @ts-expect-error tsgo TS2719 false positive — identical pattern works in other tools async execute(args, ctx) { try { // Step 1: Parse the dbt manifest to get the full DAG @@ -121,14 +109,20 @@ export const ImpactAnalysisTool = Tool.define("impact_analysis", { const totalAffected = downstream.length const severity = - totalAffected === 0 - ? "SAFE" - : totalAffected <= 3 - ? "LOW" - : totalAffected <= 10 - ? "MEDIUM" - : "HIGH" + totalAffected === 0 ? "SAFE" : totalAffected <= 3 ? "LOW" : totalAffected <= 10 ? "MEDIUM" : "HIGH" + // altimate_change start — sql quality findings for telemetry + const findings: Telemetry.Finding[] = [] + if (totalAffected > 0) { + findings.push({ category: `impact_${severity.toLowerCase()}` }) + for (const d of direct) { + findings.push({ category: "impact_direct_dependent" }) + } + for (const t of transitive) { + findings.push({ category: "impact_transitive_dependent" }) + } + } + // altimate_change end return { title: `Impact: ${severity} — ${totalAffected} downstream model${totalAffected !== 1 ? 
"s" : ""} affected`, metadata: { @@ -138,6 +132,8 @@ export const ImpactAnalysisTool = Tool.define("impact_analysis", { transitive_count: transitive.length, test_count: affectedTestCount, column_impact: columnImpact.length, + has_schema: false, + ...(findings.length > 0 && { findings }), }, output, } @@ -145,7 +141,7 @@ export const ImpactAnalysisTool = Tool.define("impact_analysis", { const msg = e instanceof Error ? e.message : String(e) return { title: "Impact: ERROR", - metadata: { success: false }, + metadata: { success: false, has_schema: false, error: msg }, output: `Failed to analyze impact: ${msg}\n\nEnsure the dbt manifest exists (run \`dbt compile\`) and the dispatcher is running.`, } } diff --git a/packages/opencode/src/altimate/tools/lineage-check.ts b/packages/opencode/src/altimate/tools/lineage-check.ts index d7965f832..dbe19fc0f 100644 --- a/packages/opencode/src/altimate/tools/lineage-check.ts +++ b/packages/opencode/src/altimate/tools/lineage-check.ts @@ -26,25 +26,26 @@ export const LineageCheckTool = Tool.define("lineage_check", { schema_context: args.schema_context, }) - const data = result.data as Record + const data = (result.data ?? {}) as Record if (result.error) { return { title: "Lineage: ERROR", - metadata: { success: false }, + metadata: { success: false, error: result.error }, output: `Error: ${result.error}`, } } + const error = data.error return { title: `Lineage: ${result.success ? "OK" : "PARTIAL"}`, - metadata: { success: result.success }, + metadata: { success: result.success, ...(error && { error }) }, output: formatLineage(data), } } catch (e) { const msg = e instanceof Error ? 
e.message : String(e) return { title: "Lineage: ERROR", - metadata: { success: false }, + metadata: { success: false, error: msg }, output: `Failed to check lineage: ${msg}\n\nEnsure the dispatcher is running and altimate-core is initialized.`, } } diff --git a/packages/opencode/src/altimate/tools/schema-cache-status.ts b/packages/opencode/src/altimate/tools/schema-cache-status.ts index 96cdd0788..8d803ab8b 100644 --- a/packages/opencode/src/altimate/tools/schema-cache-status.ts +++ b/packages/opencode/src/altimate/tools/schema-cache-status.ts @@ -4,7 +4,8 @@ import { Dispatcher } from "../native" import type { SchemaCacheStatusResult } from "../native/types" export const SchemaCacheStatusTool = Tool.define("schema_cache_status", { - description: "Show status of the local schema cache — which warehouses are indexed, how many tables/columns, when last refreshed.", + description: + "Show status of the local schema cache — which warehouses are indexed, how many tables/columns, when last refreshed.", parameters: z.object({}), async execute(args, ctx) { try { @@ -23,7 +24,7 @@ export const SchemaCacheStatusTool = Tool.define("schema_cache_status", { const msg = e instanceof Error ? e.message : String(e) return { title: "Schema Cache Status: ERROR", - metadata: { totalTables: 0, totalColumns: 0, warehouseCount: 0 }, + metadata: { success: false, totalTables: 0, totalColumns: 0, warehouseCount: 0, error: msg }, output: `Failed to get cache status: ${msg}\n\nEnsure the dispatcher is running.`, } } @@ -45,9 +46,7 @@ function formatStatus(result: SchemaCacheStatusResult): string { lines.push("----------|------|---------|--------|---------|-------------") for (const w of result.warehouses) { const indexed = w.last_indexed ? 
new Date(w.last_indexed).toLocaleString() : "never" - lines.push( - `${w.name} | ${w.type} | ${w.schemas_count} | ${w.tables_count} | ${w.columns_count} | ${indexed}`, - ) + lines.push(`${w.name} | ${w.type} | ${w.schemas_count} | ${w.tables_count} | ${w.columns_count} | ${indexed}`) } } diff --git a/packages/opencode/src/altimate/tools/schema-detect-pii.ts b/packages/opencode/src/altimate/tools/schema-detect-pii.ts index ca81071ba..f1134b859 100644 --- a/packages/opencode/src/altimate/tools/schema-detect-pii.ts +++ b/packages/opencode/src/altimate/tools/schema-detect-pii.ts @@ -36,7 +36,7 @@ export const SchemaDetectPiiTool = Tool.define("schema_detect_pii", { const msg = e instanceof Error ? e.message : String(e) return { title: "PII Scan: ERROR", - metadata: { finding_count: 0, columns_scanned: 0 }, + metadata: { finding_count: 0, columns_scanned: 0, error: msg }, output: `Failed to scan for PII: ${msg}`, } } @@ -45,7 +45,9 @@ export const SchemaDetectPiiTool = Tool.define("schema_detect_pii", { function formatPii(result: PiiDetectResult): string { const lines: string[] = [] - lines.push(`Scanned ${result.columns_scanned} columns, found ${result.finding_count} potential PII columns in ${result.tables_with_pii} tables.`) + lines.push( + `Scanned ${result.columns_scanned} columns, found ${result.finding_count} potential PII columns in ${result.tables_with_pii} tables.`, + ) lines.push("") lines.push("=== By Category ===") diff --git a/packages/opencode/src/altimate/tools/schema-diff.ts b/packages/opencode/src/altimate/tools/schema-diff.ts index 9c3cdb8b0..799fdb23c 100644 --- a/packages/opencode/src/altimate/tools/schema-diff.ts +++ b/packages/opencode/src/altimate/tools/schema-diff.ts @@ -2,6 +2,7 @@ import z from "zod" import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" import type { SchemaDiffResult, ColumnChange } from "../native/types" +import type { Telemetry } from "../telemetry" export const SchemaDiffTool = 
Tool.define("schema_diff", { description: @@ -31,6 +32,11 @@ export const SchemaDiffTool = Tool.define("schema_diff", { const changeCount = result.changes.length const breakingCount = result.changes.filter((c) => c.severity === "breaking").length + // altimate_change start — sql quality findings for telemetry + const findings: Telemetry.Finding[] = result.changes.map((c) => ({ + category: c.change_type ?? (c.severity === "breaking" ? "breaking_change" : "schema_change"), + })) + // altimate_change end return { title: `Schema Diff: ${result.success ? `${changeCount} change${changeCount !== 1 ? "s" : ""}${breakingCount > 0 ? ` (${breakingCount} BREAKING)` : ""}` : "PARSE ERROR"}`, metadata: { @@ -38,6 +44,9 @@ export const SchemaDiffTool = Tool.define("schema_diff", { changeCount, breakingCount, hasBreakingChanges: result.has_breaking_changes, + has_schema: false, + dialect: args.dialect, + ...(findings.length > 0 && { findings }), }, output: formatSchemaDiff(result), } @@ -45,7 +54,15 @@ export const SchemaDiffTool = Tool.define("schema_diff", { const msg = e instanceof Error ? e.message : String(e) return { title: "Schema Diff: ERROR", - metadata: { success: false, changeCount: 0, breakingCount: 0, hasBreakingChanges: false }, + metadata: { + success: false, + changeCount: 0, + breakingCount: 0, + hasBreakingChanges: false, + has_schema: false, + dialect: args.dialect, + error: msg, + }, output: `Failed to diff schema: ${msg}\n\nCheck your connection configuration and try again.`, } } @@ -77,16 +94,16 @@ function formatSchemaDiff(result: SchemaDiffResult): string { const lines: string[] = [] const summary = result.summary - lines.push( - `Schema comparison: ${summary.old_column_count ?? "?"} → ${summary.new_column_count ?? "?"} columns`, - ) + lines.push(`Schema comparison: ${summary.old_column_count ?? "?"} → ${summary.new_column_count ?? 
"?"} columns`) if (result.has_breaking_changes) { lines.push("⚠ BREAKING CHANGES DETECTED") } lines.push("") - lines.push(` Dropped: ${summary.dropped ?? 0} | Added: ${summary.added ?? 0} | Type Changed: ${summary.type_changed ?? 0} | Renamed: ${summary.renamed ?? 0}`) + lines.push( + ` Dropped: ${summary.dropped ?? 0} | Added: ${summary.added ?? 0} | Type Changed: ${summary.type_changed ?? 0} | Renamed: ${summary.renamed ?? 0}`, + ) lines.push("") // Group by severity diff --git a/packages/opencode/src/altimate/tools/schema-search.ts b/packages/opencode/src/altimate/tools/schema-search.ts index 0f331eb63..c4f4259ca 100644 --- a/packages/opencode/src/altimate/tools/schema-search.ts +++ b/packages/opencode/src/altimate/tools/schema-search.ts @@ -7,7 +7,9 @@ export const SchemaSearchTool = Tool.define("schema_search", { description: "Search indexed warehouse metadata for tables and columns. Supports natural language queries like 'customer tables', 'price columns', 'order date fields'. Requires schema_index to be run first.", parameters: z.object({ - query: z.string().describe("Search query — table names, column names, data types, or natural language descriptions"), + query: z + .string() + .describe("Search query — table names, column names, data types, or natural language descriptions"), warehouse: z.string().optional().describe("Limit search to a specific warehouse connection"), limit: z.number().optional().describe("Max results per category (default 20)"), }), @@ -40,7 +42,7 @@ export const SchemaSearchTool = Tool.define("schema_search", { const msg = e instanceof Error ? 
e.message : String(e) return { title: "Schema Search: ERROR", - metadata: { matchCount: 0, tableCount: 0, columnCount: 0 }, + metadata: { matchCount: 0, tableCount: 0, columnCount: 0, error: msg }, output: `Failed to search schema: ${msg}\n\nEnsure schema_index has been run and the dispatcher is running.`, } } diff --git a/packages/opencode/src/altimate/tools/sql-analyze.ts b/packages/opencode/src/altimate/tools/sql-analyze.ts index 870c8f992..00cf57a1d 100644 --- a/packages/opencode/src/altimate/tools/sql-analyze.ts +++ b/packages/opencode/src/altimate/tools/sql-analyze.ts @@ -1,11 +1,12 @@ import z from "zod" import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" +import type { Telemetry } from "../telemetry" import type { SqlAnalyzeResult } from "../native/types" export const SqlAnalyzeTool = Tool.define("sql_analyze", { description: - "Analyze SQL for anti-patterns, performance issues, and optimization opportunities. Performs static analysis without executing the query. Detects issues like SELECT *, cartesian products, missing LIMIT, function-in-filter, correlated subqueries, and more.", + "Analyze SQL for anti-patterns, performance issues, and optimization opportunities. Performs lint, semantic, and safety analysis without executing the query. Provide schema_context or schema_path for accurate semantic analysis — without schema, table/column references cannot be resolved.", parameters: z.object({ sql: z.string().describe("SQL query to analyze"), dialect: z @@ -13,21 +14,41 @@ export const SqlAnalyzeTool = Tool.define("sql_analyze", { .optional() .default("snowflake") .describe("SQL dialect (snowflake, postgres, bigquery, duckdb, etc.)"), + schema_path: z.string().optional().describe("Path to YAML/JSON schema file for table/column resolution"), + schema_context: z + .record(z.string(), z.any()) + .optional() + .describe('Inline schema definition, e.g. 
{"table_name": {"col": "TYPE"}}'), }), async execute(args, ctx) { + const hasSchema = !!(args.schema_path || (args.schema_context && Object.keys(args.schema_context).length > 0)) try { const result = await Dispatcher.call("sql.analyze", { sql: args.sql, dialect: args.dialect, + schema_path: args.schema_path, + schema_context: args.schema_context, }) + // The handler returns success=true when analysis completes (issues are + // reported via issues/issue_count). Only treat it as a failure when + // there's an actual error (e.g. parse failure). + const isRealFailure = !!result.error + // altimate_change start — sql quality findings for telemetry + const findings: Telemetry.Finding[] = result.issues.map((issue) => ({ + category: issue.rule ?? issue.type, + })) + // altimate_change end return { - title: `Analyze: ${result.error ? "PARSE ERROR" : `${result.issue_count} issue${result.issue_count !== 1 ? "s" : ""}`} [${result.confidence}]`, + title: `Analyze: ${result.error ? "ERROR" : `${result.issue_count} issue${result.issue_count !== 1 ? "s" : ""}`} [${result.confidence}]`, metadata: { - success: result.success, + success: !isRealFailure, issueCount: result.issue_count, confidence: result.confidence, + dialect: args.dialect, + has_schema: hasSchema, ...(result.error && { error: result.error }), + ...(findings.length > 0 && { findings }), }, output: formatAnalysis(result), } @@ -35,7 +56,14 @@ export const SqlAnalyzeTool = Tool.define("sql_analyze", { const msg = e instanceof Error ? 
e.message : String(e) return { title: "Analyze: ERROR", - metadata: { success: false, issueCount: 0, confidence: "unknown", error: msg }, + metadata: { + success: false, + issueCount: 0, + confidence: "unknown", + dialect: args.dialect, + has_schema: hasSchema, + error: msg, + }, output: `Failed to analyze SQL: ${msg}\n\nCheck your connection configuration and try again.`, } } @@ -51,7 +79,9 @@ function formatAnalysis(result: SqlAnalyzeResult): string { return "No anti-patterns or issues detected." } - const lines: string[] = [`Found ${result.issue_count} issue${result.issue_count !== 1 ? "s" : ""} (confidence: ${result.confidence}):`] + const lines: string[] = [ + `Found ${result.issue_count} issue${result.issue_count !== 1 ? "s" : ""} (confidence: ${result.confidence}):`, + ] if (result.confidence_factors.length > 0) { lines.push(` Note: ${result.confidence_factors.join("; ")}`) } diff --git a/packages/opencode/src/altimate/tools/sql-diff.ts b/packages/opencode/src/altimate/tools/sql-diff.ts index ca08bc867..777823be2 100644 --- a/packages/opencode/src/altimate/tools/sql-diff.ts +++ b/packages/opencode/src/altimate/tools/sql-diff.ts @@ -27,7 +27,9 @@ export const SqlDiffTool = Tool.define("sql_diff", { } const lines: string[] = [] - lines.push(`${result.change_count} change${result.change_count !== 1 ? "s" : ""} (+${result.additions} -${result.deletions}), ${(result.similarity * 100).toFixed(1)}% similar`) + lines.push( + `${result.change_count} change${result.change_count !== 1 ? "s" : ""} (+${result.additions} -${result.deletions}), ${(result.similarity * 100).toFixed(1)}% similar`, + ) lines.push("") lines.push(result.unified_diff) @@ -40,7 +42,7 @@ export const SqlDiffTool = Tool.define("sql_diff", { const msg = e instanceof Error ? 
e.message : String(e) return { title: "Diff: ERROR", - metadata: { has_changes: false, change_count: 0, similarity: 0 }, + metadata: { has_changes: false, change_count: 0, similarity: 0, error: msg }, output: `Failed to diff SQL: ${msg}`, } } diff --git a/packages/opencode/src/altimate/tools/sql-execute.ts b/packages/opencode/src/altimate/tools/sql-execute.ts index 4908e8d9b..7aa34b574 100644 --- a/packages/opencode/src/altimate/tools/sql-execute.ts +++ b/packages/opencode/src/altimate/tools/sql-execute.ts @@ -47,7 +47,7 @@ export const SqlExecuteTool = Tool.define("sql_execute", { const msg = e instanceof Error ? e.message : String(e) return { title: "SQL: ERROR", - metadata: { rowCount: 0, truncated: false }, + metadata: { rowCount: 0, truncated: false, error: msg }, output: `Failed to execute SQL: ${msg}\n\nEnsure the dispatcher is running and a warehouse connection is configured.`, } } diff --git a/packages/opencode/src/altimate/tools/sql-explain.ts b/packages/opencode/src/altimate/tools/sql-explain.ts index db984d5d1..0786be6a2 100644 --- a/packages/opencode/src/altimate/tools/sql-explain.ts +++ b/packages/opencode/src/altimate/tools/sql-explain.ts @@ -9,7 +9,11 @@ export const SqlExplainTool = Tool.define("sql_explain", { parameters: z.object({ sql: z.string().describe("SQL query to explain"), warehouse: z.string().optional().describe("Warehouse connection name"), - analyze: z.boolean().optional().default(false).describe("Run EXPLAIN ANALYZE (actually executes the query, slower but more accurate)"), + analyze: z + .boolean() + .optional() + .default(false) + .describe("Run EXPLAIN ANALYZE (actually executes the query, slower but more accurate)"), }), async execute(args, ctx) { try { @@ -20,10 +24,11 @@ export const SqlExplainTool = Tool.define("sql_explain", { }) if (!result.success) { + const error = result.error ?? "Unknown error" return { title: "Explain: FAILED", - metadata: { success: false, analyzed: false, warehouse_type: result.warehouse_type ?? 
"unknown" }, - output: `Failed to get execution plan: ${result.error ?? "Unknown error"}`, + metadata: { success: false, analyzed: false, warehouse_type: result.warehouse_type ?? "unknown", error }, + output: `Failed to get execution plan: ${error}`, } } @@ -36,7 +41,7 @@ export const SqlExplainTool = Tool.define("sql_explain", { const msg = e instanceof Error ? e.message : String(e) return { title: "Explain: ERROR", - metadata: { success: false, analyzed: false, warehouse_type: "unknown" }, + metadata: { success: false, analyzed: false, warehouse_type: "unknown", error: msg }, output: `Failed to run EXPLAIN: ${msg}\n\nEnsure a warehouse connection is configured and the dispatcher is running.`, } } diff --git a/packages/opencode/src/altimate/tools/sql-fix.ts b/packages/opencode/src/altimate/tools/sql-fix.ts index 7bcfcb067..368a93981 100644 --- a/packages/opencode/src/altimate/tools/sql-fix.ts +++ b/packages/opencode/src/altimate/tools/sql-fix.ts @@ -25,6 +25,7 @@ export const SqlFixTool = Tool.define("sql_fix", { success: result.success, suggestion_count: result.suggestion_count, has_fix: !!result.fixed_sql, + ...(result.error && { error: result.error }), }, output: formatFix(result), } @@ -32,7 +33,7 @@ export const SqlFixTool = Tool.define("sql_fix", { const msg = e instanceof Error ? 
e.message : String(e) return { title: "Fix: ERROR", - metadata: { success: false, suggestion_count: 0, has_fix: false }, + metadata: { success: false, suggestion_count: 0, has_fix: false, error: msg }, output: `Failed to analyze error: ${msg}\n\nCheck your connection configuration and try again.`, } } diff --git a/packages/opencode/src/altimate/tools/sql-format.ts b/packages/opencode/src/altimate/tools/sql-format.ts index 85b66cef1..08eebc7de 100644 --- a/packages/opencode/src/altimate/tools/sql-format.ts +++ b/packages/opencode/src/altimate/tools/sql-format.ts @@ -7,7 +7,11 @@ export const SqlFormatTool = Tool.define("sql_format", { "Format and beautify SQL code with consistent indentation, keyword casing, and line breaks. Supports all major SQL dialects.", parameters: z.object({ sql: z.string().describe("SQL to format"), - dialect: z.string().optional().default("snowflake").describe("SQL dialect (snowflake, postgres, bigquery, duckdb, etc.)"), + dialect: z + .string() + .optional() + .default("snowflake") + .describe("SQL dialect (snowflake, postgres, bigquery, duckdb, etc.)"), indent: z.number().optional().default(2).describe("Indentation width in spaces"), }), async execute(args, ctx) { @@ -21,7 +25,7 @@ export const SqlFormatTool = Tool.define("sql_format", { if (!result.success) { return { title: "Format: FAILED", - metadata: { success: false, statement_count: 0 }, + metadata: { success: false, statement_count: 0, error: result.error }, output: `Failed to format SQL: ${result.error ?? "Unknown error"}`, } } @@ -35,7 +39,7 @@ export const SqlFormatTool = Tool.define("sql_format", { const msg = e instanceof Error ? 
e.message : String(e) return { title: "Format: ERROR", - metadata: { success: false, statement_count: 0 }, + metadata: { success: false, statement_count: 0, error: msg }, output: `Failed to format SQL: ${msg}\n\nCheck your connection configuration and try again.`, } } diff --git a/packages/opencode/src/altimate/tools/sql-optimize.ts b/packages/opencode/src/altimate/tools/sql-optimize.ts index 5e147288c..8f0a609f3 100644 --- a/packages/opencode/src/altimate/tools/sql-optimize.ts +++ b/packages/opencode/src/altimate/tools/sql-optimize.ts @@ -2,6 +2,7 @@ import z from "zod" import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" import type { SqlOptimizeResult, SqlOptimizeSuggestion, SqlAntiPattern } from "../native/types" +import type { Telemetry } from "../telemetry" export const SqlOptimizeTool = Tool.define("sql_optimize", { description: @@ -16,9 +17,7 @@ export const SqlOptimizeTool = Tool.define("sql_optimize", { schema_context: z .record(z.string(), z.any()) .optional() - .describe( - 'Optional schema mapping for full optimization. Format: {"table_name": {"col_name": "TYPE", ...}}', - ), + .describe('Optional schema mapping for full optimization. Format: {"table_name": {"col_name": "TYPE", ...}}'), }), async execute(args, ctx) { try { @@ -31,6 +30,13 @@ export const SqlOptimizeTool = Tool.define("sql_optimize", { const suggestionCount = result.suggestions.length const antiPatternCount = result.anti_patterns.length + // altimate_change start — sql quality findings for telemetry + const hasSchema = !!(args.schema_context && Object.keys(args.schema_context).length > 0) + const findings: Telemetry.Finding[] = [ + ...result.anti_patterns.map((ap) => ({ category: ap.type ?? "anti_pattern" })), + ...result.suggestions.map((s) => ({ category: s.type ?? "optimization_suggestion" })), + ] + // altimate_change end return { title: `Optimize: ${result.success ? `${suggestionCount} suggestion${suggestionCount !== 1 ? 
"s" : ""}, ${antiPatternCount} anti-pattern${antiPatternCount !== 1 ? "s" : ""}` : "PARSE ERROR"} [${result.confidence}]`, metadata: { @@ -39,6 +45,10 @@ export const SqlOptimizeTool = Tool.define("sql_optimize", { antiPatternCount, hasOptimizedSql: !!result.optimized_sql, confidence: result.confidence, + has_schema: hasSchema, + dialect: args.dialect, + ...(result.error && { error: result.error }), + ...(findings.length > 0 && { findings }), }, output: formatOptimization(result), } @@ -46,7 +56,16 @@ export const SqlOptimizeTool = Tool.define("sql_optimize", { const msg = e instanceof Error ? e.message : String(e) return { title: "Optimize: ERROR", - metadata: { success: false, suggestionCount: 0, antiPatternCount: 0, hasOptimizedSql: false, confidence: "unknown" }, + metadata: { + success: false, + suggestionCount: 0, + antiPatternCount: 0, + hasOptimizedSql: false, + confidence: "unknown", + has_schema: false, + dialect: args.dialect, + error: msg, + }, output: `Failed to optimize SQL: ${msg}\n\nCheck your connection configuration and try again.`, } } diff --git a/packages/opencode/src/altimate/tools/sql-rewrite.ts b/packages/opencode/src/altimate/tools/sql-rewrite.ts index 527f1ab44..2b2a3bf5c 100644 --- a/packages/opencode/src/altimate/tools/sql-rewrite.ts +++ b/packages/opencode/src/altimate/tools/sql-rewrite.ts @@ -16,9 +16,7 @@ export const SqlRewriteTool = Tool.define("sql_rewrite", { schema_context: z .record(z.string(), z.any()) .optional() - .describe( - 'Optional schema mapping for SELECT * expansion. Format: {"table_name": {"col_name": "TYPE", ...}}', - ), + .describe('Optional schema mapping for SELECT * expansion. Format: {"table_name": {"col_name": "TYPE", ...}}'), }), async execute(args, ctx) { try { @@ -45,7 +43,7 @@ export const SqlRewriteTool = Tool.define("sql_rewrite", { const msg = e instanceof Error ? 
e.message : String(e) return { title: "Rewrite: ERROR", - metadata: { success: false, rewriteCount: 0, autoApplyCount: 0, hasRewrittenSql: false }, + metadata: { success: false, rewriteCount: 0, autoApplyCount: 0, hasRewrittenSql: false, error: msg }, output: `Failed to rewrite SQL: ${msg}\n\nCheck your connection configuration and try again.`, } } diff --git a/packages/opencode/src/altimate/tools/sql-translate.ts b/packages/opencode/src/altimate/tools/sql-translate.ts index 83242b166..ab589d4cc 100644 --- a/packages/opencode/src/altimate/tools/sql-translate.ts +++ b/packages/opencode/src/altimate/tools/sql-translate.ts @@ -30,6 +30,7 @@ export const SqlTranslateTool = Tool.define("sql_translate", { source_dialect: result.source_dialect, target_dialect: result.target_dialect, warningCount: result.warnings.length, + ...(result.error && { error: result.error }), }, output: formatTranslation(result, args.sql), } @@ -37,7 +38,13 @@ export const SqlTranslateTool = Tool.define("sql_translate", { const msg = e instanceof Error ? 
e.message : String(e) return { title: `Translate: ERROR`, - metadata: { success: false, source_dialect: args.source_dialect, target_dialect: args.target_dialect, warningCount: 0 }, + metadata: { + success: false, + source_dialect: args.source_dialect, + target_dialect: args.target_dialect, + warningCount: 0, + error: msg, + }, output: `Failed to translate SQL: ${msg}\n\nCheck your connection configuration and try again.`, } } diff --git a/packages/opencode/src/altimate/tools/training-import.ts b/packages/opencode/src/altimate/tools/training-import.ts index 90c2f1e3e..085c00cb8 100644 --- a/packages/opencode/src/altimate/tools/training-import.ts +++ b/packages/opencode/src/altimate/tools/training-import.ts @@ -40,7 +40,6 @@ export const TrainingImportTool = Tool.define("training_import", { .default(20) .describe("Maximum number of entries to import from the document"), }), - // @ts-expect-error tsgo TS2719 false positive — identical pattern works in other tools async execute(args, ctx) { try { // Read the markdown file diff --git a/packages/opencode/src/altimate/tools/warehouse-remove.ts b/packages/opencode/src/altimate/tools/warehouse-remove.ts index 6e136a104..2d6469d35 100644 --- a/packages/opencode/src/altimate/tools/warehouse-remove.ts +++ b/packages/opencode/src/altimate/tools/warehouse-remove.ts @@ -28,7 +28,7 @@ export const WarehouseRemoveTool = Tool.define("warehouse_remove", { const msg = e instanceof Error ? 
e.message : String(e) return { title: `Remove '${args.name}': ERROR`, - metadata: { success: false }, + metadata: { success: false, error: msg }, output: `Failed to remove warehouse: ${msg}`, } } diff --git a/packages/opencode/src/altimate/tools/warehouse-test.ts b/packages/opencode/src/altimate/tools/warehouse-test.ts index acb9801a4..e05e45b3d 100644 --- a/packages/opencode/src/altimate/tools/warehouse-test.ts +++ b/packages/opencode/src/altimate/tools/warehouse-test.ts @@ -3,7 +3,8 @@ import { Tool } from "../../tool/tool" import { Dispatcher } from "../native" export const WarehouseTestTool = Tool.define("warehouse_test", { - description: "Test connectivity to a named warehouse connection. Verifies the connection is reachable and credentials are valid.", + description: + "Test connectivity to a named warehouse connection. Verifies the connection is reachable and credentials are valid.", parameters: z.object({ name: z.string().describe("Name of the warehouse connection to test"), }), @@ -28,7 +29,7 @@ export const WarehouseTestTool = Tool.define("warehouse_test", { const msg = e instanceof Error ? e.message : String(e) return { title: `Connection '${args.name}': ERROR`, - metadata: { connected: false }, + metadata: { connected: false, error: msg }, output: `Failed to test connection: ${msg}\n\nCheck your connection configuration and try again.`, } } diff --git a/packages/opencode/src/tool/tool.ts b/packages/opencode/src/tool/tool.ts index bbc4f8e49..d0a8ec0e5 100644 --- a/packages/opencode/src/tool/tool.ts +++ b/packages/opencode/src/tool/tool.ts @@ -11,6 +11,10 @@ import { Telemetry } from "../altimate/telemetry" export namespace Tool { interface Metadata { [key: string]: any + // altimate_change start — standard error field for telemetry extraction + /** Standard error field — set by tools on failure so telemetry can extract it. 
*/ + error?: string + // altimate_change end } export interface InitContext { @@ -49,10 +53,12 @@ export namespace Tool { export type InferParameters = T extends Info ? z.infer

: never export type InferMetadata = T extends Info ? M : never - export function define( + // altimate_change start — simplify define() signature, Result generic was unused + export function define( id: string, - init: Info["init"] | Awaited["init"]>>, - ): Info { + init: Info["init"] | Awaited["init"]>>, + ): Info { + // altimate_change end return { id, init: async (initCtx) => { @@ -138,6 +144,7 @@ export namespace Tool { }) } if (isSoftFailure) { + // prettier-ignore const errorMsg = typeof result.metadata?.error === "string" ? result.metadata.error @@ -156,6 +163,25 @@ export namespace Tool { duration_ms: durationMs, }) } + // altimate_change start — emit sql_quality when tools report findings + // Only emit for successful tool runs — soft failures already emit core_failure + const findings = result.metadata?.findings as Telemetry.Finding[] | undefined + if (!isSoftFailure && Array.isArray(findings) && findings.length > 0) { + const by_category = Telemetry.aggregateFindings(findings) + Telemetry.track({ + type: "sql_quality", + timestamp: Date.now(), + session_id: ctx.sessionID, + tool_name: id, + tool_category: toolCategory, + finding_count: findings.length, + by_category: JSON.stringify(by_category), + has_schema: result.metadata?.has_schema ?? false, + ...(result.metadata?.dialect && { dialect: result.metadata.dialect as string }), + duration_ms: durationMs, + }) + } + // altimate_change end } catch { // Telemetry must never break tool execution } diff --git a/packages/opencode/test/altimate/e2e-tool-errors.ts b/packages/opencode/test/altimate/e2e-tool-errors.ts new file mode 100644 index 000000000..57326af99 --- /dev/null +++ b/packages/opencode/test/altimate/e2e-tool-errors.ts @@ -0,0 +1,544 @@ +#!/usr/bin/env bun +/** + * E2E test: calls actual tool execute() functions through real dispatcher + * with real altimate-core napi bindings. No mocks. 
+ * + * Run: cd packages/opencode && bun run test/altimate/e2e-tool-errors.ts + */ + +import { Dispatcher } from "../../src/altimate/native" +import * as fs from "fs" +import * as path from "path" +import * as os from "os" + +// Disable telemetry +process.env.ALTIMATE_TELEMETRY_DISABLED = "true" + +// Stub context for tool.execute() +function stubCtx(): any { + return { + sessionID: "e2e-test", + messageID: "e2e-test", + agent: "test", + abort: new AbortController().signal, + messages: [], + metadata: () => {}, + } +} + +// Telemetry extraction logic from tool.ts +function telemetryError(metadata: Record): string { + return typeof metadata?.error === "string" ? metadata.error : "unknown error" +} + +let passed = 0 +let failed = 0 + +async function check( + name: string, + fn: () => Promise<{ metadata: Record; output: string; title: string }>, + expect: { + metadataErrorContains?: string + metadataErrorUndefined?: boolean + metadataSuccess?: boolean + noUnknownError?: boolean + outputContains?: string + }, +) { + try { + const result = await fn() + const errors: string[] = [] + + if (expect.metadataErrorContains) { + if (!result.metadata.error?.includes(expect.metadataErrorContains)) { + errors.push( + `metadata.error should contain "${expect.metadataErrorContains}" but got: ${JSON.stringify(result.metadata.error)}`, + ) + } + } + if (expect.metadataErrorUndefined) { + if (result.metadata.error !== undefined) { + errors.push(`metadata.error should be undefined but got: ${JSON.stringify(result.metadata.error)}`) + } + } + if (expect.metadataSuccess !== undefined) { + if (result.metadata.success !== expect.metadataSuccess) { + errors.push(`metadata.success should be ${expect.metadataSuccess} but got: ${result.metadata.success}`) + } + } + if (expect.noUnknownError) { + const extracted = telemetryError(result.metadata) + if (result.metadata.success === false && extracted === "unknown error") { + errors.push(`telemetry would log "unknown error" — metadata.error is 
missing on failure path`) + } + } + if (expect.outputContains) { + if (!result.output?.includes(expect.outputContains)) { + errors.push(`output should contain "${expect.outputContains}" but got: ${result.output?.slice(0, 200)}`) + } + } + + if (errors.length > 0) { + console.log(` FAIL ${name}`) + for (const e of errors) console.log(` ${e}`) + console.log(` metadata: ${JSON.stringify(result.metadata)}`) + failed++ + } else { + console.log(` PASS ${name}`) + passed++ + } + } catch (e) { + console.log(` FAIL ${name}`) + console.log(` THREW: ${e instanceof Error ? e.message : String(e)}`) + failed++ + } +} + +// Create a temp schema file for schema_path testing +const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "e2e-schema-")) +const schemaJsonPath = path.join(tmpDir, "schema.json") +fs.writeFileSync( + schemaJsonPath, + JSON.stringify({ + tables: { + users: { + columns: [ + { name: "id", type: "INTEGER" }, + { name: "name", type: "VARCHAR" }, + { name: "email", type: "VARCHAR" }, + ], + }, + orders: { + columns: [ + { name: "id", type: "INTEGER" }, + { name: "user_id", type: "INTEGER" }, + { name: "total", type: "DECIMAL" }, + { name: "created_at", type: "TIMESTAMP" }, + ], + }, + }, + }), +) + +const schemaYamlPath = path.join(tmpDir, "schema.yaml") +fs.writeFileSync( + schemaYamlPath, + `tables: + users: + columns: + - name: id + type: INTEGER + - name: name + type: VARCHAR + - name: email + type: VARCHAR + orders: + columns: + - name: id + type: INTEGER + - name: user_id + type: INTEGER + - name: total + type: DECIMAL +`, +) + +const testSql = "SELECT u.id, u.name, o.total FROM users u JOIN orders o ON u.id = o.user_id WHERE o.total > 100" +const badSql = "SELCT * FORM users" + +const flatSchema = { + users: { id: "INTEGER", name: "VARCHAR", email: "VARCHAR" }, + orders: { id: "INTEGER", user_id: "INTEGER", total: "DECIMAL", created_at: "TIMESTAMP" }, +} + +async function main() { + // Force lazy registration + await Dispatcher.call("altimate_core.validate" as 
any, { + sql: "SELECT 1", + schema_path: "", + schema_context: undefined, + }) + + console.log("\n" + "=".repeat(70)) + console.log("E2E TOOL ERROR PROPAGATION TESTS") + console.log("=".repeat(70)) + + // ========================================================================= + // 1. altimate_core_validate + // ========================================================================= + console.log("\n--- altimate_core_validate ---") + + const { AltimateCoreValidateTool } = await import("../../src/altimate/tools/altimate-core-validate") + const validateTool = await AltimateCoreValidateTool.init() + + await check( + "validate: no schema → early return with 'No schema provided'", + async () => { + return validateTool.execute({ sql: testSql }, stubCtx()) + }, + { metadataSuccess: false, metadataErrorContains: "No schema provided", noUnknownError: true }, + ) + + await check( + "validate: with schema_context (flat) → success", + async () => { + return validateTool.execute({ sql: testSql, schema_context: flatSchema }, stubCtx()) + }, + { metadataSuccess: true }, + ) + + await check( + "validate: with schema_path (JSON file) → success", + async () => { + return validateTool.execute({ sql: testSql, schema_path: schemaJsonPath }, stubCtx()) + }, + { metadataSuccess: true }, + ) + + await check( + "validate: with schema_path (YAML file) → success", + async () => { + return validateTool.execute({ sql: testSql, schema_path: schemaYamlPath }, stubCtx()) + }, + { metadataSuccess: true }, + ) + + await check( + "validate: with schema_path (nonexistent file) → error", + async () => { + return validateTool.execute({ sql: testSql, schema_path: "/tmp/nonexistent-schema-abc123.json" }, stubCtx()) + }, + { metadataSuccess: false, noUnknownError: true }, + ) + + await check( + "validate: syntax error SQL with schema → error propagated", + async () => { + return validateTool.execute({ sql: badSql, schema_context: flatSchema }, stubCtx()) + }, + { metadataSuccess: true, 
metadataErrorContains: "Syntax error", noUnknownError: true }, + ) + + // ========================================================================= + // 2. altimate_core_semantics + // ========================================================================= + console.log("\n--- altimate_core_semantics ---") + + const { AltimateCoreSemanticsTool } = await import("../../src/altimate/tools/altimate-core-semantics") + const semanticsTool = await AltimateCoreSemanticsTool.init() + + await check( + "semantics: no schema → early return with 'No schema provided'", + async () => { + return semanticsTool.execute({ sql: testSql }, stubCtx()) + }, + { metadataSuccess: false, metadataErrorContains: "No schema provided", noUnknownError: true }, + ) + + await check( + "semantics: with schema_context → runs (may find issues)", + async () => { + return semanticsTool.execute({ sql: testSql, schema_context: flatSchema }, stubCtx()) + }, + { noUnknownError: true }, + ) + + await check( + "semantics: with schema_path → runs", + async () => { + return semanticsTool.execute({ sql: testSql, schema_path: schemaJsonPath }, stubCtx()) + }, + { noUnknownError: true }, + ) + + // ========================================================================= + // 3. 
altimate_core_equivalence + // ========================================================================= + console.log("\n--- altimate_core_equivalence ---") + + const { AltimateCoreEquivalenceTool } = await import("../../src/altimate/tools/altimate-core-equivalence") + const equivTool = await AltimateCoreEquivalenceTool.init() + + const sql2 = "SELECT u.id, u.name, o.total FROM users u INNER JOIN orders o ON u.id = o.user_id WHERE o.total > 100" + + await check( + "equivalence: no schema → early return with 'No schema provided'", + async () => { + return equivTool.execute({ sql1: testSql, sql2 }, stubCtx()) + }, + { metadataSuccess: false, metadataErrorContains: "No schema provided", noUnknownError: true }, + ) + + await check( + "equivalence: with schema_context → runs", + async () => { + return equivTool.execute({ sql1: testSql, sql2, schema_context: flatSchema }, stubCtx()) + }, + { noUnknownError: true }, + ) + + await check( + "equivalence: with schema_path → runs", + async () => { + return equivTool.execute({ sql1: testSql, sql2, schema_path: schemaJsonPath }, stubCtx()) + }, + { noUnknownError: true }, + ) + + // ========================================================================= + // 4. altimate_core_fix + // ========================================================================= + console.log("\n--- altimate_core_fix ---") + + const { AltimateCoreFixTool } = await import("../../src/altimate/tools/altimate-core-fix") + const fixTool = await AltimateCoreFixTool.init() + + await check( + "fix: unfixable syntax error → error propagated", + async () => { + return fixTool.execute({ sql: badSql }, stubCtx()) + }, + { metadataSuccess: true, noUnknownError: true }, + ) + + await check( + "fix: valid SQL → success (already valid)", + async () => { + return fixTool.execute({ sql: "SELECT 1", schema_context: flatSchema }, stubCtx()) + }, + { metadataSuccess: true }, + ) + + // ========================================================================= + // 5. 
altimate_core_correct + // ========================================================================= + console.log("\n--- altimate_core_correct ---") + + const { AltimateCoreCorrectTool } = await import("../../src/altimate/tools/altimate-core-correct") + const correctTool = await AltimateCoreCorrectTool.init() + + await check( + "correct: unfixable syntax error → error propagated", + async () => { + return correctTool.execute({ sql: badSql }, stubCtx()) + }, + { metadataSuccess: true, noUnknownError: true }, + ) + + // ========================================================================= + // 6. sql_analyze + // ========================================================================= + console.log("\n--- sql_analyze ---") + + const { SqlAnalyzeTool } = await import("../../src/altimate/tools/sql-analyze") + const analyzeTool = await SqlAnalyzeTool.init() + + await check( + "analyze: no schema → lint issues found (partial success)", + async () => { + return analyzeTool.execute({ sql: testSql, dialect: "snowflake" }, stubCtx()) + }, + { noUnknownError: true }, + ) + + await check( + "analyze: with schema_context → richer analysis", + async () => { + const result = await analyzeTool.execute( + { sql: testSql, dialect: "snowflake", schema_context: flatSchema }, + stubCtx(), + ) + // With schema, should get more issues (semantic + lint) + const issueCount = result.metadata.issueCount ?? 0 + if (issueCount <= 1) { + console.log(` NOTE: only ${issueCount} issues with schema (expected > 1 for semantic analysis)`) + } + return result + }, + { noUnknownError: true }, + ) + + await check( + "analyze: with schema_path → richer analysis", + async () => { + return analyzeTool.execute({ sql: testSql, dialect: "snowflake", schema_path: schemaJsonPath }, stubCtx()) + }, + { noUnknownError: true }, + ) + + // ========================================================================= + // 7. 
sql_explain + // ========================================================================= + console.log("\n--- sql_explain ---") + + const { SqlExplainTool } = await import("../../src/altimate/tools/sql-explain") + const explainTool = await SqlExplainTool.init() + + await check( + "explain: no warehouse → error propagated (not 'unknown error')", + async () => { + return explainTool.execute({ sql: testSql, analyze: false }, stubCtx()) + }, + { metadataSuccess: false, noUnknownError: true }, + ) + + // ========================================================================= + // 8. finops_query_history + // ========================================================================= + console.log("\n--- finops_query_history ---") + + const { FinopsQueryHistoryTool } = await import("../../src/altimate/tools/finops-query-history") + const queryHistTool = await FinopsQueryHistoryTool.init() + + await check( + "query_history: no warehouse → error propagated", + async () => { + return queryHistTool.execute({ warehouse: "nonexistent", days: 7, limit: 10 }, stubCtx()) + }, + { metadataSuccess: false, noUnknownError: true }, + ) + + // ========================================================================= + // 9. finops_expensive_queries + // ========================================================================= + console.log("\n--- finops_expensive_queries ---") + + const { FinopsExpensiveQueriesTool } = await import("../../src/altimate/tools/finops-expensive-queries") + const expensiveTool = await FinopsExpensiveQueriesTool.init() + + await check( + "expensive_queries: no warehouse → error propagated", + async () => { + return expensiveTool.execute({ warehouse: "nonexistent", days: 7, limit: 20 }, stubCtx()) + }, + { metadataSuccess: false, noUnknownError: true }, + ) + + // ========================================================================= + // 10. 
finops_analyze_credits + // ========================================================================= + console.log("\n--- finops_analyze_credits ---") + + const { FinopsAnalyzeCreditsTool } = await import("../../src/altimate/tools/finops-analyze-credits") + const creditsTool = await FinopsAnalyzeCreditsTool.init() + + await check( + "analyze_credits: no warehouse → error propagated", + async () => { + return creditsTool.execute({ warehouse: "nonexistent", days: 30, limit: 50 }, stubCtx()) + }, + { metadataSuccess: false, noUnknownError: true }, + ) + + // ========================================================================= + // 11. finops_unused_resources + // ========================================================================= + console.log("\n--- finops_unused_resources ---") + + const { FinopsUnusedResourcesTool } = await import("../../src/altimate/tools/finops-unused-resources") + const unusedTool = await FinopsUnusedResourcesTool.init() + + await check( + "unused_resources: no warehouse → error propagated", + async () => { + return unusedTool.execute({ warehouse: "nonexistent", days: 30, limit: 50 }, stubCtx()) + }, + { metadataSuccess: false, noUnknownError: true }, + ) + + // ========================================================================= + // 12. 
finops_warehouse_advice + // ========================================================================= + console.log("\n--- finops_warehouse_advice ---") + + const { FinopsWarehouseAdviceTool } = await import("../../src/altimate/tools/finops-warehouse-advice") + const adviceTool = await FinopsWarehouseAdviceTool.init() + + await check( + "warehouse_advice: no warehouse → error propagated", + async () => { + return adviceTool.execute({ warehouse: "nonexistent", days: 14 }, stubCtx()) + }, + { metadataSuccess: false, noUnknownError: true }, + ) + + // ========================================================================= + // Schema resolution edge cases + // ========================================================================= + console.log("\n--- schema resolution edge cases ---") + + await check( + "schema_path: empty string → treated as no schema", + async () => { + return validateTool.execute({ sql: testSql, schema_path: "" }, stubCtx()) + }, + { metadataSuccess: false, metadataErrorContains: "No schema provided", noUnknownError: true }, + ) + + await check( + "schema_context: empty object → treated as no schema", + async () => { + return validateTool.execute({ sql: testSql, schema_context: {} }, stubCtx()) + }, + { metadataSuccess: false, metadataErrorContains: "No schema provided", noUnknownError: true }, + ) + + await check( + "schema_context: array format → works", + async () => { + return validateTool.execute( + { + sql: "SELECT id FROM users", + schema_context: { + users: [ + { name: "id", type: "INTEGER" }, + { name: "name", type: "VARCHAR" }, + ], + }, + }, + stubCtx(), + ) + }, + { metadataSuccess: true }, + ) + + await check( + "schema_context: SchemaDefinition format → works", + async () => { + return validateTool.execute( + { + sql: "SELECT id FROM users", + schema_context: { + tables: { + users: { + columns: [ + { name: "id", type: "INTEGER" }, + { name: "name", type: "VARCHAR" }, + ], + }, + }, + }, + }, + stubCtx(), + ) + }, + { 
metadataSuccess: true }, + ) + + // ========================================================================= + // Summary + // ========================================================================= + console.log("\n" + "=".repeat(70)) + console.log(`RESULTS: ${passed} passed, ${failed} failed, ${passed + failed} total`) + console.log("=".repeat(70)) + + // Cleanup + fs.rmSync(tmpDir, { recursive: true }) + + process.exit(failed > 0 ? 1 : 0) +} + +main().catch((e) => { + console.error("FATAL:", e) + process.exit(1) +}) diff --git a/packages/opencode/test/altimate/sql-quality-telemetry.test.ts b/packages/opencode/test/altimate/sql-quality-telemetry.test.ts new file mode 100644 index 000000000..f09a1c4a2 --- /dev/null +++ b/packages/opencode/test/altimate/sql-quality-telemetry.test.ts @@ -0,0 +1,333 @@ +/** + * SQL Quality Telemetry Tests + * + * Verifies the aggregation logic, event payload shape, and finding + * extraction patterns used for the `sql_quality` telemetry event, and + * that scenarios with no findings result in empty finding arrays (the + * condition used by tool.ts to decide not to emit the event). + */ + +import { describe, expect, test } from "bun:test" +import { Telemetry } from "../../src/altimate/telemetry" + +// --------------------------------------------------------------------------- +// 1. 
aggregateFindings +// --------------------------------------------------------------------------- +describe("Telemetry.aggregateFindings", () => { + test("aggregates findings by category", () => { + const findings: Telemetry.Finding[] = [ + { category: "missing_table" }, + { category: "missing_column" }, + { category: "lint" }, + { category: "missing_table" }, + ] + const result = Telemetry.aggregateFindings(findings) + expect(result).toEqual({ + missing_table: 2, + missing_column: 1, + lint: 1, + }) + }) + + test("returns empty object for empty findings", () => { + const result = Telemetry.aggregateFindings([]) + expect(result).toEqual({}) + }) + + test("handles single finding", () => { + const findings: Telemetry.Finding[] = [{ category: "syntax_error" }] + const result = Telemetry.aggregateFindings(findings) + expect(result).toEqual({ syntax_error: 1 }) + }) + + test("handles all same category", () => { + const findings: Telemetry.Finding[] = [{ category: "lint" }, { category: "lint" }, { category: "lint" }] + const result = Telemetry.aggregateFindings(findings) + expect(result).toEqual({ lint: 3 }) + }) +}) + +// --------------------------------------------------------------------------- +// 2. 
sql_quality event shape validation +// --------------------------------------------------------------------------- +describe("sql_quality event shape", () => { + test("by_category serializes to valid JSON string", () => { + const findings: Telemetry.Finding[] = [{ category: "lint" }, { category: "lint" }, { category: "safety" }] + const by_category = Telemetry.aggregateFindings(findings) + const json = JSON.stringify(by_category) + + // Should round-trip through JSON + expect(JSON.parse(json)).toEqual({ lint: 2, safety: 1 }) + }) + + test("aggregated counts match finding_count", () => { + const findings: Telemetry.Finding[] = [{ category: "a" }, { category: "b" }, { category: "c" }, { category: "a" }] + const by_category = Telemetry.aggregateFindings(findings) + const total = Object.values(by_category).reduce((a, b) => a + b, 0) + expect(total).toBe(findings.length) + }) +}) + +// --------------------------------------------------------------------------- +// 3. Finding extraction patterns (validates what tools produce) +// --------------------------------------------------------------------------- +describe("tool finding extraction patterns", () => { + test("sql_analyze issues use rule for lint, fall back to type otherwise", () => { + // Lint issues have rule (e.g. "select_star"), semantic/safety don't + const issues = [ + { + type: "lint", + rule: "select_star", + severity: "warning", + message: "...", + recommendation: "...", + confidence: "high", + }, + { + type: "lint", + rule: "filter_has_func", + severity: "warning", + message: "...", + recommendation: "...", + confidence: "high", + }, + { type: "semantic", severity: "warning", message: "...", recommendation: "...", confidence: "medium" }, + { type: "safety", severity: "high", message: "...", recommendation: "...", confidence: "high" }, + ] + const findings: Telemetry.Finding[] = issues.map((i: any) => ({ + category: i.rule ?? 
i.type, + })) + expect(findings).toEqual([ + { category: "select_star" }, + { category: "filter_has_func" }, + { category: "semantic" }, + { category: "safety" }, + ]) + }) + + test("validate errors map to findings with classification", () => { + const errors = [ + { message: "Table 'users' not found in schema" }, + { message: "Column 'email' not found in table 'orders'" }, + { message: "Syntax error near 'SELCT'" }, + ] + // Simulates classifyValidationError logic (column check before table check) + function classify(msg: string): string { + const lower = msg.toLowerCase() + if (lower.includes("column") && lower.includes("not found")) return "missing_column" + if (lower.includes("table") && lower.includes("not found")) return "missing_table" + if (lower.includes("syntax")) return "syntax_error" + return "validation_error" + } + const findings: Telemetry.Finding[] = errors.map((e) => ({ + category: classify(e.message), + })) + const by_category = Telemetry.aggregateFindings(findings) + expect(by_category).toEqual({ + missing_table: 1, + missing_column: 1, + syntax_error: 1, + }) + }) + + test("semantics issues all map to semantic_issue category", () => { + // Semantic findings don't have rule/type — always "semantic_issue" + const issues = [ + { severity: "error", message: "..." }, + { severity: "warning", message: "..." }, + { severity: "warning", message: "..." 
}, + ] + const findings: Telemetry.Finding[] = issues.map(() => ({ + category: "semantic_issue", + })) + expect(findings).toEqual([ + { category: "semantic_issue" }, + { category: "semantic_issue" }, + { category: "semantic_issue" }, + ]) + const by_category = Telemetry.aggregateFindings(findings) + expect(by_category).toEqual({ semantic_issue: 3 }) + }) + + test("fix tool produces fix_applied and unfixable_error categories", () => { + const data = { + fixes_applied: [{ description: "Fixed typo" }, { description: "Fixed reference" }], + unfixable_errors: [{ error: { message: "Cannot resolve" } }], + } + const findings: Telemetry.Finding[] = [] + for (const _ of data.fixes_applied) { + findings.push({ category: "fix_applied" }) + } + for (const _ of data.unfixable_errors) { + findings.push({ category: "unfixable_error" }) + } + const by_category = Telemetry.aggregateFindings(findings) + expect(by_category).toEqual({ fix_applied: 2, unfixable_error: 1 }) + }) + + test("equivalence differences produce findings only when not equivalent", () => { + // Equivalent — no findings + const equivData = { equivalent: true, differences: [] } + const equivFindings: Telemetry.Finding[] = [] + if (!equivData.equivalent && equivData.differences?.length) { + for (const _ of equivData.differences) { + equivFindings.push({ category: "equivalence_difference" }) + } + } + expect(equivFindings).toEqual([]) + + // Different — findings + const diffData = { equivalent: false, differences: [{ description: "..." }, { description: "..." 
}] } + const diffFindings: Telemetry.Finding[] = [] + if (!diffData.equivalent && diffData.differences?.length) { + for (const _ of diffData.differences) { + diffFindings.push({ category: "equivalence_difference" }) + } + } + expect(diffFindings.length).toBe(2) + const by_category = Telemetry.aggregateFindings(diffFindings) + expect(by_category).toEqual({ equivalence_difference: 2 }) + }) + + test("correct tool changes produce findings", () => { + const data = { changes: [{ description: "a" }, { description: "b" }] } + const findings: Telemetry.Finding[] = data.changes.map(() => ({ + category: "correction_applied", + })) + expect(findings.length).toBe(2) + const by_category = Telemetry.aggregateFindings(findings) + expect(by_category).toEqual({ correction_applied: 2 }) + }) + + test("check tool aggregates validation, lint, safety, and pii findings", () => { + const data = { + validation: { valid: false, errors: [{ message: "syntax error" }] }, + lint: { + clean: false, + findings: [ + { rule: "select_star", severity: "warning", message: "..." }, + { rule: "filter_has_func", severity: "warning", message: "..." }, + ], + }, + safety: { safe: false, threats: [{ type: "sql_injection", severity: "high", description: "..." }] }, + pii: { findings: [{ column: "email", category: "email", confidence: "high" }] }, + } + const findings: Telemetry.Finding[] = [] + for (const _ of data.validation.errors) findings.push({ category: "validation_error" }) + for (const f of data.lint.findings) findings.push({ category: f.rule ?? "lint" }) + for (const t of data.safety.threats) findings.push({ category: (t as any).type ?? 
"safety_threat" }) + for (const _ of data.pii.findings) findings.push({ category: "pii_detected" }) + const by_category = Telemetry.aggregateFindings(findings) + expect(by_category).toEqual({ + validation_error: 1, + select_star: 1, + filter_has_func: 1, + sql_injection: 1, + pii_detected: 1, + }) + }) + + test("policy violations use rule as category", () => { + const data = { + pass: false, + violations: [ + { rule: "no_select_star", severity: "error", message: "..." }, + { rule: "require_where", severity: "error", message: "..." }, + { severity: "warning", message: "..." }, // no rule + ], + } + const findings: Telemetry.Finding[] = data.violations.map((v: any) => ({ + category: v.rule ?? "policy_violation", + })) + const by_category = Telemetry.aggregateFindings(findings) + expect(by_category).toEqual({ + no_select_star: 1, + require_where: 1, + policy_violation: 1, + }) + }) + + test("schema diff uses change_type as category", () => { + const changes = [ + { severity: "breaking", change_type: "column_dropped", message: "..." }, + { severity: "warning", change_type: "type_changed", message: "..." }, + { severity: "info", change_type: "column_added", message: "..." }, + { severity: "breaking", change_type: "column_dropped", message: "..." }, + ] + const findings: Telemetry.Finding[] = changes.map((c) => ({ + category: c.change_type ?? (c.severity === "breaking" ? "breaking_change" : "schema_change"), + })) + const by_category = Telemetry.aggregateFindings(findings) + expect(by_category).toEqual({ + column_dropped: 2, + type_changed: 1, + column_added: 1, + }) + }) + + test("optimize tool combines anti-patterns and suggestions", () => { + const result = { + anti_patterns: [ + { type: "cartesian_product", severity: "error", message: "..." }, + { type: "select_star", severity: "warning", message: "..." }, + ], + suggestions: [{ type: "cte_elimination", impact: "high", description: "..." 
}], + } + const findings: Telemetry.Finding[] = [ + ...result.anti_patterns.map((ap) => ({ category: ap.type ?? "anti_pattern" })), + ...result.suggestions.map((s) => ({ category: s.type ?? "optimization_suggestion" })), + ] + const by_category = Telemetry.aggregateFindings(findings) + expect(by_category).toEqual({ + cartesian_product: 1, + select_star: 1, + cte_elimination: 1, + }) + }) + + test("impact analysis produces findings only when downstream affected", () => { + // No impact — no findings + const safeFindings: Telemetry.Finding[] = [] + expect(safeFindings).toEqual([]) + + // High impact — findings per dependent + const findings: Telemetry.Finding[] = [] + const direct = [{ name: "model_a" }, { name: "model_b" }] + const transitive = [{ name: "model_c" }] + const totalAffected = direct.length + transitive.length + if (totalAffected > 0) { + findings.push({ category: "impact_medium" }) + for (const _ of direct) findings.push({ category: "impact_direct_dependent" }) + for (const _ of transitive) findings.push({ category: "impact_transitive_dependent" }) + } + const by_category = Telemetry.aggregateFindings(findings) + expect(by_category).toEqual({ + impact_medium: 1, + impact_direct_dependent: 2, + impact_transitive_dependent: 1, + }) + }) +}) + +// --------------------------------------------------------------------------- +// 4. 
No findings = no event +// --------------------------------------------------------------------------- +describe("no findings = no sql_quality event", () => { + test("empty issues array produces empty findings", () => { + const issues: any[] = [] + const findings: Telemetry.Finding[] = issues.map((i: any) => ({ + category: i.type, + })) + expect(findings.length).toBe(0) + // tool.ts guards: !isSoftFailure && Array.isArray(findings) && findings.length > 0 + // So no event would be emitted + }) + + test("valid SQL with no errors produces no findings", () => { + const data = { valid: true, errors: [] } + const findings: Telemetry.Finding[] = (data.errors ?? []).map(() => ({ + category: "validation_error", + })) + expect(findings.length).toBe(0) + }) +}) diff --git a/packages/opencode/test/altimate/tool-error-propagation-complete.test.ts b/packages/opencode/test/altimate/tool-error-propagation-complete.test.ts new file mode 100644 index 000000000..db3404348 --- /dev/null +++ b/packages/opencode/test/altimate/tool-error-propagation-complete.test.ts @@ -0,0 +1,512 @@ +/** + * Tests error propagation for ALL altimate-core tool wrappers. + * + * Covers the 20 tools that were missing error propagation (Issue #1), + * plus targeted fixes for impact-analysis (Issue #2), sql-fix (Issue #8), + * complete/grade ?? {} guard (Issue #11), and lineage-check. + * + * Each test verifies: + * 1. Success path: result.error propagates to metadata.error + * 2. Data error: data.error propagates to metadata.error + * 3. Catch path: exception message propagates to metadata.error + * 4. Clean path: no error key when everything succeeds + */ + +import { describe, expect, test, beforeAll, afterAll, beforeEach } from "bun:test" +import * as Dispatcher from "../../src/altimate/native/dispatcher" + +beforeAll(async () => { + process.env.ALTIMATE_TELEMETRY_DISABLED = "true" + // Import native/index.ts to set the lazy registration hook, then consume it. 
+ // This prevents the hook from firing during tool.execute() and overwriting mocks. + await import("../../src/altimate/native/index") + try { + await Dispatcher.call("__trigger_hook__" as any, {} as any) + } catch {} + Dispatcher.reset() +}) +afterAll(() => { + delete process.env.ALTIMATE_TELEMETRY_DISABLED +}) + +function stubCtx(): any { + return { + sessionID: "test", + messageID: "test", + agent: "test", + abort: new AbortController().signal, + messages: [], + metadata: () => {}, + } +} + +function telemetryWouldExtract(metadata: Record): string { + return typeof metadata?.error === "string" ? metadata.error : "unknown error" +} + +// --------------------------------------------------------------------------- +// Helper: test a tool for all 3 error paths + clean path +// --------------------------------------------------------------------------- +function describeToolErrorPropagation(opts: { + name: string + dispatcherMethod: string + importPath: string + exportName: string + args: Record + successResponse: Record + dataErrorResponse: Record +}) { + describe(`${opts.name} error propagation`, () => { + beforeEach(() => Dispatcher.reset()) + + test("propagates result.error to metadata", async () => { + Dispatcher.register(opts.dispatcherMethod as any, async () => ({ + success: false, + error: "Dispatcher-level failure", + data: {}, + })) + + const mod = await import(opts.importPath) + const tool = await mod[opts.exportName].init() + const result = await tool.execute(opts.args, stubCtx()) + + expect(result.metadata.error).toContain("Dispatcher-level failure") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) + + test("propagates data.error to metadata", async () => { + Dispatcher.register(opts.dispatcherMethod as any, async () => opts.dataErrorResponse) + + const mod = await import(opts.importPath) + const tool = await mod[opts.exportName].init() + const result = await tool.execute(opts.args, stubCtx()) + + 
expect(result.metadata.error).toBeDefined() + expect(typeof result.metadata.error).toBe("string") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) + + test("propagates exception to metadata.error in catch", async () => { + Dispatcher.register(opts.dispatcherMethod as any, async () => { + throw new Error("Connection refused") + }) + + const mod = await import(opts.importPath) + const tool = await mod[opts.exportName].init() + const result = await tool.execute(opts.args, stubCtx()) + + expect(result.metadata.success).toBe(false) + expect(result.metadata.error).toBe("Connection refused") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) + + test("no error key in metadata on clean success", async () => { + Dispatcher.register(opts.dispatcherMethod as any, async () => opts.successResponse) + + const mod = await import(opts.importPath) + const tool = await mod[opts.exportName].init() + const result = await tool.execute(opts.args, stubCtx()) + + expect(result.metadata.error).toBeUndefined() + }) + }) +} + +// --------------------------------------------------------------------------- +// The 20 previously-untreated altimate-core tools +// --------------------------------------------------------------------------- + +describeToolErrorPropagation({ + name: "altimate_core_check", + dispatcherMethod: "altimate_core.check", + importPath: "../../src/altimate/tools/altimate-core-check", + exportName: "AltimateCoreCheckTool", + args: { sql: "SELECT 1", dialect: "snowflake" }, + successResponse: { + success: true, + data: { validation: { valid: true }, lint: { clean: true }, safety: { safe: true }, pii: { findings: [] } }, + }, + dataErrorResponse: { success: true, data: { error: "Internal check engine failure" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_classify_pii", + dispatcherMethod: "altimate_core.classify_pii", + importPath: "../../src/altimate/tools/altimate-core-classify-pii", + exportName: 
"AltimateCoreClassifyPiiTool", + args: { schema_context: { users: { id: "INT" } } }, + successResponse: { success: true, data: { columns: [], findings: [] } }, + dataErrorResponse: { success: true, data: { error: "Schema parse failed" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_column_lineage", + dispatcherMethod: "altimate_core.column_lineage", + importPath: "../../src/altimate/tools/altimate-core-column-lineage", + exportName: "AltimateCoreColumnLineageTool", + args: { sql: "SELECT id FROM users" }, + successResponse: { success: true, data: { column_lineage: [{ source: "users.id", target: "id" }] } }, + dataErrorResponse: { success: true, data: { error: "Failed to resolve table" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_compare", + dispatcherMethod: "altimate_core.compare", + importPath: "../../src/altimate/tools/altimate-core-compare", + exportName: "AltimateCoreCompareTool", + args: { left_sql: "SELECT 1", right_sql: "SELECT 2" }, + successResponse: { success: true, data: { differences: [] } }, + dataErrorResponse: { success: true, data: { error: "Parse failure in left SQL" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_export_ddl", + dispatcherMethod: "altimate_core.export_ddl", + importPath: "../../src/altimate/tools/altimate-core-export-ddl", + exportName: "AltimateCoreExportDdlTool", + args: { schema_context: { users: { id: "INT" } } }, + successResponse: { success: true, data: { ddl: "CREATE TABLE users (id INT)" } }, + dataErrorResponse: { success: true, data: { error: "No tables in schema" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_extract_metadata", + dispatcherMethod: "altimate_core.metadata", + importPath: "../../src/altimate/tools/altimate-core-extract-metadata", + exportName: "AltimateCoreExtractMetadataTool", + args: { sql: "SELECT id FROM users" }, + successResponse: { success: true, data: { tables: ["users"], columns: ["id"] } }, + dataErrorResponse: { 
success: true, data: { error: "Parse failure" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_fingerprint", + dispatcherMethod: "altimate_core.fingerprint", + importPath: "../../src/altimate/tools/altimate-core-fingerprint", + exportName: "AltimateCoreFingerprintTool", + args: { schema_context: { users: { id: "INT" } } }, + successResponse: { success: true, data: { fingerprint: "abc123def456" } }, + dataErrorResponse: { success: true, data: { error: "Empty schema" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_import_ddl", + dispatcherMethod: "altimate_core.import_ddl", + importPath: "../../src/altimate/tools/altimate-core-import-ddl", + exportName: "AltimateCoreImportDdlTool", + args: { ddl: "CREATE TABLE users (id INT)" }, + successResponse: { success: true, data: { schema: { users: { id: "INT" } } } }, + dataErrorResponse: { success: true, data: { error: "Invalid DDL syntax" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_introspection_sql", + dispatcherMethod: "altimate_core.introspection_sql", + importPath: "../../src/altimate/tools/altimate-core-introspection-sql", + exportName: "AltimateCoreIntrospectionSqlTool", + args: { db_type: "postgres", database: "mydb" }, + successResponse: { success: true, data: { queries: { tables: "SELECT * FROM information_schema.tables" } } }, + dataErrorResponse: { success: true, data: { error: "Unsupported database type" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_migration", + dispatcherMethod: "altimate_core.migration", + importPath: "../../src/altimate/tools/altimate-core-migration", + exportName: "AltimateCoreMigrationTool", + args: { old_ddl: "CREATE TABLE users (id INT)", new_ddl: "CREATE TABLE users (id BIGINT)" }, + successResponse: { success: true, data: { risks: [] } }, + dataErrorResponse: { success: true, data: { error: "Failed to parse old DDL" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_optimize_context", + 
dispatcherMethod: "altimate_core.optimize_context", + importPath: "../../src/altimate/tools/altimate-core-optimize-context", + exportName: "AltimateCoreOptimizeContextTool", + args: { schema_context: { users: { id: "INT" } } }, + successResponse: { success: true, data: { levels: [{ level: 1, tokens: 100 }] } }, + dataErrorResponse: { success: true, data: { error: "Schema too large" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_parse_dbt", + dispatcherMethod: "altimate_core.parse_dbt", + importPath: "../../src/altimate/tools/altimate-core-parse-dbt", + exportName: "AltimateCoreParseDbtTool", + args: { project_dir: "/tmp/fake" }, + successResponse: { success: true, data: { models: [{ name: "stg_users" }] } }, + dataErrorResponse: { success: true, data: { error: "dbt_project.yml not found" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_policy", + dispatcherMethod: "altimate_core.policy", + importPath: "../../src/altimate/tools/altimate-core-policy", + exportName: "AltimateCorePolicyTool", + args: { sql: "DELETE FROM users", policy_json: '{"rules": []}' }, + successResponse: { success: true, data: { pass: true, violations: [] } }, + dataErrorResponse: { success: true, data: { error: "Invalid policy JSON" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_prune_schema", + dispatcherMethod: "altimate_core.prune_schema", + importPath: "../../src/altimate/tools/altimate-core-prune-schema", + exportName: "AltimateCorePruneSchemaTool", + args: { sql: "SELECT 1", schema_context: { users: { id: "INT" } } }, + successResponse: { success: true, data: { relevant_tables: ["users"], tables_pruned: 0, total_tables: 1 } }, + dataErrorResponse: { success: true, data: { error: "SQL parse failure" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_query_pii", + dispatcherMethod: "altimate_core.query_pii", + importPath: "../../src/altimate/tools/altimate-core-query-pii", + exportName: "AltimateCoreQueryPiiTool", + args: 
{ sql: "SELECT email FROM users", schema_context: { users: { email: "VARCHAR" } } }, + successResponse: { success: true, data: { pii_columns: [], exposures: [] } }, + dataErrorResponse: { success: true, data: { error: "PII classification unavailable" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_resolve_term", + dispatcherMethod: "altimate_core.resolve_term", + importPath: "../../src/altimate/tools/altimate-core-resolve-term", + exportName: "AltimateCoreResolveTermTool", + args: { term: "revenue", schema_context: { orders: { total: "DECIMAL" } } }, + successResponse: { success: true, data: { matches: [{ table: "orders", column: "total", confidence: 0.9 }] } }, + dataErrorResponse: { success: true, data: { error: "No schema loaded" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_rewrite", + dispatcherMethod: "altimate_core.rewrite", + importPath: "../../src/altimate/tools/altimate-core-rewrite", + exportName: "AltimateCoreRewriteTool", + args: { sql: "SELECT * FROM users" }, + successResponse: { success: true, data: { suggestions: [], rewrites: [] } }, + dataErrorResponse: { success: true, data: { error: "Rewrite engine unavailable" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_schema_diff", + dispatcherMethod: "altimate_core.schema_diff", + importPath: "../../src/altimate/tools/altimate-core-schema-diff", + exportName: "AltimateCoreSchemaDiffTool", + args: { schema1_context: { users: { id: "INT" } }, schema2_context: { users: { id: "BIGINT" } } }, + successResponse: { success: true, data: { changes: [], has_breaking_changes: false } }, + dataErrorResponse: { success: true, data: { error: "Schema1 parse failure" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_testgen", + dispatcherMethod: "altimate_core.testgen", + importPath: "../../src/altimate/tools/altimate-core-testgen", + exportName: "AltimateCoreTestgenTool", + args: { sql: "SELECT id FROM users" }, + successResponse: { success: true, 
data: { tests: [{ name: "boundary_test", sql: "SELECT 1" }] } }, + dataErrorResponse: { success: true, data: { error: "Test generation failed" } }, +}) + +describeToolErrorPropagation({ + name: "altimate_core_track_lineage", + dispatcherMethod: "altimate_core.track_lineage", + importPath: "../../src/altimate/tools/altimate-core-track-lineage", + exportName: "AltimateCoreTrackLineageTool", + args: { queries: ["SELECT 1", "SELECT 2"] }, + successResponse: { success: true, data: { edges: [{ source: "a", target: "b" }] } }, + dataErrorResponse: { success: true, data: { error: "Lineage tracking failed" } }, +}) + +// --------------------------------------------------------------------------- +// Issue #2: impact-analysis.ts catch block missing error in metadata +// --------------------------------------------------------------------------- +describe("impact_analysis catch error propagation", () => { + beforeEach(() => Dispatcher.reset()) + + test("propagates exception to metadata.error in catch", async () => { + Dispatcher.register("dbt.manifest" as any, async () => { + throw new Error("Manifest not found") + }) + + const { ImpactAnalysisTool } = await import("../../src/altimate/tools/impact-analysis") + const tool = await ImpactAnalysisTool.init() + const result = await tool.execute( + { + model: "stg_orders", + change_type: "remove" as const, + manifest_path: "target/manifest.json", + dialect: "snowflake", + }, + stubCtx(), + ) + + expect(result.metadata.success).toBe(false) + expect(result.metadata.error).toBe("Manifest not found") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// Issue #8: sql-fix.ts unconditional error spread — no error key on success +// --------------------------------------------------------------------------- +describe("sql_fix conditional error spread", () => { + beforeEach(() => Dispatcher.reset()) + + test("no error key in 
metadata on successful fix", async () => { + Dispatcher.register("sql.fix" as any, async () => ({ + success: true, + error_message: "Column 'foo' not found", + fixed_sql: "SELECT bar FROM t", + suggestions: [{ type: "column_fix", confidence: "high", message: "Did you mean 'bar'?" }], + suggestion_count: 1, + })) + + const { SqlFixTool } = await import("../../src/altimate/tools/sql-fix") + const tool = await SqlFixTool.init() + const result = await tool.execute( + { sql: "SELECT foo FROM t", error_message: "Column 'foo' not found", dialect: "snowflake" }, + stubCtx(), + ) + + expect(result.metadata.success).toBe(true) + expect(result.metadata.error).toBeUndefined() + }) + + test("error key present when result.error exists", async () => { + Dispatcher.register("sql.fix" as any, async () => ({ + success: false, + error: "Parse failure", + error_message: "", + fixed_sql: null, + suggestions: [], + suggestion_count: 0, + })) + + const { SqlFixTool } = await import("../../src/altimate/tools/sql-fix") + const tool = await SqlFixTool.init() + const result = await tool.execute({ sql: "SELCT", error_message: "syntax error", dialect: "snowflake" }, stubCtx()) + + expect(result.metadata.error).toBe("Parse failure") + }) +}) + +// --------------------------------------------------------------------------- +// Issue #11: altimate_core_complete missing ?? 
{} guard +// --------------------------------------------------------------------------- +describe("altimate_core_complete null data guard", () => { + beforeEach(() => Dispatcher.reset()) + + test("handles null result.data without TypeError", async () => { + Dispatcher.register("altimate_core.complete" as any, async () => ({ + success: false, + error: "Engine crashed", + data: null, + })) + + const { AltimateCoreCompleteTool } = await import("../../src/altimate/tools/altimate-core-complete") + const tool = await AltimateCoreCompleteTool.init() + const result = await tool.execute({ sql: "SEL", cursor_pos: 3 }, stubCtx()) + + expect(result.metadata.error).toBe("Engine crashed") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) + + test("handles undefined result.data without TypeError", async () => { + Dispatcher.register("altimate_core.complete" as any, async () => ({ + success: false, + error: "No handler", + })) + + const { AltimateCoreCompleteTool } = await import("../../src/altimate/tools/altimate-core-complete") + const tool = await AltimateCoreCompleteTool.init() + const result = await tool.execute({ sql: "SEL", cursor_pos: 3 }, stubCtx()) + + expect(result.metadata.error).toBe("No handler") + }) +}) + +// --------------------------------------------------------------------------- +// altimate_core_grade missing ?? 
{} guard +// --------------------------------------------------------------------------- +describe("altimate_core_grade null data guard", () => { + beforeEach(() => Dispatcher.reset()) + + test("handles null result.data without TypeError", async () => { + Dispatcher.register("altimate_core.grade" as any, async () => ({ + success: false, + error: "Grading engine unavailable", + data: null, + })) + + const { AltimateCoreGradeTool } = await import("../../src/altimate/tools/altimate-core-grade") + const tool = await AltimateCoreGradeTool.init() + const result = await tool.execute({ sql: "SELECT 1" }, stubCtx()) + + expect(result.metadata.error).toBe("Grading engine unavailable") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// lineage_check error propagation +// --------------------------------------------------------------------------- +describe("lineage_check error propagation", () => { + beforeEach(() => Dispatcher.reset()) + + test("propagates result.error to metadata", async () => { + Dispatcher.register("lineage.check" as any, async () => ({ + success: false, + error: "Lineage engine not initialized", + data: {}, + })) + + const { LineageCheckTool } = await import("../../src/altimate/tools/lineage-check") + const tool = await LineageCheckTool.init() + const result = await tool.execute({ sql: "SELECT 1", dialect: "snowflake" }, stubCtx()) + + expect(result.metadata.success).toBe(false) + expect(result.metadata.error).toBe("Lineage engine not initialized") + }) + + test("propagates data.error to metadata on partial success", async () => { + Dispatcher.register("lineage.check" as any, async () => ({ + success: true, + data: { error: "Partial lineage: some tables unresolved", column_dict: {} }, + })) + + const { LineageCheckTool } = await import("../../src/altimate/tools/lineage-check") + const tool = await LineageCheckTool.init() + const result = 
await tool.execute({ sql: "SELECT 1", dialect: "snowflake" }, stubCtx()) + + expect(result.metadata.error).toContain("Partial lineage") + }) + + test("propagates exception to metadata.error in catch", async () => { + Dispatcher.register("lineage.check" as any, async () => { + throw new Error("NAPI crash") + }) + + const { LineageCheckTool } = await import("../../src/altimate/tools/lineage-check") + const tool = await LineageCheckTool.init() + const result = await tool.execute({ sql: "SELECT 1", dialect: "snowflake" }, stubCtx()) + + expect(result.metadata.success).toBe(false) + expect(result.metadata.error).toBe("NAPI crash") + }) + + test("handles null result.data without TypeError", async () => { + Dispatcher.register("lineage.check" as any, async () => ({ + success: false, + error: "Engine crashed", + data: null, + })) + + const { LineageCheckTool } = await import("../../src/altimate/tools/lineage-check") + const tool = await LineageCheckTool.init() + const result = await tool.execute({ sql: "SELECT 1", dialect: "snowflake" }, stubCtx()) + + expect(result.metadata.error).toBe("Engine crashed") + }) +}) diff --git a/packages/opencode/test/altimate/tool-error-propagation.test.ts b/packages/opencode/test/altimate/tool-error-propagation.test.ts new file mode 100644 index 000000000..9270440b6 --- /dev/null +++ b/packages/opencode/test/altimate/tool-error-propagation.test.ts @@ -0,0 +1,487 @@ +/** + * Tests that tool error messages propagate to metadata.error + * so telemetry can extract them (instead of logging "unknown error"). + * + * Verifies the fix for AI-5975: 6,905 "unknown error" entries in telemetry + * caused by tools not setting metadata.error on failure paths. + * + * Strategy: register mock dispatcher handlers that return real failure shapes + * (copied from actual production responses), then call the tool's execute() + * and assert metadata.error is populated. 
+ */ + +import { describe, expect, test, beforeAll, afterAll, beforeEach } from "bun:test" +import * as Dispatcher from "../../src/altimate/native/dispatcher" + +// Disable telemetry so tests don't need AppInsights +beforeAll(async () => { + process.env.ALTIMATE_TELEMETRY_DISABLED = "true" + // Trigger the lazy registration hook so it doesn't overwrite mocks later. + // The hook loads all real native handlers on first Dispatcher.call(). + // We trigger it now, then reset() in beforeEach clears them for mock isolation. + try { + await Dispatcher.call("__trigger_hook__" as any, {} as any) + } catch {} +}) +afterAll(() => { + delete process.env.ALTIMATE_TELEMETRY_DISABLED +}) + +// Stub context — tools need a Context object but only use metadata() +function stubCtx(): any { + return { + sessionID: "test", + messageID: "test", + agent: "test", + abort: new AbortController().signal, + messages: [], + metadata: () => {}, + } +} + +// --------------------------------------------------------------------------- +// Helper: mirrors telemetry extraction logic from tool.ts (lines 142-146) +// --------------------------------------------------------------------------- +function telemetryWouldExtract(metadata: Record<string, unknown>): string { + return typeof metadata?.error === "string" ? 
metadata.error : "unknown error" +} + +// --------------------------------------------------------------------------- +// altimate_core_validate — errors in data.errors[].message +// --------------------------------------------------------------------------- +describe("altimate_core_validate error propagation", () => { + beforeEach(() => Dispatcher.reset()) + + test("returns early with clear error when no schema provided", async () => { + const { AltimateCoreValidateTool } = await import("../../src/altimate/tools/altimate-core-validate") + const tool = await AltimateCoreValidateTool.init() + const result = await tool.execute({ sql: "SELECT * FROM users" }, stubCtx()) + + expect(result.metadata.success).toBe(false) + expect(result.metadata.error).toContain("No schema provided") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) + + test("surfaces errors when schema provided but table missing from schema", async () => { + // Mock: dispatcher returns what the real handler produces when SQL references + // a table not in the schema — valid=false with error details in data.errors[]. + // The handler completes normally (success=true), findings live in data fields. 
+ Dispatcher.register("altimate_core.validate" as any, async () => ({ + success: true, + data: { + valid: false, + errors: [ + { + code: "E001", + kind: { type: "TableNotFound" }, + message: "Table 'users' not found", + suggestions: ["orders"], + }, + ], + warnings: [], + }, + })) + + const { AltimateCoreValidateTool } = await import("../../src/altimate/tools/altimate-core-validate") + const tool = await AltimateCoreValidateTool.init() + const result = await tool.execute( + { sql: "SELECT * FROM users", schema_context: { orders: { id: "INT" } } }, + stubCtx(), + ) + + expect(result.metadata.success).toBe(true) + // Validation findings are reported via data fields, not as tool errors + expect(result.metadata.valid).toBe(false) + // The finding message is surfaced in metadata.error for telemetry + expect(result.metadata.error).toContain("Table 'users' not found") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// altimate_core_semantics — errors in data.validation_errors[] +// --------------------------------------------------------------------------- +describe("altimate_core_semantics error propagation", () => { + beforeEach(() => Dispatcher.reset()) + + test("returns early with clear error when no schema provided", async () => { + const { AltimateCoreSemanticsTool } = await import("../../src/altimate/tools/altimate-core-semantics") + const tool = await AltimateCoreSemanticsTool.init() + const result = await tool.execute({ sql: "SELECT * FROM users" }, stubCtx()) + + expect(result.metadata.success).toBe(false) + expect(result.metadata.error).toContain("No schema provided") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) + + test("surfaces errors when schema provided but table missing from schema", async () => { + // Mock: dispatcher returns validation_errors when semantic check can't plan the query. 
+ // Handler completes normally (success=true per ok() contract), but validation_errors + // in the data signal the engine couldn't analyze. The tool wrapper treats this as failure. + Dispatcher.register("altimate_core.semantics" as any, async () => ({ + success: true, + data: { + valid: false, + issues: [], + validation_errors: ["Failed to resolve table 'users' in schema"], + }, + })) + + const { AltimateCoreSemanticsTool } = await import("../../src/altimate/tools/altimate-core-semantics") + const tool = await AltimateCoreSemanticsTool.init() + const result = await tool.execute( + { sql: "SELECT * FROM users", schema_context: { orders: { id: "INT" } } }, + stubCtx(), + ) + + // Handler completed (success=true), but validation_errors are surfaced in metadata.error + expect(result.metadata.success).toBe(true) + expect(result.metadata.error).toContain("Failed to resolve table") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// altimate_core_equivalence — errors in data.validation_errors[] +// --------------------------------------------------------------------------- +describe("altimate_core_equivalence error propagation", () => { + beforeEach(() => Dispatcher.reset()) + + test("returns early with clear error when no schema provided", async () => { + const { AltimateCoreEquivalenceTool } = await import("../../src/altimate/tools/altimate-core-equivalence") + const tool = await AltimateCoreEquivalenceTool.init() + const result = await tool.execute({ sql1: "SELECT * FROM users", sql2: "SELECT * FROM users" }, stubCtx()) + + expect(result.metadata.success).toBe(false) + expect(result.metadata.error).toContain("No schema provided") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) + + test("surfaces errors when schema provided but table missing from schema", async () => { + // Mock: handler completes normally (success=true 
per ok() contract), but + // validation_errors signal the engine couldn't plan the query. + // The equivalence wrapper uses isRealFailure to override success to false. + Dispatcher.register("altimate_core.equivalence" as any, async () => ({ + success: true, + data: { + equivalent: false, + validation_errors: ["Failed to resolve table 'users' in schema"], + }, + })) + + const { AltimateCoreEquivalenceTool } = await import("../../src/altimate/tools/altimate-core-equivalence") + const tool = await AltimateCoreEquivalenceTool.init() + const result = await tool.execute( + { sql1: "SELECT * FROM users", sql2: "SELECT * FROM users", schema_context: { orders: { id: "INT" } } }, + stubCtx(), + ) + + // Equivalence wrapper overrides success via isRealFailure when validation_errors exist + expect(result.metadata.success).toBe(false) + expect(result.metadata.error).toContain("Failed to resolve table") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// altimate_core_fix — errors in data.unfixable_errors[].error.message +// --------------------------------------------------------------------------- +describe("altimate_core_fix error propagation", () => { + beforeEach(() => Dispatcher.reset()) + + test("surfaces unfixable syntax errors from nested structure", async () => { + Dispatcher.register("altimate_core.fix" as any, async () => ({ + success: false, + data: { + fixed: false, + fixed_sql: "SELCT * FORM users", + original_sql: "SELCT * FORM users", + fixes_applied: [], + iterations: 1, + fix_time_ms: 0, + post_fix_valid: false, + unfixable_errors: [ + { + error: { + code: "E000", + kind: { type: "SyntaxError" }, + message: "Syntax error: Expected an SQL statement, found: SELCT", + suggestions: [], + }, + reason: "No automatic fix available for E000", + }, + ], + }, + })) + + const { AltimateCoreFixTool } = await 
import("../../src/altimate/tools/altimate-core-fix") + const tool = await AltimateCoreFixTool.init() + const result = await tool.execute({ sql: "SELCT * FORM users" }, stubCtx()) + + expect(result.metadata.error).toContain("Syntax error") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// altimate_core_correct — errors in data.final_validation.errors[] +// --------------------------------------------------------------------------- +describe("altimate_core_correct error propagation", () => { + beforeEach(() => Dispatcher.reset()) + + test("surfaces errors from data.final_validation.errors[]", async () => { + Dispatcher.register("altimate_core.correct" as any, async () => ({ + success: false, + data: { + original_sql: "SELCT * FORM users", + status: "unfixable", + total_time_ms: 1, + iterations: [ + { iteration: 1, input_sql: "SELCT * FORM users", result: "skipped", validation_errors: ["Syntax error"] }, + ], + final_validation: { + valid: false, + errors: [ + { + code: "E000", + kind: { type: "SyntaxError" }, + message: "Syntax error: Expected an SQL statement, found: SELCT", + suggestions: [], + }, + ], + warnings: [], + }, + final_score: { syntax_valid: true, lint_score: 1, safety_score: 1, complexity_score: 1, overall: 1 }, + }, + })) + + const { AltimateCoreCorrectTool } = await import("../../src/altimate/tools/altimate-core-correct") + const tool = await AltimateCoreCorrectTool.init() + const result = await tool.execute({ sql: "SELCT * FORM users" }, stubCtx()) + + expect(result.metadata.error).toContain("Syntax error") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// sql_explain — error from dispatcher result.error +// --------------------------------------------------------------------------- +describe("sql_explain error 
propagation", () => { + beforeEach(() => Dispatcher.reset()) + + test("surfaces missing password error", async () => { + Dispatcher.register("sql.explain" as any, async () => ({ + success: false, + plan_rows: [], + error: "MissingParameterError: A password must be specified.", + analyzed: false, + })) + + const { SqlExplainTool } = await import("../../src/altimate/tools/sql-explain") + const tool = await SqlExplainTool.init() + const result = await tool.execute({ sql: "SELECT 1", analyze: false }, stubCtx()) + + expect(result.metadata.error).toBe("MissingParameterError: A password must be specified.") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// finops_query_history — error from result.error +// --------------------------------------------------------------------------- +describe("finops_query_history error propagation", () => { + beforeEach(() => Dispatcher.reset()) + + test("surfaces 'not available for unknown warehouses' error", async () => { + Dispatcher.register("finops.query_history" as any, async () => ({ + success: false, + queries: [], + summary: {}, + error: "Query history is not available for unknown warehouses.", + })) + + const { FinopsQueryHistoryTool } = await import("../../src/altimate/tools/finops-query-history") + const tool = await FinopsQueryHistoryTool.init() + const result = await tool.execute({ warehouse: "default", days: 7, limit: 10 }, stubCtx()) + + expect(result.metadata.error).toBe("Query history is not available for unknown warehouses.") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// finops_expensive_queries — error from result.error +// --------------------------------------------------------------------------- +describe("finops_expensive_queries error propagation", () => { + beforeEach(() => 
Dispatcher.reset()) + + test("surfaces error on failure path", async () => { + Dispatcher.register("finops.expensive_queries" as any, async () => ({ + success: false, + queries: [], + query_count: 0, + error: "No warehouse connection configured.", + })) + + const { FinopsExpensiveQueriesTool } = await import("../../src/altimate/tools/finops-expensive-queries") + const tool = await FinopsExpensiveQueriesTool.init() + const result = await tool.execute({ warehouse: "default", days: 7, limit: 20 }, stubCtx()) + + expect(result.metadata.error).toBe("No warehouse connection configured.") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// finops_analyze_credits — error on both !result.success and catch paths +// --------------------------------------------------------------------------- +describe("finops_analyze_credits error propagation", () => { + beforeEach(() => Dispatcher.reset()) + + test("surfaces error on failure path", async () => { + Dispatcher.register("finops.analyze_credits" as any, async () => ({ + success: false, + total_credits: 0, + error: "ACCOUNT_USAGE access denied.", + })) + + const { FinopsAnalyzeCreditsTool } = await import("../../src/altimate/tools/finops-analyze-credits") + const tool = await FinopsAnalyzeCreditsTool.init() + const result = await tool.execute({ warehouse: "default", days: 30, limit: 50 }, stubCtx()) + + expect(result.metadata.error).toBe("ACCOUNT_USAGE access denied.") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) + + test("surfaces error on catch path", async () => { + Dispatcher.register("finops.analyze_credits" as any, async () => { + throw new Error("Connection refused") + }) + + const { FinopsAnalyzeCreditsTool } = await import("../../src/altimate/tools/finops-analyze-credits") + const tool = await FinopsAnalyzeCreditsTool.init() + const result = await tool.execute({ 
warehouse: "default", days: 30, limit: 50 }, stubCtx()) + + expect(result.metadata.error).toBe("Connection refused") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// finops_unused_resources — error from result.error +// --------------------------------------------------------------------------- +describe("finops_unused_resources error propagation", () => { + beforeEach(() => Dispatcher.reset()) + + test("surfaces error on failure path", async () => { + Dispatcher.register("finops.unused_resources" as any, async () => ({ + success: false, + summary: {}, + unused_tables: [], + idle_warehouses: [], + error: "Insufficient privileges.", + })) + + const { FinopsUnusedResourcesTool } = await import("../../src/altimate/tools/finops-unused-resources") + const tool = await FinopsUnusedResourcesTool.init() + const result = await tool.execute({ warehouse: "default", days: 30, limit: 50 }, stubCtx()) + + expect(result.metadata.error).toBe("Insufficient privileges.") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// finops_warehouse_advice — error on both paths +// --------------------------------------------------------------------------- +describe("finops_warehouse_advice error propagation", () => { + beforeEach(() => Dispatcher.reset()) + + test("surfaces error on failure path", async () => { + Dispatcher.register("finops.warehouse_advice" as any, async () => ({ + success: false, + recommendations: [], + warehouse_load: [], + warehouse_performance: [], + error: "Warehouse not found.", + })) + + const { FinopsWarehouseAdviceTool } = await import("../../src/altimate/tools/finops-warehouse-advice") + const tool = await FinopsWarehouseAdviceTool.init() + const result = await tool.execute({ warehouse: "default", days: 14 }, stubCtx()) + + 
expect(result.metadata.error).toBe("Warehouse not found.") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) + + test("surfaces error on catch path", async () => { + Dispatcher.register("finops.warehouse_advice" as any, async () => { + throw new Error("Timeout") + }) + + const { FinopsWarehouseAdviceTool } = await import("../../src/altimate/tools/finops-warehouse-advice") + const tool = await FinopsWarehouseAdviceTool.init() + const result = await tool.execute({ warehouse: "default", days: 14 }, stubCtx()) + + expect(result.metadata.error).toBe("Timeout") + expect(telemetryWouldExtract(result.metadata)).not.toBe("unknown error") + }) +}) + +// --------------------------------------------------------------------------- +// Regression guard: telemetry extraction logic +// --------------------------------------------------------------------------- +describe("telemetry extraction logic (regression guard)", () => { + test("extracts string error", () => { + expect(telemetryWouldExtract({ error: "real error" })).toBe("real error") + }) + + test("falls back to 'unknown error' when error is missing", () => { + expect(telemetryWouldExtract({ success: false })).toBe("unknown error") + }) + + test("falls back to 'unknown error' when error is non-string", () => { + expect(telemetryWouldExtract({ error: 42 })).toBe("unknown error") + }) + + test("falls back to 'unknown error' when metadata is undefined", () => { + expect(telemetryWouldExtract(undefined as any)).toBe("unknown error") + }) + + test("empty string error is still extracted (extractors must avoid producing it)", () => { + // telemetry extracts "" as-is since typeof "" === "string" + // The fix is in extractors: .filter(Boolean) prevents "" from reaching metadata.error + expect(telemetryWouldExtract({ error: "" })).toBe("") + }) +}) + +// --------------------------------------------------------------------------- +// Empty string edge case: extractors must not return "" as an error +// 
--------------------------------------------------------------------------- +describe("extractors handle empty message fields", () => { + beforeEach(() => Dispatcher.reset()) + + test("validate extractor filters out empty messages", async () => { + // Mock: dispatcher returns errors with empty message fields + Dispatcher.register("altimate_core.validate" as any, async () => ({ + success: true, + data: { + valid: false, + errors: [{ code: "E001", kind: { type: "TableNotFound" }, message: "", suggestions: [] }], + warnings: [], + }, + })) + + const { AltimateCoreValidateTool } = await import("../../src/altimate/tools/altimate-core-validate") + const tool = await AltimateCoreValidateTool.init() + const result = await tool.execute( + { sql: "SELECT * FROM nonexistent_table", schema_context: { users: { id: "INT" } } }, + stubCtx(), + ) + // The error should never be empty string — .filter(Boolean) removes it + if (result.metadata.error !== undefined) { + expect(result.metadata.error).not.toBe("") + } + }) +}) diff --git a/packages/opencode/test/altimate/tools/sql-analyze-tool.test.ts b/packages/opencode/test/altimate/tools/sql-analyze-tool.test.ts index 02994ccee..a0a835ae2 100644 --- a/packages/opencode/test/altimate/tools/sql-analyze-tool.test.ts +++ b/packages/opencode/test/altimate/tools/sql-analyze-tool.test.ts @@ -56,10 +56,7 @@ describe("SqlAnalyzeTool.execute: success semantics", () => { }) const tool = await SqlAnalyzeTool.init() - const result = await tool.execute( - { sql: "SELECT * FROM t", dialect: "snowflake" }, - ctx as any, - ) + const result = await tool.execute({ sql: "SELECT * FROM t", dialect: "snowflake" }, ctx as any) expect(result.metadata.success).toBe(true) expect(result.metadata.error).toBeUndefined() @@ -77,10 +74,7 @@ describe("SqlAnalyzeTool.execute: success semantics", () => { }) const tool = await SqlAnalyzeTool.init() - const result = await tool.execute( - { sql: "SELECT id FROM t", dialect: "snowflake" }, - ctx as any, - ) + const result 
= await tool.execute({ sql: "SELECT id FROM t", dialect: "snowflake" }, ctx as any) expect(result.metadata.success).toBe(true) expect(result.output).toContain("No anti-patterns") @@ -98,14 +92,11 @@ describe("SqlAnalyzeTool.execute: success semantics", () => { }) const tool = await SqlAnalyzeTool.init() - const result = await tool.execute( - { sql: "SELEC FROM", dialect: "snowflake" }, - ctx as any, - ) + const result = await tool.execute({ sql: "SELEC FROM", dialect: "snowflake" }, ctx as any) expect(result.metadata.success).toBe(false) expect(result.metadata.error).toBe("syntax error near SELECT") - expect(result.title).toContain("PARSE ERROR") + expect(result.title).toContain("ERROR") }) test("dispatcher throws → catch block returns ERROR title", async () => { @@ -113,10 +104,7 @@ describe("SqlAnalyzeTool.execute: success semantics", () => { dispatcherSpy = spyOn(Dispatcher, "call").mockRejectedValue(new Error("native crash")) const tool = await SqlAnalyzeTool.init() - const result = await tool.execute( - { sql: "SELECT 1", dialect: "snowflake" }, - ctx as any, - ) + const result = await tool.execute({ sql: "SELECT 1", dialect: "snowflake" }, ctx as any) expect(result.title).toBe("Analyze: ERROR") expect(result.metadata.success).toBe(false) @@ -144,10 +132,7 @@ describe("SqlAnalyzeTool.execute: formatAnalysis output", () => { }) const tool = await SqlAnalyzeTool.init() - const result = await tool.execute( - { sql: "x", dialect: "snowflake" }, - ctx as any, - ) + const result = await tool.execute({ sql: "x", dialect: "snowflake" }, ctx as any) expect(result.output).toContain("Found 1 issue ") expect(result.output).not.toContain("1 issues") @@ -179,10 +164,7 @@ describe("SqlAnalyzeTool.execute: formatAnalysis output", () => { }) const tool = await SqlAnalyzeTool.init() - const result = await tool.execute( - { sql: "x", dialect: "snowflake" }, - ctx as any, - ) + const result = await tool.execute({ sql: "x", dialect: "snowflake" }, ctx as any) 
expect(result.output).toContain("2 issues") expect(result.output).toContain("[WARNING] lint")