# LLM Chain: Add foundation for chain execution with database schema #616
Base: `main` · Commits: 9ac86ea, 6451bb0, c9f94e2
**New file: Alembic migration (revision `048`), creating the `llm_chain` table and linking `llm_call` to it:**

```python
"""Create llm_chain table

Revision ID: 048
Revises: 047
Create Date: 2026-02-20 00:00:00.000000

"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB

revision = "048"
down_revision = "047"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # 1. Create llm_chain table
    op.create_table(
        "llm_chain",
        sa.Column(
            "id",
            sa.Uuid(),
            nullable=False,
            comment="Unique identifier for the LLM chain record",
        ),
        sa.Column(
            "job_id",
            sa.Uuid(),
            nullable=False,
            comment="Reference to the parent job (status tracked in job table)",
        ),
        sa.Column(
            "project_id",
            sa.Integer(),
            nullable=False,
            comment="Reference to the project this LLM call belongs to",
        ),
        sa.Column(
            "organization_id",
            sa.Integer(),
            nullable=False,
            comment="Reference to the organization this LLM call belongs to",
        ),
        sa.Column(
            "status",
            sa.String(),
            nullable=False,
            server_default="pending",
            comment="Chain execution status (pending, running, failed, completed)",
        ),
        sa.Column(
            "error",
            sa.Text(),
            nullable=True,
            comment="Error message if the chain execution failed",
        ),
        sa.Column(
            "block_sequences",
            JSONB(),
            nullable=True,
            comment="Ordered list of llm_call UUIDs as blocks complete",
        ),
        sa.Column(
            "total_blocks",
            sa.Integer(),
            nullable=False,
            comment="Total number of blocks to execute",
        ),
        sa.Column(
            "number_of_blocks_processed",
            sa.Integer(),
            nullable=False,
            server_default="0",
            comment="Number of blocks processed so far (used for tracking progress)",
        ),
        sa.Column(
            "input",
            sa.String(),
            nullable=False,
            comment="First block user's input - text string, binary data, or file path for multimodal",
        ),
        sa.Column(
            "output",
            JSONB(),
            nullable=True,
            comment="Last block's final output (set on chain completion)",
        ),
        sa.Column(
            "configs",
            JSONB(),
            nullable=True,
            comment="Ordered list of block configs as submitted in the request",
        ),
        sa.Column(
            "total_usage",
            JSONB(),
            nullable=True,
            comment="Aggregated token usage: {input_tokens, output_tokens, total_tokens}",
        ),
        sa.Column(
            "metadata",
            JSONB(),
            nullable=True,
            comment="Future-proof extensibility catch-all",
        ),
        sa.Column(
            "started_at",
            sa.DateTime(),
            nullable=True,
            comment="Timestamp when chain execution started",
        ),
        sa.Column(
            "completed_at",
            sa.DateTime(),
            nullable=True,
            comment="Timestamp when chain execution completed",
        ),
        sa.Column(
            "created_at",
            sa.DateTime(),
            nullable=False,
            comment="Timestamp when the chain record was created",
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(),
            nullable=False,
            comment="Timestamp when the chain record was last updated",
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(["job_id"], ["job.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["project_id"], ["project.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(
            ["organization_id"], ["organization.id"], ondelete="CASCADE"
        ),
    )

    op.create_index(
        "idx_llm_chain_job_id",
        "llm_chain",
        ["job_id"],
    )

    # 2. Add chain_id FK column to llm_call table
    op.add_column(
        "llm_call",
        sa.Column(
            "chain_id",
            sa.Uuid(),
            nullable=True,
            comment="Reference to the parent chain (NULL for standalone /llm/call requests)",
        ),
    )
    op.create_foreign_key(
        "fk_llm_call_chain_id",
        "llm_call",
        "llm_chain",
        ["chain_id"],
        ["id"],
        ondelete="SET NULL",
    )
    op.create_index(
        "idx_llm_call_chain_id",
        "llm_call",
        ["chain_id"],
        postgresql_where=sa.text("chain_id IS NOT NULL"),
    )

    op.execute("ALTER TYPE jobtype ADD VALUE IF NOT EXISTS 'LLM_CHAIN'")


def downgrade() -> None:
    op.drop_index("idx_llm_call_chain_id", table_name="llm_call")
    op.drop_constraint("fk_llm_call_chain_id", "llm_call", type_="foreignkey")
    op.drop_column("llm_call", "chain_id")

    op.drop_index("idx_llm_chain_job_id", table_name="llm_chain")
    op.drop_table("llm_chain")
```
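One migration caveat: `downgrade()` drops the table, index, and FK column, but the `LLM_CHAIN` enum value added by `op.execute` persists, since PostgreSQL has no `ALTER TYPE ... DROP VALUE`. A minimal sketch for checking the label after running the migration (assuming a `DATABASE_URL` environment variable; the enum name `jobtype` comes from the migration above):

```python
# Sketch only: assumes a DATABASE_URL env var pointing at the migrated
# database; "jobtype" is the enum name used in the migration above.
import os

import sqlalchemy as sa

engine = sa.create_engine(os.environ["DATABASE_URL"])
with engine.connect() as conn:
    labels = conn.execute(
        sa.text(
            "SELECT enumlabel FROM pg_enum "
            "JOIN pg_type ON pg_enum.enumtypid = pg_type.oid "
            "WHERE pg_type.typname = 'jobtype'"
        )
    ).scalars().all()

# 'LLM_CHAIN' should be present after upgrade, and will still be present
# after a downgrade, since enum values cannot be dropped.
assert "LLM_CHAIN" in labels
```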
**New file: endpoint description, loaded as `llm/llm_chain.md`:**
Execute a chain of LLM calls sequentially, where each block's output becomes the next block's input.

This endpoint initiates an asynchronous LLM chain job. The request is queued for processing, and results are delivered via the callback URL when complete.

### Key Parameters

**`query`** (required) - Initial query input for the first block in the chain:
- `input` (required, string, min 1 char): User question/prompt/query
- `conversation` (optional, object): Conversation configuration
  - `id` (optional, string): Existing conversation ID to continue
  - `auto_create` (optional, boolean, default false): Create a new conversation if no ID is provided
  - **Note**: Cannot specify both `id` and `auto_create=true`

**`blocks`** (required, array, min 1 block) - Ordered list of blocks to execute sequentially. Each block contains:

- `config` (required) - Configuration for this block's LLM call (choose exactly one mode):

  - **Mode 1: Stored Configuration**
    - `id` (UUID): Configuration ID
    - `version` (integer >= 1): Version number
    - **Both required together**
    - **Note**: When using a stored configuration, do not include the `blob` field in the request body

  - **Mode 2: Ad-hoc Configuration**
    - `blob` (object): Complete configuration object
      - `completion` (required, object): Completion configuration
        - `provider` (required, string): Provider type - either `"openai"` (Kaapi abstraction) or `"openai-native"` (pass-through)
        - `params` (required, object): Parameter structure depends on the provider type (see schema for the detailed structure)
      - `prompt_template` (optional, object): Template for text interpolation
        - `template` (required, string): Template string with an `{{input}}` placeholder, replaced with the block's input before execution
    - **Notes**
      - When using an ad-hoc configuration, do not include the `id` and `version` fields
      - When using the Kaapi abstraction, parameters not supported by the selected provider or model are automatically suppressed; if any parameters are ignored, a list of warnings is included in `metadata.warnings`
    - **Recommendation**: Use stored configs (Mode 1) for production; use ad-hoc configs only for testing/validation
    - **Schema**: Check the API schema or the examples below for the complete parameter structure for each provider type

- `include_provider_raw_response` (optional, boolean, default false):
  - When true, includes the unmodified raw response from the LLM provider for this block

- `intermediate_callback` (optional, boolean, default false):
  - When true, sends an intermediate callback after this block completes, with the block's response, usage, and position in the chain

**`callback_url`** (optional, HTTPS URL):
- Webhook endpoint to receive the final response and intermediate callbacks
- Must be a valid HTTPS URL
- If not provided, response is only accessible through job status
> **Review comment:** "Accessible through job status" is misleading — the endpoint currently does not return a job ID. Without one, callers have no way to look the job up.
**`request_metadata`** (optional, object):
- Custom JSON metadata
- Passed through unchanged in the response
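Putting the parameters together, a hypothetical request could look like the sketch below. It is illustrative only: the base URL, auth header, config ID, and callback host are placeholders, not values from this PR.

```python
# Illustrative request sketch; host, API-key header, config ID, and
# callback URL are placeholders, not values from this PR.
import httpx

payload = {
    "query": {"input": "Summarize the attached incident report."},
    "blocks": [
        {
            # Mode 1: stored configuration (id + version required together)
            "config": {"id": "11111111-2222-3333-4444-555555555555", "version": 1},
            "intermediate_callback": True,
        },
        {
            # Mode 2: ad-hoc configuration (blob only; no id/version)
            "config": {
                "blob": {
                    "completion": {
                        "provider": "openai",
                        "params": {"model": "gpt-4o"},  # shape depends on provider
                    },
                    "prompt_template": {
                        "template": "Draft a customer update based on: {{input}}"
                    },
                }
            },
            "include_provider_raw_response": False,
        },
    ],
    "callback_url": "https://example.com/hooks/llm-chain",
    "request_metadata": {"trace": "demo-001"},
}

response = httpx.post(
    "https://kaapi.example.com/llm/chain",  # placeholder host
    json=payload,
    headers={"X-API-Key": "..."},  # auth scheme not shown in this diff
)
print(response.json())  # expected: a success envelope with a generic message
```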
### Note
- Input guardrails from the first block's config are applied before chain execution starts
- Output guardrails from the last block's config are applied after all blocks complete
- If any block fails, the chain stops immediately — no subsequent blocks are executed (see the sketch after this list)
- A `warnings` list is automatically added to the response metadata when using Kaapi configs if any parameters are suppressed or adjusted (e.g., `temperature` on reasoning models)
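A minimal sketch of the sequential semantics described in these notes (not the worker implementation from this PR; `call_llm`, `BlockResult`, and `ChainResult` are hypothetical stand-ins):

```python
# Conceptual sketch of chain semantics, not code from this PR.
from dataclasses import dataclass


@dataclass
class BlockResult:
    output: str
    error: str | None = None  # non-None means the block failed


@dataclass
class ChainResult:
    status: str  # "completed" or "failed"
    output: str | None = None
    error: str | None = None


def call_llm(config: dict, block_input: str) -> BlockResult:
    # Stand-in for a real provider call using the block's config.
    return BlockResult(output=f"<response to: {block_input!r}>")


def run_chain(configs: list[dict], first_input: str) -> ChainResult:
    current = first_input
    for config in configs:
        result = call_llm(config, current)
        if result.error is not None:
            # A failed block stops the chain; later blocks never run.
            return ChainResult(status="failed", error=result.error)
        current = result.output  # this block's output is the next block's input
    return ChainResult(status="completed", output=current)
```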
| --- | ||
**New file: `backend/app/api/routes/llm_chain.py`:**
```python
import logging

from fastapi import APIRouter, Depends
from app.api.deps import AuthContextDep, SessionDep
from app.api.permissions import Permission, require_permission
from app.models import LLMChainRequest, LLMChainResponse, Message
from app.services.llm.jobs import start_chain_job
from app.utils import APIResponse, validate_callback_url, load_description

logger = logging.getLogger(__name__)

router = APIRouter(tags=["LLM Chain"])
llm_callback_router = APIRouter()


@llm_callback_router.post(
    "{$callback_url}",
```
> **Review comment (on lines 16–17):** Fix invalid OpenAPI runtime expression. In OpenAPI 3.0.x, the URL template key inside a Callback Object is a runtime expression, evaluated at runtime to produce the callback URL. The grammar (ABNF):
>
> ```text
> expression       = ( "$url" / "$method" / "$statusCode"
>                    / "$request." source / "$response." source )
> source           = ( header-reference / query-reference / path-reference / body-reference )
> header-reference = "header." token
> query-reference  = "query." name
> path-reference   = "path." name
> body-reference   = "body" ["#" json-pointer]
> ```
>
> Runtime expressions can be embedded in a string by wrapping them in `{}` (e.g. `{$request.body#/callback_url}`). `{$callback_url}` is not derivable from this grammar, so the generated OpenAPI document is invalid and tooling cannot resolve the callback URL from the request.
>
> Proposed fix:
>
> ```diff
> -    "{$callback_url}",
> +    "{$request.body#/callback_url}",
> ```
```python
    name="llm_chain_callback",
)
def llm_callback_notification(body: APIResponse[LLMChainResponse]):
```
> **Review comment:** Missing return type hints on both route handlers — coding guideline violation ("Always add type hints to all function parameters and return values in Python code"). Also applies to lines 39–41.
>
> 🐛 Proposed fix:
>
> ```diff
> -def llm_callback_notification(body: APIResponse[LLMChainResponse]):
> +def llm_callback_notification(body: APIResponse[LLMChainResponse]) -> None:
> ```
>
> ```diff
> -def llm_chain(
> -    _current_user: AuthContextDep, _session: SessionDep, request: LLMChainRequest
> -):
> +def llm_chain(
> +    _current_user: AuthContextDep, _session: SessionDep, request: LLMChainRequest
> +) -> APIResponse[Message]:
> ```
```python
    """
    Callback endpoint specification for LLM chain completion.

    The callback will receive:
    - On success: APIResponse with success=True and data containing LLMChainResponse
    - On failure: APIResponse with success=False and error message
    - metadata field will always be included if provided in the request
    """
    ...


@router.post(
    "/llm/chain",
    description=load_description("llm/llm_chain.md"),
    response_model=APIResponse[Message],
    callbacks=llm_callback_router.routes,
    dependencies=[Depends(require_permission(Permission.REQUIRE_PROJECT))],
)
def llm_chain(
    _current_user: AuthContextDep, _session: SessionDep, request: LLMChainRequest
):
    """
    Endpoint to initiate an LLM chain as a background job.
    """
    project_id = _current_user.project_.id
    organization_id = _current_user.organization_.id

    if request.callback_url:
        validate_callback_url(str(request.callback_url))

    start_chain_job(
        db=_session,
        request=request,
        project_id=project_id,
        organization_id=organization_id,
    )

    return APIResponse.success_response(
        data=Message(
            message="Your response is being generated and will be delivered via callback."
        ),
    )
```
> **Review comment (on lines 48–62):** Unconditional "callback" success message even when `callback_url` is omitted. `callback_url` is optional on `LLMChainRequest`, yet the response always promises delivery via callback and returns no job ID, leaving such callers with no way to retrieve results. Consider either (a) requiring `callback_url` (minimal guard below), or (b) returning an identifier the caller can use to look up the job.
>
> 🐛 Minimal guard (option a — require `callback_url`):
>
> ```diff
> +from fastapi import HTTPException, status
>
>  if request.callback_url:
>      validate_callback_url(str(request.callback_url))
> +else:
> +    raise HTTPException(
> +        status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
> +        detail="callback_url is required for LLM chain execution.",
> +    )
> ```
> **Review comment (on the `query.input` documentation):** `input` documented as `string` only — audio input support is omitted. `QueryParams.input` is a discriminated union of `TextInput` and `AudioInput`. Describing it as `string` misleads consumers who may want to use audio inputs in chains.
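For completeness, a hypothetical receiver for the callback payload. The top-level shape follows the `APIResponse[LLMChainResponse]` annotation and the docstring above (`success`, `data`, `error`, `metadata`); the field types here are assumptions, not the actual `app.models` definitions.

```python
# Hypothetical callback receiver: field names follow the docstring above,
# but the types are assumptions, not the real LLMChainResponse model.
from typing import Any

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class ChainCallbackBody(BaseModel):
    success: bool
    data: dict[str, Any] | None = None      # final output, usage, etc. on success
    error: str | None = None                # error message on failure
    metadata: dict[str, Any] | None = None  # echoes request_metadata if provided


@app.post("/hooks/llm-chain")
def receive_chain_callback(body: ChainCallbackBody) -> dict[str, str]:
    if body.success:
        # Persist or act on the chain's final output here.
        ...
    else:
        # Handle the failure; body.error carries the message.
        ...
    return {"status": "received"}
```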