diff --git a/crates/pu-engine/src/engine.rs b/crates/pu-engine/src/engine.rs
deleted file mode 100644
index c908f3a..0000000
--- a/crates/pu-engine/src/engine.rs
+++ /dev/null
@@ -1,4864 +0,0 @@
-use std::collections::{HashMap, HashSet};
-use std::io::{BufRead, Write};
-use std::os::fd::OwnedFd;
-use std::path::{Path, PathBuf};
-use std::sync::Arc;
-use std::time::{Duration, Instant};
-
-use tokio::sync::Mutex;
-
-use indexmap::IndexMap;
-use pu_core::config;
-use pu_core::error::PuError;
-use pu_core::manifest;
-use pu_core::paths;
-use pu_core::protocol::{
-    AgentConfigInfo, AgentDefInfo, AgentStatusReport, GridCommand, KillTarget, PROTOCOL_VERSION,
-    Request, Response, ScheduleInfo, ScheduleTriggerPayload, SuspendTarget, SwarmDefInfo,
-    SwarmRosterEntryPayload, TemplateInfo,
-};
-use pu_core::types::{AgentEntry, AgentStatus, Manifest, WorktreeEntry, WorktreeStatus};
-use tokio::sync::OnceCell;
-
-use crate::agent_monitor;
-use crate::daemon_lifecycle;
-use crate::git;
-use crate::output_buffer::OutputBuffer;
-use crate::pty_manager::{AgentHandle, NativePtyHost, SpawnConfig};
-
-/// Parameters for spawning an agent, extracted to avoid too many positional args.
-struct SpawnParams {
-    project_root: String,
-    prompt: String,
-    agent_type: String,
-    name: Option<String>,
-    base: Option<String>,
-    root: bool,
-    worktree: Option<String>,
-    terminal_command: Option<String>,
-    /// Skip auto-mode launch args for this spawn. One-off override;
-    /// does not affect resume (resume always reads from config).
-    no_auto: bool,
-    /// Extra CLI args from --agent-args, appended after launch args.
-    extra_args: Vec<String>,
-    plan_mode: bool,
-    no_trigger: bool,
-    /// Name of trigger to bind (from --trigger flag)
-    trigger: Option<String>,
-}
-
-struct SaveTriggerParams {
-    project_root: String,
-    name: String,
-    description: Option<String>,
-    on: String,
-    sequence: Vec,
-    variables: std::collections::HashMap,
-    scope: String,
-}
-
-pub struct Engine {
-    start_time: Instant,
-    pty_host: NativePtyHost,
-    sessions: Arc<Mutex<HashMap<String, AgentHandle>>>,
-    pending_initial_inputs: Arc<Mutex<HashMap<String, Vec<u8>>>>,
-    login_env: Arc<OnceCell<Vec<(String, String)>>>,
-    reaped_projects: Arc<std::sync::Mutex<HashSet<String>>>,
-    /// Per-project broadcast channels for grid commands.
-    grid_channels: Arc<Mutex<HashMap<String, tokio::sync::broadcast::Sender<GridCommand>>>>,
-    /// Per-project broadcast channels for status push updates.
-    status_channels: Arc<Mutex<HashMap<String, tokio::sync::broadcast::Sender<()>>>>,
-    /// Projects that have been initialized or used — scheduler scans these.
-    registered_projects: Arc<std::sync::Mutex<HashSet<String>>>,
-}
-
-impl Default for Engine {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-/// Build a `ConfigReport` response from a loaded config.
-/// Filters out the terminal agent (no launch args to configure).
-fn config_to_report(cfg: &pu_core::types::Config) -> Response { - let agents = cfg - .agents - .iter() - .filter(|(name, _)| name.as_str() != "terminal") - .map(|(name, ac)| { - let resolved = pu_core::types::resolved_launch_args(name, ac.launch_args.as_deref()); - AgentConfigInfo { - name: ac.name.clone(), - command: ac.command.clone(), - launch_args: ac.launch_args.clone(), - resolved_launch_args: resolved, - interactive: ac.interactive, - } - }) - .collect(); - Response::ConfigReport { - default_agent: cfg.default_agent.clone(), - agents, - } -} - -impl Engine { - pub fn new() -> Self { - Self { - start_time: Instant::now(), - pty_host: NativePtyHost::new(), - sessions: Arc::new(Mutex::new(HashMap::new())), - pending_initial_inputs: Arc::new(Mutex::new(HashMap::new())), - login_env: Arc::new(OnceCell::new()), - reaped_projects: Arc::new(std::sync::Mutex::new(HashSet::new())), - grid_channels: Arc::new(Mutex::new(HashMap::new())), - status_channels: Arc::new(Mutex::new(HashMap::new())), - registered_projects: Arc::new(std::sync::Mutex::new(HashSet::new())), - } - } - - /// Start a background task that periodically removes session handles for - /// processes that have exited naturally, and cleans up broadcast channels - /// with no subscribers. Without this, HashMap entries leak. - pub fn start_session_reaper(self: &Arc) { - let sessions = self.sessions.clone(); - let pending_initial_inputs = self.pending_initial_inputs.clone(); - let grid_channels = self.grid_channels.clone(); - let status_channels = self.status_channels.clone(); - tokio::spawn(async move { - let mut interval = tokio::time::interval(Duration::from_secs(30)); - interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - loop { - interval.tick().await; - - // Reap dead sessions - let dead_ids: Vec = { - let mut sessions = sessions.lock().await; - let dead: Vec = sessions - .iter() - .filter(|(_, handle)| handle.exit_rx.borrow().is_some()) - .map(|(id, _)| id.clone()) - .collect(); - for id in &dead { - sessions.remove(id); - } - dead - }; - if !dead_ids.is_empty() { - let mut pending_initial_inputs = pending_initial_inputs.lock().await; - for id in &dead_ids { - pending_initial_inputs.remove(id); - } - tracing::debug!(count = dead_ids.len(), "reaped dead session handles"); - } - - // Clean up grid channels with no subscribers - { - let mut channels = grid_channels.lock().await; - channels.retain(|_, tx| tx.receiver_count() > 0); - } - - // Clean up status channels with no subscribers - { - let mut channels = status_channels.lock().await; - channels.retain(|_, tx| tx.receiver_count() > 0); - } - } - }); - } - - async fn resolve_login_env() -> Vec<(String, String)> { - let shell = std::env::var("SHELL").unwrap_or_else(|_| "/bin/zsh".into()); - match tokio::process::Command::new(&shell) - .args(["-li", "-c", "env -0"]) - .stdin(std::process::Stdio::null()) - .stderr(std::process::Stdio::null()) - .output() - .await - { - Ok(output) if output.status.success() => output - .stdout - .split(|&b| b == 0) - .filter_map(|entry| { - let s = std::str::from_utf8(entry).ok()?; - let (k, v) = s.split_once('=')?; - if k.is_empty() { - return None; - } - Some((k.to_string(), v.to_string())) - }) - .collect(), - // Fallback: use the daemon's own env - _ => std::env::vars().collect(), - } - } - - fn register_project(&self, project_root: &str) { - if !project_root.is_empty() { - if let Ok(mut projects) = self.registered_projects.lock() { - projects.insert(project_root.to_string()); - } - } - } - - pub fn 
registered_projects(&self) -> Vec { - self.registered_projects - .lock() - .map(|p| p.iter().cloned().collect()) - .unwrap_or_default() - } - - pub async fn handle_request(&self, request: Request) -> Response { - // Register project for any project-scoped request - match &request { - Request::Init { project_root } - | Request::Spawn { project_root, .. } - | Request::CreateWorktree { project_root, .. } - | Request::Status { project_root, .. } - | Request::Kill { project_root, .. } - | Request::ListTemplates { project_root } - | Request::ListAgentDefs { project_root } - | Request::ListSwarmDefs { project_root } - | Request::ListSchedules { project_root } - | Request::SaveSchedule { project_root, .. } - | Request::EnableSchedule { project_root, .. } - | Request::DisableSchedule { project_root, .. } - | Request::ListTriggers { project_root } - | Request::SaveTrigger { project_root, .. } - | Request::EvaluateGate { project_root, .. } - | Request::Diff { project_root, .. } - | Request::GetConfig { project_root } - | Request::UpdateAgentConfig { project_root, .. } - | Request::Pulse { project_root, .. } - | Request::AssignTrigger { project_root, .. } => { - self.register_project(project_root); - } - _ => {} - } - - match request { - Request::Health => self.handle_health().await, - Request::Init { project_root } => self.handle_init(&project_root).await, - Request::Rename { - project_root, - agent_id, - name, - } => self.handle_rename(&project_root, &agent_id, &name).await, - Request::AssignTrigger { - project_root, - agent_id, - trigger_name, - } => { - self.handle_assign_trigger(&project_root, &agent_id, &trigger_name) - .await - } - Request::GetConfig { project_root } => self.handle_get_config(&project_root).await, - Request::UpdateAgentConfig { - project_root, - agent_name, - launch_args, - } => { - self.handle_update_agent_config(&project_root, &agent_name, launch_args) - .await - } - Request::Shutdown => Response::ShuttingDown, - Request::Status { - project_root, - agent_id, - } => self.handle_status(&project_root, agent_id.as_deref()).await, - Request::SpawnShell { cwd } => self.handle_spawn_shell(&cwd).await, - Request::Spawn { - project_root, - prompt, - agent, - name, - base, - root, - worktree, - command, - no_auto, - extra_args, - plan_mode, - no_trigger, - trigger, - } => { - self.handle_spawn(SpawnParams { - project_root, - prompt, - agent_type: agent, - name, - base, - root, - worktree, - terminal_command: command, - no_auto, - extra_args, - plan_mode, - no_trigger, - trigger, - }) - .await - } - Request::CreateWorktree { - project_root, - name, - base, - } => self.handle_create_worktree(&project_root, name, base).await, - Request::Kill { - project_root, - target, - exclude, - } => self.handle_kill(&project_root, target, &exclude).await, - Request::Suspend { - project_root, - target, - } => self.handle_suspend(&project_root, target).await, - Request::Resume { - project_root, - agent_id, - } => self.handle_resume(&project_root, &agent_id).await, - Request::Logs { agent_id, tail } => self.handle_logs(&agent_id, tail).await, - Request::Attach { agent_id } => self.handle_attach(&agent_id).await, - Request::Input { - agent_id, - data, - submit, - } => self.handle_input(&agent_id, &data, submit).await, - Request::Resize { - agent_id, - cols, - rows, - } => self.handle_resize(&agent_id, cols, rows).await, - Request::SubscribeGrid { project_root } => { - self.handle_subscribe_grid(&project_root).await - } - Request::SubscribeStatus { project_root } => { - 
self.handle_subscribe_status(&project_root).await - } - Request::GridCommand { - project_root, - command, - } => self.handle_grid_command(&project_root, command).await, - Request::DeleteWorktree { - project_root, - worktree_id, - } => { - self.handle_delete_worktree(&project_root, &worktree_id) - .await - } - // Template CRUD - Request::ListTemplates { project_root } => { - self.handle_list_templates(&project_root).await - } - Request::GetTemplate { project_root, name } => { - self.handle_get_template(&project_root, &name).await - } - Request::SaveTemplate { - project_root, - name, - description, - agent, - body, - scope, - command, - } => { - self.handle_save_template( - &project_root, - &name, - &description, - &agent, - &body, - &scope, - command, - ) - .await - } - Request::DeleteTemplate { - project_root, - name, - scope, - } => { - self.handle_delete_template(&project_root, &name, &scope) - .await - } - // Agent def CRUD - Request::ListAgentDefs { project_root } => { - self.handle_list_agent_defs(&project_root).await - } - Request::GetAgentDef { project_root, name } => { - self.handle_get_agent_def(&project_root, &name).await - } - Request::SaveAgentDef { - project_root, - name, - agent_type, - template, - inline_prompt, - tags, - scope, - available_in_command_dialog, - icon, - command, - } => { - self.handle_save_agent_def( - &project_root, - &name, - &agent_type, - template, - inline_prompt, - tags, - &scope, - available_in_command_dialog, - icon, - command, - ) - .await - } - Request::DeleteAgentDef { - project_root, - name, - scope, - } => { - self.handle_delete_agent_def(&project_root, &name, &scope) - .await - } - // Swarm def CRUD - Request::ListSwarmDefs { project_root } => { - self.handle_list_swarm_defs(&project_root).await - } - Request::GetSwarmDef { project_root, name } => { - self.handle_get_swarm_def(&project_root, &name).await - } - Request::SaveSwarmDef { - project_root, - name, - worktree_count, - worktree_template, - roster, - include_terminal, - scope, - } => { - self.handle_save_swarm_def( - &project_root, - &name, - worktree_count, - &worktree_template, - roster, - include_terminal, - &scope, - ) - .await - } - Request::DeleteSwarmDef { - project_root, - name, - scope, - } => { - self.handle_delete_swarm_def(&project_root, &name, &scope) - .await - } - // Execution - Request::RunSwarm { - project_root, - swarm_name, - vars, - } => { - self.handle_run_swarm(&project_root, &swarm_name, vars) - .await - } - // Schedule CRUD - Request::ListSchedules { project_root } => { - self.handle_list_schedules(&project_root).await - } - Request::GetSchedule { project_root, name } => { - self.handle_get_schedule(&project_root, &name).await - } - Request::SaveSchedule { - project_root, - name, - enabled, - recurrence, - start_at, - trigger, - target, - scope, - root, - agent_name, - } => { - self.handle_save_schedule( - &project_root, - &name, - enabled, - &recurrence, - start_at, - trigger, - &target, - &scope, - root, - agent_name, - ) - .await - } - Request::DeleteSchedule { - project_root, - name, - scope, - } => { - self.handle_delete_schedule(&project_root, &name, &scope) - .await - } - Request::EnableSchedule { project_root, name } => { - self.handle_enable_schedule(&project_root, &name).await - } - Request::DisableSchedule { project_root, name } => { - self.handle_disable_schedule(&project_root, &name).await - } - // Trigger CRUD - Request::ListTriggers { project_root } => { - self.handle_list_triggers(&project_root).await - } - Request::GetTrigger { project_root, name } 
=> { - self.handle_get_trigger(&project_root, &name).await - } - Request::SaveTrigger { - project_root, - name, - description, - on, - sequence, - variables, - scope, - } => { - self.handle_save_trigger(SaveTriggerParams { - project_root, - name, - description, - on, - sequence, - variables, - scope, - }) - .await - } - Request::DeleteTrigger { - project_root, - name, - scope, - } => { - self.handle_delete_trigger(&project_root, &name, &scope) - .await - } - Request::EvaluateGate { - event, - project_root, - worktree_path, - } => { - self.handle_evaluate_gate(&event, &project_root, &worktree_path) - .await - } - Request::Diff { - project_root, - worktree_id, - stat, - } => { - self.handle_diff(&project_root, worktree_id.as_deref(), stat) - .await - } - Request::Pulse { project_root } => self.handle_pulse(&project_root).await, - } - } - - async fn handle_health(&self) -> Response { - let sessions = self.sessions.lock().await; - Response::HealthReport { - pid: std::process::id(), - uptime_seconds: self.start_time.elapsed().as_secs(), - protocol_version: PROTOCOL_VERSION, - projects: vec![], - agent_count: sessions.len(), - } - } - - async fn handle_init(&self, project_root: &str) -> Response { - let project_root = project_root.to_string(); - tokio::task::spawn_blocking(move || { - let root = Path::new(&project_root); - let pu_dir = paths::pu_dir(root); - - if let Err(e) = std::fs::create_dir_all(&pu_dir) { - return Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to create .pu directory: {e}"), - }; - } - - // Atomic check-and-create via O_EXCL — prevents TOCTOU race - let manifest_path = paths::manifest_path(root); - let file = match std::fs::OpenOptions::new() - .write(true) - .create_new(true) - .open(&manifest_path) - { - Ok(f) => f, - Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => { - Self::reconcile_agents_on_init(&project_root); - return Response::InitResult { created: false }; - } - Err(e) => { - return Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to create manifest: {e}"), - }; - } - }; - - let m = Manifest::new(project_root.clone()); - let content = match serde_json::to_string_pretty(&m) { - Ok(c) => c + "\n", - Err(e) => { - let _ = std::fs::remove_file(&manifest_path); - return Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to serialize manifest: {e}"), - }; - } - }; - let mut file = file; - if let Err(e) = file - .write_all(content.as_bytes()) - .and_then(|_| file.sync_all()) - { - let _ = std::fs::remove_file(&manifest_path); - return Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to write manifest: {e}"), - }; - } - - if let Err(e) = config::write_default_config(root) { - return Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to write config: {e}"), - }; - } - - Response::InitResult { created: true } - }) - .await - .unwrap_or_else(|e| Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }) - } - - async fn handle_get_config(&self, project_root: &str) -> Response { - let pr = project_root.to_string(); - tokio::task::spawn_blocking(move || { - let root = Path::new(&pr); - match config::load_config_strict(root) { - Ok(cfg) => config_to_report(&cfg), - Err(e) => Response::Error { - code: e.code().into(), - message: e.to_string(), - }, - } - }) - .await - .unwrap_or_else(|e| Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }) - } - - async fn 
handle_update_agent_config( - &self, - project_root: &str, - agent_name: &str, - launch_args: Option>, - ) -> Response { - let pr = project_root.to_string(); - let name = agent_name.to_string(); - tokio::task::spawn_blocking(move || { - let root = Path::new(&pr); - match config::update_agent_config(root, &name, launch_args) { - Ok(cfg) => config_to_report(&cfg), - Err(e) => Response::Error { - code: e.code().into(), - message: e.to_string(), - }, - } - }) - .await - .unwrap_or_else(|e| Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }) - } - - async fn handle_status(&self, project_root: &str, agent_id: Option<&str>) -> Response { - // On first status call per project, reap agents whose PIDs are dead. - // Fire-and-forget: first status returns immediately, next refresh corrects. - let should_reap = { - let mut reaped = self.reaped_projects.lock().unwrap(); - reaped.insert(project_root.to_string()) - }; // MutexGuard dropped here — before any .await - if should_reap { - let pr = project_root.to_string(); - tokio::spawn(async move { - tokio::task::spawn_blocking(move || Self::reap_stale_agents(&pr)) - .await - .ok(); - }); - } - - if let Some(id) = agent_id { - let m = match self.read_manifest_async(project_root).await { - Ok(m) => m, - Err(e) => return Self::error_response(&e), - }; - match m.find_agent(id) { - Some(loc) => { - let (agent, wt_id) = match loc { - pu_core::types::AgentLocation::Root(a) => (a, None), - pu_core::types::AgentLocation::Worktree { worktree, agent } => { - (agent, Some(worktree.id.clone())) - } - }; - let sessions = self.sessions.lock().await; - Response::AgentStatus(self.build_agent_status_report(agent, &sessions, wt_id)) - } - None => Self::agent_not_found(id), - } - } else { - match self.compute_full_status(project_root).await { - Ok((worktrees, agents)) => Response::StatusReport { worktrees, agents }, - Err(e) => Self::error_response(&e), - } - } - } - - /// Build a status report for a single agent, using live PTY state when available. - fn build_agent_status_report( - &self, - agent: &AgentEntry, - sessions: &HashMap, - worktree_id: Option, - ) -> AgentStatusReport { - let (status, exit_code, idle_seconds) = - self.live_agent_status_sync(&agent.id, agent, sessions); - AgentStatusReport { - id: agent.id.clone(), - name: agent.name.clone(), - agent_type: agent.agent_type.clone(), - status, - pid: agent.pid, - exit_code, - idle_seconds, - worktree_id, - started_at: agent.started_at, - session_id: agent.session_id.clone(), - prompt: agent.prompt.clone(), - suspended: agent.suspended, - trigger_seq_index: agent.trigger_seq_index, - trigger_state: agent.trigger_state, - trigger_total: agent.trigger_total, - } - } - - /// Compute live agent status from PTY state. - /// Returns (status, exit_code, idle_seconds). - fn live_agent_status_sync( - &self, - id: &str, - agent: &AgentEntry, - sessions: &HashMap, - ) -> (AgentStatus, Option, Option) { - match sessions.get(id) { - Some(handle) => { - let exit_code = *handle.exit_rx.borrow(); - let status = agent_monitor::effective_status(exit_code, &handle.output_buffer); - let idle_seconds = Some(handle.output_buffer.content_idle_seconds()); - (status, exit_code, idle_seconds) - } - // No live session — use manifest (agent already exited/killed/etc.) - None => (agent.status, agent.exit_code, None), - } - } - - /// Spawn a bare shell (no project, no manifest, no config). - /// Used by Point Guard for a root terminal at the given cwd. 
- async fn handle_spawn_shell(&self, cwd: &str) -> Response { - let shell = std::env::var("SHELL").unwrap_or_else(|_| "/bin/zsh".to_string()); - let agent_id = pu_core::id::agent_id(); - - let env = self.agent_env().await; - let spawn_config = SpawnConfig { - command: shell, - args: vec!["-l".to_string()], - cwd: cwd.to_string(), - env, - env_remove: vec![], - cols: 120, - rows: 40, - }; - - let handle = match self.pty_host.spawn(spawn_config).await { - Ok(h) => h, - Err(e) => { - return Response::Error { - code: "SPAWN_FAILED".into(), - message: format!("failed to spawn shell: {e}"), - }; - } - }; - - // Start exit monitor (cleans up session map when shell exits) - let exit_rx = handle.exit_rx.clone(); - let sessions = self.sessions.clone(); - let aid = agent_id.clone(); - tokio::spawn(async move { - let mut rx = exit_rx; - while rx.changed().await.is_ok() { - if rx.borrow().is_some() { - break; - } - } - sessions.lock().await.remove(&aid); - }); - - self.sessions.lock().await.insert(agent_id.clone(), handle); - - Response::SpawnResult { - worktree_id: None, - agent_id, - status: AgentStatus::Streaming, - } - } - - async fn handle_spawn(&self, params: SpawnParams) -> Response { - let SpawnParams { - project_root, - prompt, - agent_type, - name, - base, - root, - worktree, - terminal_command, - no_auto, - extra_args, - plan_mode, - no_trigger, - trigger: trigger_param, - } = params; - let root_path = Path::new(&project_root); - - // Ensure initialized - if !paths::manifest_path(root_path).exists() { - return Response::Error { - code: "NOT_INITIALIZED".into(), - message: "not initialized — run `pu init` first".into(), - }; - } - - // Resolve agent config (strict: surface YAML parse errors) - let cfg = match config::load_config_strict(root_path) { - Ok(c) => c, - Err(e) => { - return Response::Error { - code: "CONFIG_ERROR".into(), - message: format!("failed to load config: {e}"), - }; - } - }; - let agent_cfg = match config::resolve_agent(&cfg, &agent_type) { - Some(c) => c.clone(), - None => { - return Response::Error { - code: "INVALID_ARGUMENT".into(), - message: format!("unknown agent type: {agent_type}"), - }; - } - }; - - let agent_id = pu_core::id::agent_id(); - let creating_new_worktree = !root && worktree.is_none(); - let agent_name = if creating_new_worktree { - // Worktree spawns require a user-provided name (becomes the branch slug) - let Some(raw) = name else { - return Response::Error { - code: "INVALID_ARGUMENT".into(), - message: "worktree spawn requires a name".into(), - }; - }; - let normalized = pu_core::id::normalize_worktree_name(&raw); - if normalized.is_empty() { - return Response::Error { - code: "INVALID_ARGUMENT".into(), - message: "worktree spawn requires a name".into(), - }; - } - normalized - } else { - // Root agents and existing-worktree agents get auto-generated names - name.unwrap_or_else(pu_core::id::root_agent_name) - }; - let base_branch = match base { - Some(b) => b, - None => git::resolve_base_ref(root_path, "HEAD") - .await - .unwrap_or_else(|_| "HEAD".into()), - }; - - // Normalize empty command to None - let terminal_command = terminal_command.filter(|c| !c.is_empty()); - - // Plan mode requires a prompt-driven agent that understands EnterPlanMode. - // Reject early for terminal agents or terminal_command spawns where the - // prefix would be meaningless or actively harmful. 
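// Illustrative restatement of the plan-mode guard below (not from the original file;
// `plan_mode_allowed` is a hypothetical helper, shown only to make the rule explicit):
//
//     fn plan_mode_allowed(prompt: &str, terminal_command: Option<&str>, agent_type: &str) -> bool {
//         // Plan mode needs a prompt-driven, non-terminal agent.
//         !prompt.is_empty() && terminal_command.is_none() && agent_type != "terminal"
//     }
//
//     plan_mode_allowed("fix the flaky test", None, "claude")  == true
//     plan_mode_allowed("", None, "claude")                    == false  // no prompt
//     plan_mode_allowed("fix it", Some("htop"), "claude")      == false  // raw terminal command
//     plan_mode_allowed("fix it", None, "terminal")            == false  // terminal agent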
- if plan_mode - && (prompt.is_empty() || terminal_command.is_some() || agent_type == "terminal") - { - return Response::Error { - code: "INVALID_ARGUMENT".into(), - message: "plan mode requires a prompt-driven non-terminal agent".into(), - }; - } - - // When plan_mode is active, prefix the prompt with instructions to enter plan mode. - // This keeps bypass permissions as the base while guiding the agent into plan mode - // via its own tool (EnterPlanMode) rather than conflicting CLI flags. - let prompt = if plan_mode { - format!( - "[PLAN MODE] You MUST call the EnterPlanMode tool immediately before doing anything else. \ - Do not read files, do not explore — call EnterPlanMode first. \ - Once in plan mode, research and plan before making changes.\n\n{prompt}" - ) - } else { - prompt.to_string() - }; - let prompt = &prompt; - - // When a terminal command is set, it becomes the PTY process directly - let (command, args, session_id, inject_prompt_via_stdin) = if let Some(ref cmd) = - terminal_command - { - let has_metacharacters = cmd.contains('|') - || cmd.contains("&&") - || cmd.contains(';') - || cmd.contains('>') - || cmd.contains('<') - || cmd.contains('$'); - - let (cmd_bin, cmd_args) = if has_metacharacters { - let shell = std::env::var("SHELL").unwrap_or_else(|_| "/bin/sh".to_string()); - (shell, vec!["-c".to_string(), cmd.clone()]) - } else { - let parts: Vec<&str> = cmd.split_whitespace().collect(); - if parts.is_empty() { - // Shouldn't happen after filter, but handle gracefully - let shell = std::env::var("SHELL").unwrap_or_else(|_| "/bin/sh".to_string()); - (shell, vec![]) - } else { - ( - parts[0].to_string(), - parts[1..].iter().map(ToString::to_string).collect(), - ) - } - }; - (cmd_bin, cmd_args, None, false) - } else { - // Standard agent flow - let (command, cmd_args) = match Self::parse_agent_command(&agent_cfg, &agent_type) { - Ok(v) => v, - Err(e) => return e, - }; - let mut args = cmd_args; - - // Add agent-type-specific launch args from config (or defaults). - // --no-auto skips only the built-in defaults; explicit user-configured - // launchArgs are always applied. - let launch_args = if no_auto && agent_cfg.launch_args.is_none() { - Vec::new() - } else { - pu_core::types::resolved_launch_args(&agent_type, agent_cfg.launch_args.as_deref()) - }; - if launch_args.is_empty() && agent_cfg.launch_args.is_some() { - tracing::info!(agent_type, "auto-mode disabled via config (launchArgs: [])"); - } - for arg in launch_args.into_iter().rev() { - if !args.iter().any(|a| a == &arg) { - args.insert(0, arg); - } - } - - // Append extra args from --agent-args (always applied, even with --no-auto) - args.extend(extra_args.iter().cloned()); - - // Generate session ID for claude agents (enables resume via --resume) - let session_id = if agent_type == "claude" { - let id = pu_core::id::session_id(); - args.push("--session-id".into()); - args.push(id.clone()); - Some(id) - } else { - None - }; - - // Claude prompt via argv can stall first render in some terminals; keep stdin injection - // for Claude (and terminal agent). Codex/OpenCode accept startup prompts via CLI args. 
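// Illustrative summary of how the startup prompt reaches the agent, based on the helpers
// defined later in this file (should_inject_prompt_via_stdin and resolved_prompt_flag);
// the concrete prompts and flags here are examples, not config defaults:
//
//     should_inject_prompt_via_stdin("claude", true, "do X")  == true   // written to the PTY after spawn
//     should_inject_prompt_via_stdin("claude", true, "")      == false  // nothing to inject
//     should_inject_prompt_via_stdin("codex",  true, "do X")  == false  // delivered via argv instead
//
//     resolved_prompt_flag("opencode", None)     == Some("--prompt".to_string())
//     resolved_prompt_flag("codex", None)        == None                  // positional prompt
//     resolved_prompt_flag("codex", Some("-p"))  == Some("-p".to_string())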
- let inject_prompt_via_stdin = - Self::should_inject_prompt_via_stdin(&agent_type, agent_cfg.interactive, prompt); - if !inject_prompt_via_stdin && !prompt.is_empty() { - let prompt_flag = - Self::resolved_prompt_flag(&agent_type, agent_cfg.prompt_flag.as_deref()); - if let Some(flag) = prompt_flag { - args.push(flag); - args.push(prompt.to_string()); - } else { - // Default prompt style is positional (for example codex [PROMPT]). - args.push(prompt.to_string()); - } - } - - (command, args, session_id, inject_prompt_via_stdin) - }; - - // Determine working directory - let (cwd, worktree_id) = if root || worktree.is_some() { - // Spawn in project root or existing worktree - let wt_id = worktree.clone(); - let dir = if let Some(ref wt) = worktree { - paths::worktree_path(root_path, wt) - .to_string_lossy() - .to_string() - } else { - project_root.to_string() - }; - (dir, wt_id) - } else { - // Create new worktree - let wt_id = pu_core::id::worktree_id(); - let wt_path = paths::worktree_path(root_path, &wt_id); - let branch = format!("pu/{agent_name}"); - - if let Err(e) = git::create_worktree(root_path, &wt_path, &branch, &base_branch).await { - return Response::Error { - code: "SPAWN_FAILED".into(), - message: format!("failed to create worktree: {e}"), - }; - } - - // Install git hooks for trigger gate enforcement - if let Err(e) = git::install_hooks(&wt_path, root_path).await { - tracing::warn!("failed to install git hooks in worktree: {e}"); - } - - // Copy env files (e.g., .env, .env.local) into new worktree - for env_file in &cfg.env_files { - let src = root_path.join(env_file); - let dst = wt_path.join(env_file); - match tokio::fs::copy(&src, &dst).await { - Ok(_) => {} - Err(e) if e.kind() == std::io::ErrorKind::NotFound && !src.exists() => {} // source doesn't exist, skip - Err(e) => tracing::warn!("failed to copy {env_file} to worktree: {e}"), - } - } - - (wt_path.to_string_lossy().to_string(), Some(wt_id)) - }; - - // Spawn PTY process - let mut env = self.agent_env().await; - env.push(("PU_AGENT_ID".into(), agent_id.clone())); - let spawn_config = SpawnConfig { - command, - args, - cwd: cwd.clone(), - env, - env_remove: vec!["CLAUDECODE".into()], - cols: 120, - rows: 40, - }; - - // Track whether we created a new worktree (for rollback on failure) - let created_worktree = !root && worktree.is_none() && worktree_id.is_some(); - let rollback_branch = if created_worktree { - Some(format!("pu/{agent_name}")) - } else { - None - }; - - let handle = match self.pty_host.spawn(spawn_config).await { - Ok(h) => h, - Err(e) => { - if created_worktree { - self.rollback_worktree( - root_path, - worktree_id.as_deref(), - rollback_branch.as_deref(), - ) - .await; - } - return Response::Error { - code: "SPAWN_FAILED".into(), - message: format!("failed to spawn process: {e}"), - }; - } - }; - - if inject_prompt_via_stdin { - let prompt_bytes = prompt.as_bytes().to_vec(); - let pending = self.pending_initial_inputs.clone(); - pending - .lock() - .await - .insert(agent_id.clone(), prompt_bytes.clone()); - - let output_buffer = handle.output_buffer.clone(); - let master_fd = handle.master_fd(); - let mut exit_rx = handle.exit_rx.clone(); - let pty_host = NativePtyHost::new(); - let aid = agent_id.clone(); - - tokio::spawn(async move { - let mut watcher = output_buffer.subscribe(); - let timeout = tokio::time::sleep(Duration::from_millis(1800)); - tokio::pin!(timeout); - - loop { - tokio::select! 
{ - _ = &mut timeout => { - // Fallback or quiet-period expired — inject now - break; - } - Ok(()) = exit_rx.changed() => { - // Process exited before we could inject — abort - pending.lock().await.remove(&aid); - tracing::debug!(agent_id = %aid, "prompt injection aborted: process exited"); - return; - } - Ok(()) = watcher.changed() => { - // Got output — reset to a 450ms quiet period - timeout - .as_mut() - .reset(tokio::time::Instant::now() + Duration::from_millis(450)); - } - } - } - - // Inject the prompt - if inject_initial_prompt(&pty_host, &master_fd, &aid, &prompt_bytes).await { - tracing::debug!(agent_id = %aid, "prompt injected at spawn time"); - } else { - tracing::warn!(agent_id = %aid, "failed to inject prompt at spawn time"); - } - pending.lock().await.remove(&aid); - }); - } - - let pid = handle.pid; - - // Store handle in session map BEFORE writing manifest. - // ManifestWatcher in Swift fires on manifest write and immediately - // tries to attach — the session must already be in the map. - self.sessions.lock().await.insert(agent_id.clone(), handle); - - // Bind trigger if explicitly specified via --trigger - let (trigger_name, trigger_total) = if no_trigger { - (None, None) - } else if let Some(ref name) = trigger_param { - let pr = project_root.to_string(); - let name_clone = name.clone(); - let found = tokio::task::spawn_blocking(move || { - let triggers = pu_core::trigger_def::triggers_for_event( - Path::new(&pr), - &pu_core::trigger_def::TriggerEvent::AgentIdle, - ); - triggers - .into_iter() - .find(|t| t.name == name_clone) - .map(|t| { - let len = t.sequence.len() as u32; - (t.name, len) - }) - }) - .await - .unwrap_or(None); - match found { - Some((tname, total)) if total > 0 => (Some(tname), Some(total)), - Some(_) => { - return Response::Error { - code: "INVALID_TRIGGER".into(), - message: format!("trigger '{name}' has empty sequence"), - }; - } - None => { - return Response::Error { - code: "NOT_FOUND".into(), - message: format!("trigger '{name}' not found"), - }; - } - } - } else { - (None, None) - }; - - // Update manifest - let agent_entry = AgentEntry { - id: agent_id.clone(), - name: agent_name.clone(), - agent_type, - status: AgentStatus::Streaming, - prompt: Some(prompt.to_string()), - started_at: chrono::Utc::now(), - completed_at: None, - exit_code: None, - error: None, - pid: Some(pid), - session_id, - suspended_at: None, - suspended: false, - command: terminal_command, - plan_mode, - trigger_seq_index: trigger_name.as_ref().map(|_| 0), - trigger_state: trigger_name - .as_ref() - .map(|_| pu_core::types::TriggerState::Active), - trigger_total, - gate_attempts: trigger_name.as_ref().map(|_| 0), - no_trigger, - trigger_name: trigger_name.clone(), - }; - - let wt_id_for_manifest = worktree_id.clone(); - let agent_id_clone = agent_id.clone(); - let manifest_result = manifest::update_manifest(root_path, move |mut m| { - if let Some(ref wt_id) = wt_id_for_manifest { - // Add or update worktree entry - let wt_entry = m - .worktrees - .entry(wt_id.clone()) - .or_insert_with(|| WorktreeEntry { - id: wt_id.clone(), - name: agent_name.clone(), - path: cwd.clone(), - branch: format!("pu/{agent_name}"), - base_branch: Some(base_branch.clone()), - status: WorktreeStatus::Active, - agents: IndexMap::new(), - created_at: chrono::Utc::now(), - merged_at: None, - }); - wt_entry.agents.insert(agent_id_clone, agent_entry); - } else { - m.agents.insert(agent_id_clone, agent_entry); - } - m - }); - - if let Err(e) = manifest_result { - // Rollback: remove session and kill 
process - if let Some(handle) = self.sessions.lock().await.remove(&agent_id) { - self.pty_host - .kill(&handle, Duration::from_secs(2)) - .await - .ok(); - } - if created_worktree { - self.rollback_worktree( - root_path, - worktree_id.as_deref(), - rollback_branch.as_deref(), - ) - .await; - } - return Response::Error { - code: "SPAWN_FAILED".into(), - message: format!("failed to update manifest: {e}"), - }; - } - - self.notify_status_change(&project_root).await; - - Response::SpawnResult { - worktree_id, - agent_id, - status: AgentStatus::Streaming, - } - } - - async fn handle_create_worktree( - &self, - project_root: &str, - name: Option, - base: Option, - ) -> Response { - let root_path = Path::new(project_root); - - // Ensure initialized - if !paths::manifest_path(root_path).exists() { - return Response::Error { - code: "NOT_INITIALIZED".into(), - message: "not initialized — run `pu init` first".into(), - }; - } - - // Load config for env_files - let cfg = match config::load_config_strict(root_path) { - Ok(c) => c, - Err(e) => { - return Response::Error { - code: "CONFIG_ERROR".into(), - message: format!("failed to load config: {e}"), - }; - } - }; - - // Resolve name - let Some(raw) = name else { - return Response::Error { - code: "INVALID_ARGUMENT".into(), - message: "worktree creation requires a name".into(), - }; - }; - let worktree_name = pu_core::id::normalize_worktree_name(&raw); - if worktree_name.is_empty() { - return Response::Error { - code: "INVALID_ARGUMENT".into(), - message: "worktree creation requires a name".into(), - }; - } - - let base_branch = match base { - Some(b) => b, - None => git::resolve_base_ref(root_path, "HEAD") - .await - .unwrap_or_else(|_| "HEAD".into()), - }; - let wt_id = pu_core::id::worktree_id(); - let wt_path = paths::worktree_path(root_path, &wt_id); - let branch = format!("pu/{worktree_name}"); - let rollback_branch = branch.clone(); - - if let Err(e) = git::create_worktree(root_path, &wt_path, &branch, &base_branch).await { - return Response::Error { - code: "CREATE_WORKTREE_FAILED".into(), - message: format!("failed to create worktree: {e}"), - }; - } - - // Install git hooks for trigger gate enforcement - if let Err(e) = git::install_hooks(&wt_path, root_path).await { - tracing::warn!("failed to install git hooks in worktree: {e}"); - } - - // Copy env files into new worktree - for env_file in &cfg.env_files { - let src = root_path.join(env_file); - let dst = wt_path.join(env_file); - match tokio::fs::copy(&src, &dst).await { - Ok(_) => {} - Err(e) if e.kind() == std::io::ErrorKind::NotFound && !src.exists() => {} - Err(e) => tracing::warn!("failed to copy {env_file} to worktree: {e}"), - } - } - - // Write manifest entry (worktree only, no agents) - let cwd = wt_path.to_string_lossy().to_string(); - let wt_id_clone = wt_id.clone(); - let manifest_result = manifest::update_manifest(root_path, move |mut m| { - m.worktrees - .entry(wt_id_clone.clone()) - .or_insert_with(|| WorktreeEntry { - id: wt_id_clone, - name: worktree_name.clone(), - path: cwd, - branch, - base_branch: Some(base_branch.clone()), - status: WorktreeStatus::Active, - agents: IndexMap::new(), - created_at: chrono::Utc::now(), - merged_at: None, - }); - m - }); - - if let Err(e) = manifest_result { - // Rollback: remove worktree + branch - self.rollback_worktree(root_path, Some(&wt_id), Some(&rollback_branch)) - .await; - return Response::Error { - code: "CREATE_WORKTREE_FAILED".into(), - message: format!("failed to update manifest: {e}"), - }; - } - - 
self.notify_status_change(project_root).await; - - Response::CreateWorktreeResult { worktree_id: wt_id } - } - - async fn handle_kill( - &self, - project_root: &str, - target: KillTarget, - exclude: &[String], - ) -> Response { - let m = match self.read_manifest_async(project_root).await { - Ok(m) => m, - Err(e) => return Self::error_response(&e), - }; - - let all_ids: Vec = match &target { - KillTarget::Agent(id) => vec![id.clone()], - KillTarget::Worktree(wt_id) => match m.worktrees.get(wt_id) { - Some(wt) => wt.agents.keys().cloned().collect(), - None => { - return Response::Error { - code: "WORKTREE_NOT_FOUND".into(), - message: format!("worktree {wt_id} not found"), - }; - } - }, - KillTarget::All => { - let mut ids: Vec = m.agents.keys().cloned().collect(); - for wt in m.worktrees.values() { - ids.extend(wt.agents.keys().cloned()); - } - ids - } - KillTarget::AllWorktrees => { - let mut ids: Vec = Vec::new(); - for wt in m.worktrees.values() { - ids.extend(wt.agents.keys().cloned()); - } - ids - } - }; - - // Apply exclusions (self-protection + root-protection) - let (agent_ids, skipped): (Vec, Vec) = - all_ids.into_iter().partition(|id| !exclude.contains(id)); - - // Kill agents: remove pending inputs, extract handles, kill PTY processes. - let handles_killed = self.kill_agents(&agent_ids).await; - let exit_codes: HashMap> = handles_killed - .iter() - .map(|(id, handle)| (id.clone(), *handle.exit_rx.borrow())) - .collect(); - - // Update manifest: remove all targeted agents (off async runtime) - let killed = agent_ids.clone(); - let pr = project_root.to_string(); - let killed_for_manifest = killed.clone(); - tokio::task::spawn_blocking(move || { - manifest::update_manifest(Path::new(&pr), move |mut m| { - for id in &killed_for_manifest { - m.agents.shift_remove(id); - for wt in m.worktrees.values_mut() { - wt.agents.shift_remove(id); - } - } - m - }) - .ok(); - }) - .await - .ok(); - - self.notify_status_change(project_root).await; - - Response::KillResult { - killed, - exit_codes, - skipped, - } - } - - async fn handle_delete_worktree(&self, project_root: &str, worktree_id: &str) -> Response { - let m = match self.read_manifest_async(project_root).await { - Ok(m) => m, - Err(e) => return Self::error_response(&e), - }; - - let wt = match m.worktrees.get(worktree_id) { - Some(wt) => wt.clone(), - None => { - return Response::Error { - code: "WORKTREE_NOT_FOUND".into(), - message: format!("worktree {worktree_id} not found"), - }; - } - }; - - // 1. Kill all agents in the worktree - let agent_ids: Vec = wt.agents.keys().cloned().collect(); - self.kill_agents(&agent_ids).await; - - // 2. Remove git worktree directory - let root_path = Path::new(project_root); - let wt_path = paths::worktree_path(root_path, worktree_id); - git::remove_worktree(root_path, &wt_path).await.ok(); - - // 3. Delete local branch (soft-fail) - let branch = wt.branch.clone(); - let branch_deleted = git::delete_local_branch(root_path, &branch).await.is_ok(); - - // 4. Delete remote branch (soft-fail) - let remote_deleted = git::delete_remote_branch(root_path, &branch).await.is_ok(); - - // 5. 
Remove worktree from manifest - let wt_id = worktree_id.to_string(); - let killed_agents = agent_ids.clone(); - let pr = project_root.to_string(); - tokio::task::spawn_blocking(move || { - manifest::update_manifest(Path::new(&pr), move |mut m| { - m.worktrees.shift_remove(&wt_id); - m - }) - .ok(); - }) - .await - .ok(); - - self.notify_status_change(project_root).await; - - Response::DeleteWorktreeResult { - worktree_id: worktree_id.to_string(), - killed_agents, - branch_deleted, - remote_deleted, - } - } - - async fn handle_rename(&self, project_root: &str, agent_id: &str, name: &str) -> Response { - let pr = project_root.to_string(); - let aid = agent_id.to_string(); - let new_name = name.to_string(); - let new_name2 = new_name.clone(); - - let result = tokio::task::spawn_blocking(move || { - manifest::update_manifest(Path::new(&pr), |mut m| { - if let Some(agent) = m.find_agent_mut(&aid) { - agent.name = new_name.clone(); - } - m - }) - }) - .await; - - match result { - Ok(Ok(updated)) => { - let found = updated.find_agent(agent_id).is_some(); - if found { - self.notify_status_change(project_root).await; - Response::RenameResult { - agent_id: agent_id.to_string(), - name: new_name2, - } - } else { - Self::agent_not_found(agent_id) - } - } - Ok(Err(e)) => Self::error_response(&e), - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("rename task failed: {e}"), - }, - } - } - - async fn handle_assign_trigger( - &self, - project_root: &str, - agent_id: &str, - trigger_name: &str, - ) -> Response { - let pr = project_root.to_string(); - let tn = trigger_name.to_string(); - - let trigger = tokio::task::spawn_blocking(move || { - pu_core::trigger_def::triggers_for_event( - Path::new(&pr), - &pu_core::trigger_def::TriggerEvent::AgentIdle, - ) - .into_iter() - .find(|t| t.name == tn) - }) - .await; - - let trigger = match trigger { - Ok(Some(t)) => t, - Ok(None) => { - return Response::Error { - code: "NOT_FOUND".into(), - message: format!("trigger '{trigger_name}' not found"), - }; - } - Err(e) => { - return Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("trigger lookup failed: {e}"), - }; - } - }; - - let sequence_len = trigger.sequence.len() as u32; - if sequence_len == 0 { - return Response::Error { - code: "INVALID_TRIGGER".into(), - message: format!("trigger '{trigger_name}' has empty sequence"), - }; - } - - // Verify the agent exists in the manifest before assigning - let pr_check = project_root.to_string(); - let aid_check = agent_id.to_string(); - let agent_exists = tokio::task::spawn_blocking(move || { - manifest::read_manifest(Path::new(&pr_check)) - .map(|m| m.find_agent(&aid_check).is_some()) - }) - .await; - - match agent_exists { - Ok(Ok(false)) | Ok(Err(_)) => { - return Response::Error { - code: "NOT_FOUND".into(), - message: format!("agent '{agent_id}' not found"), - }; - } - Err(e) => { - return Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("agent lookup failed: {e}"), - }; - } - Ok(Ok(true)) => {} // proceed - } - - let pr2 = project_root.to_string(); - let aid2 = agent_id.to_string(); - let tn2 = trigger_name.to_string(); - let result = tokio::task::spawn_blocking(move || { - manifest::update_manifest(Path::new(&pr2), |mut m| { - if let Some(agent) = m.find_agent_mut(&aid2) { - agent.trigger_name = Some(tn2.clone()); - agent.trigger_state = Some(pu_core::types::TriggerState::Active); - agent.trigger_seq_index = Some(0); - agent.trigger_total = Some(sequence_len); - agent.gate_attempts = Some(0); - 
} - m - }) - }) - .await; - - match result { - Ok(Ok(_)) => { - self.notify_status_change(project_root).await; - Response::AssignTriggerResult { - agent_id: agent_id.to_string(), - trigger_name: trigger_name.to_string(), - sequence_len, - } - } - Ok(Err(e)) => Self::error_response(&e), - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("assign trigger task failed: {e}"), - }, - } - } - - async fn handle_suspend(&self, project_root: &str, target: SuspendTarget) -> Response { - let m = match self.read_manifest_async(project_root).await { - Ok(m) => m, - Err(e) => return Self::error_response(&e), - }; - - // Collect suspendable agents — must be alive and not already suspended. - let agent_ids: Vec = match &target { - SuspendTarget::Agent(id) => match m.find_agent(id) { - Some(loc) => { - let agent = match loc { - pu_core::types::AgentLocation::Root(a) => a, - pu_core::types::AgentLocation::Worktree { agent, .. } => agent, - }; - if !agent.status.is_alive() || agent.suspended { - return Response::SuspendResult { suspended: vec![] }; - } - vec![id.clone()] - } - None => return Self::agent_not_found(id), - }, - SuspendTarget::All => m - .all_agents() - .into_iter() - .filter(|a| a.status.is_alive() && !a.suspended) - .map(|a| a.id.clone()) - .collect(), - }; - - if agent_ids.is_empty() { - return Response::SuspendResult { suspended: vec![] }; - } - - self.kill_agents(&agent_ids).await; - - // Update manifest: mark as suspended, clear pid, set suspended_at. - // Status stays as-is (Waiting); suspended flag is metadata. - let suspended = agent_ids.clone(); - let pr = project_root.to_string(); - let suspended_for_manifest = suspended.clone(); - tokio::task::spawn_blocking(move || { - manifest::update_manifest(Path::new(&pr), move |mut m| { - let now = chrono::Utc::now(); - for id in &suspended_for_manifest { - if let Some(agent) = m.find_agent_mut(id) { - agent.status = AgentStatus::Waiting; - agent.suspended = true; - agent.pid = None; - agent.suspended_at = Some(now); - } - } - m - }) - .ok(); - }) - .await - .ok(); - - self.notify_status_change(project_root).await; - - Response::SuspendResult { suspended } - } - - async fn handle_resume(&self, project_root: &str, agent_id: &str) -> Response { - let root_path = Path::new(project_root); - - // 1. Read manifest, find the suspended agent - let m = match self.read_manifest_async(project_root).await { - Ok(m) => m, - Err(e) => return Self::error_response(&e), - }; - - let (agent_entry, _worktree_id, cwd) = match m.find_agent(agent_id) { - Some(pu_core::types::AgentLocation::Root(a)) => { - (a.clone(), None::, project_root.to_string()) - } - Some(pu_core::types::AgentLocation::Worktree { worktree, agent }) => ( - agent.clone(), - Some(worktree.id.clone()), - worktree.path.clone(), - ), - None => return Self::agent_not_found(agent_id), - }; - - if !agent_entry.suspended { - return Response::Error { - code: "INVALID_STATE".into(), - message: "agent is not suspended".into(), - }; - } - - // 2. Load agent config - let cfg = match config::load_config_strict(root_path) { - Ok(c) => c, - Err(e) => { - return Response::Error { - code: "CONFIG_ERROR".into(), - message: format!("failed to load config: {e}"), - }; - } - }; - let agent_cfg = match config::resolve_agent(&cfg, &agent_entry.agent_type) { - Some(c) => c.clone(), - None => { - return Response::Error { - code: "INVALID_ARGUMENT".into(), - message: format!("unknown agent type: {}", agent_entry.agent_type), - }; - } - }; - - // 3. 
Repair corrupted session files before resume (claude-code#24304) - if let Some(ref sid) = agent_entry.session_id { - let cwd_clone = cwd.clone(); - let sid_clone = sid.clone(); - tokio::task::spawn_blocking(move || { - Self::repair_session_files(&cwd_clone, &sid_clone); - }) - .await - .ok(); - } - let effective_session_id = agent_entry.session_id.clone(); - - // 4. Construct resume command based on agent type - let (command, args, session_id) = match self.build_resume_command( - &agent_entry.agent_type, - &agent_cfg, - effective_session_id.as_deref(), - ) { - Ok(result) => result, - Err(response) => return response, - }; - - // 5. Spawn PTY process - let mut env = self.agent_env().await; - env.push(("PU_AGENT_ID".into(), agent_id.to_string())); - let spawn_config = SpawnConfig { - command, - args, - cwd, - env, - env_remove: vec!["CLAUDECODE".into()], - cols: 120, - rows: 40, - }; - - let handle = match self.pty_host.spawn(spawn_config).await { - Ok(h) => h, - Err(e) => { - return Response::Error { - code: "RESUME_FAILED".into(), - message: format!("failed to spawn process: {e}"), - }; - } - }; - - let pid = handle.pid; - - // Store handle in session map BEFORE writing manifest. - // ManifestWatcher in Swift fires on manifest write and immediately - // tries to attach — the session must already be in the map. - self.sessions - .lock() - .await - .insert(agent_id.to_string(), handle); - - // 6. Update manifest: Suspended → Running, new PID - let aid = agent_id.to_string(); - let sid = session_id.clone(); - let pr = project_root.to_string(); - let manifest_result = tokio::task::spawn_blocking(move || { - manifest::update_manifest(Path::new(&pr), move |mut m| { - if let Some(agent) = m.find_agent_mut(&aid) { - agent.status = AgentStatus::Streaming; - agent.suspended = false; - agent.pid = Some(pid); - agent.completed_at = None; - agent.suspended_at = None; - if let Some(ref s) = sid { - agent.session_id = Some(s.clone()); - } - } - m - }) - }) - .await - .unwrap_or_else(|e| Err(PuError::Io(std::io::Error::other(e)))); - - if let Err(e) = manifest_result { - // Rollback: remove session and kill process - if let Some(handle) = self.sessions.lock().await.remove(agent_id) { - self.pty_host - .kill(&handle, Duration::from_secs(2)) - .await - .ok(); - } - return Response::Error { - code: "RESUME_FAILED".into(), - message: format!("failed to update manifest: {e}"), - }; - } - - self.notify_status_change(project_root).await; - - Response::ResumeResult { - agent_id: agent_id.to_string(), - status: AgentStatus::Streaming, - } - } - - /// Construct the resume command for a given agent type. - /// Returns Ok((command, args, session_id)) or Err(Response) on failure. - #[allow(clippy::result_large_err)] - fn build_resume_command( - &self, - agent_type: &str, - agent_cfg: &pu_core::types::AgentConfig, - session_id: Option<&str>, - ) -> Result<(String, Vec, Option), Response> { - let launch_args = - pu_core::types::resolved_launch_args(agent_type, agent_cfg.launch_args.as_deref()); - match agent_type { - "claude" => { - let sid = session_id.ok_or_else(|| Response::Error { - code: "RESUME_FAILED".into(), - message: "cannot resume Claude agent: no session_id preserved".into(), - })?; - let mut args = launch_args; - args.push("--resume".into()); - args.push(sid.to_string()); - Ok(("claude".into(), args, Some(sid.to_string()))) - } - "codex" => { - // Top-level flags (e.g. 
--full-auto) must precede the subcommand - let mut args = launch_args; - args.push("resume".into()); - args.push("--last".into()); - Ok(("codex".into(), args, None)) - } - "opencode" => { - let mut args = vec!["--continue".into()]; - args.extend(launch_args); - Ok(("opencode".into(), args, None)) - } - _ => { - // Terminal / unknown: fresh shell in same directory - let (command, args) = Self::parse_agent_command(agent_cfg, agent_type)?; - Ok((command, args, None)) - } - } - } - - /// Scan Claude Code's session directory for the latest continuation of a session. - /// Claude Code stores sessions at `~/.claude/projects/{escaped-cwd}/{uuid}.jsonl`. - /// Resolve the sessions directory for a given working directory. - fn sessions_dir_for(cwd: &str) -> Option { - let home = std::env::var("HOME").ok()?; - let escaped: String = cwd - .chars() - .map(|c| { - if c.is_ascii_alphanumeric() || c == '-' { - c - } else { - '-' - } - }) - .collect(); - Some( - PathBuf::from(&home) - .join(".claude") - .join("projects") - .join(&escaped), - ) - } - - /// Repair corrupted Claude Code session JSONL files for the given session. - /// - /// Fixes three known corruption patterns (claude-code#24304): - /// 1. Snapshot `messageId` collisions — `file-history-snapshot` entries sharing UUIDs with real messages - /// 2. Broken `parentUuid` references — entries pointing to non-existent UUIDs - /// 3. Disconnected compaction roots — multiple `parentUuid: null` entries splitting the conversation - fn repair_session_files(cwd: &str, session_id: &str) { - let Some(sessions_dir) = Self::sessions_dir_for(cwd) else { - return; - }; - if !sessions_dir.is_dir() { - return; - } - - // Repair the original session file - let original = sessions_dir.join(format!("{session_id}.jsonl")); - if original.is_file() { - repair_session_file(&original); - } - - // Repair continuation files that chain back to the original session - let Ok(entries) = std::fs::read_dir(&sessions_dir) else { - return; - }; - for entry in entries.flatten() { - let path = entry.path(); - if path.extension().and_then(|e| e.to_str()) != Some("jsonl") { - continue; - } - // Skip the original — already repaired - if path.file_stem().and_then(|s| s.to_str()) == Some(session_id) { - continue; - } - // Only repair continuation files (first line has non-null parentUuid) - let Ok(file) = std::fs::File::open(&path) else { - continue; - }; - let mut reader = std::io::BufReader::new(file); - let mut first_line = String::new(); - if BufRead::read_line(&mut reader, &mut first_line).is_err() { - continue; - } - let Ok(value) = serde_json::from_str::(&first_line) else { - continue; - }; - if value.get("parentUuid").and_then(|v| v.as_str()).is_some() { - repair_session_file(&path); - } - } - } - - async fn handle_logs(&self, agent_id: &str, tail: usize) -> Response { - let buf = { - let sessions = self.sessions.lock().await; - match sessions.get(agent_id) { - Some(handle) => handle.output_buffer.clone(), - None => return Self::agent_not_found(agent_id), - } - }; - let data = buf.read_tail(tail); - let text = String::from_utf8_lossy(&data); - if let std::borrow::Cow::Owned(_) = &text { - tracing::warn!( - agent_id, - "logs output contained non-UTF-8 bytes (lossy conversion applied)" - ); - } - Response::LogsResult { - agent_id: agent_id.to_string(), - data: text.into_owned(), - } - } - - async fn handle_attach(&self, agent_id: &str) -> Response { - let sessions = self.sessions.lock().await; - match sessions.get(agent_id) { - Some(handle) => Response::AttachReady { - 
buffered_bytes: handle.output_buffer.len(), - }, - None => Self::agent_not_found(agent_id), - } - } - - async fn handle_input(&self, agent_id: &str, data: &[u8], submit: bool) -> Response { - // Clone the fd Arc under the lock, then drop the lock before the blocking write - let master_fd = { - let sessions = self.sessions.lock().await; - match sessions.get(agent_id) { - Some(handle) => handle.master_fd(), - None => return Self::agent_not_found(agent_id), - } - }; - let result = if submit { - self.pty_host.write_chunked_submit(&master_fd, data).await - } else { - self.pty_host.write_to_fd(&master_fd, data).await - }; - match result { - Ok(()) => Response::Ok, - Err(e) => Response::Error { - code: "IO_ERROR".into(), - message: format!("write failed: {e}"), - }, - } - } - - async fn handle_resize(&self, agent_id: &str, cols: u16, rows: u16) -> Response { - // Clone the fd Arc under the lock, then drop the lock before the blocking ioctl - let master_fd = { - let sessions = self.sessions.lock().await; - match sessions.get(agent_id) { - Some(handle) => handle.master_fd(), - None => return Self::agent_not_found(agent_id), - } - }; - match self.pty_host.resize_fd(&master_fd, cols, rows).await { - Ok(()) => Response::Ok, - Err(e) => Response::Error { - code: "IO_ERROR".into(), - message: format!("resize failed: {e}"), - }, - } - } - - /// Write data to a PTY fd via the pty host (avoids duplicating unsafe write logic). - pub async fn write_to_pty(&self, fd: &Arc, data: &[u8]) -> Result<(), std::io::Error> { - self.pty_host.write_to_fd(fd, data).await - } - - /// Resize a PTY fd via the pty host (avoids duplicating unsafe ioctl logic). - pub async fn resize_pty( - &self, - fd: &Arc, - cols: u16, - rows: u16, - ) -> Result<(), std::io::Error> { - self.pty_host.resize_fd(fd, cols, rows).await - } - - /// Return the output buffer, master PTY fd, and exit receiver for an agent, - /// if it has an active session. - pub async fn get_attach_handles( - &self, - agent_id: &str, - ) -> Option<( - Arc, - Arc, - tokio::sync::watch::Receiver>, - )> { - let sessions = self.sessions.lock().await; - sessions - .get(agent_id) - .map(|h| (h.output_buffer.clone(), h.master_fd(), h.exit_rx.clone())) - } - - /// Build the full environment for spawned agents. - /// Starts from the user's login shell env, then overrides PATH - /// (prepends ~/.pu/bin + fallback dirs), TERM, and COLORTERM. 
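// Worked example of the PATH rewrite performed below, assuming HOME=/Users/alice,
// a global pu dir of ~/.pu, and a login PATH of "/usr/bin:/bin" (values are
// illustrative, not defaults):
//
//     /Users/alice/.pu/bin:/usr/bin:/bin:/Users/alice/.local/bin:/Users/alice/.cargo/bin:/usr/local/bin:/opt/homebrew/bin
//
// Fallback dirs are appended only when not already on the PATH; TERM is forced to
// "xterm-256color" and COLORTERM to "truecolor" regardless of the login values.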
- async fn agent_env(&self) -> Vec<(String, String)> { - let login_env = self.login_env.get_or_init(Self::resolve_login_env).await; - let mut env = login_env.clone(); - - // Extract login PATH for augmentation - let login_path = env - .iter() - .find(|(k, _)| k == "PATH") - .map(|(_, v)| v.clone()) - .unwrap_or_default(); - - // Append common fallback dirs (guards against missing-binary issues) - let home = std::env::var("HOME").unwrap_or_default(); - let fallbacks = [ - format!("{home}/.local/bin"), - format!("{home}/.cargo/bin"), - "/usr/local/bin".to_string(), - "/opt/homebrew/bin".to_string(), - ]; - let mut path = login_path; - for dir in fallbacks { - if !path.split(':').any(|p| p == dir) { - path = format!("{path}:{dir}"); - } - } - // Prepend ~/.pu/bin - if let Ok(pu_dir) = paths::global_pu_dir() { - path = format!("{}:{}", pu_dir.join("bin").display(), path); - } - - // Override PATH, TERM, COLORTERM in the env - env.retain(|(k, _)| k != "PATH" && k != "TERM" && k != "COLORTERM"); - env.push(("PATH".into(), path)); - env.push(("TERM".into(), "xterm-256color".into())); - env.push(("COLORTERM".into(), "truecolor".into())); - - env - } - - // --- Grid --- - - async fn handle_subscribe_grid(&self, project_root: &str) -> Response { - self.ensure_grid_channel(project_root).await; - Response::GridSubscribed - } - - pub async fn handle_grid_command(&self, project_root: &str, command: GridCommand) -> Response { - // For GetLayout, read the grid-layout.json directly - if matches!(command, GridCommand::GetLayout) { - let root = project_root.to_string(); - return match tokio::task::spawn_blocking(move || { - let path = paths::pu_dir(Path::new(&root)).join("grid-layout.json"); - std::fs::read_to_string(path) - }) - .await - { - Ok(Ok(contents)) => match serde_json::from_str(&contents) { - Ok(layout) => Response::GridLayout { layout }, - Err(e) => Response::Error { - code: "PARSE_ERROR".into(), - message: format!("invalid grid layout JSON: {e}"), - }, - }, - _ => Response::GridLayout { - layout: serde_json::Value::Null, - }, - }; - } - - // Broadcast mutation commands to subscribers - let channels = self.grid_channels.lock().await; - if let Some(tx) = channels.get(project_root) { - let _ = tx.send(command.clone()); - } - Response::Ok - } - - async fn ensure_grid_channel(&self, project_root: &str) { - let mut channels = self.grid_channels.lock().await; - channels - .entry(project_root.to_string()) - .or_insert_with(|| tokio::sync::broadcast::channel(64).0); - } - - /// Get a grid broadcast receiver for a project (used by IPC server for streaming). - pub async fn subscribe_grid( - &self, - project_root: &str, - ) -> tokio::sync::broadcast::Receiver { - let mut channels = self.grid_channels.lock().await; - let tx = channels - .entry(project_root.to_string()) - .or_insert_with(|| tokio::sync::broadcast::channel(64).0); - tx.subscribe() - } - - // --- Helpers --- - - /// Parse an agent config's command string into (program, args), resolving - /// the "shell" sentinel to the user's login shell. 
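// Illustrative expectations for the parser below, assuming SHELL=/bin/zsh
// (the command strings are examples, not shipped defaults):
//
//     "claude --verbose"  ->  Ok(("claude", vec!["--verbose"]))
//     "shell"             ->  Ok(("/bin/zsh", vec![]))                  // sentinel resolved to $SHELL
//     ""                  ->  Err(CONFIG_ERROR: "... has an empty command")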
- #[allow(clippy::result_large_err)] - fn parse_agent_command( - agent_cfg: &pu_core::types::AgentConfig, - agent_type: &str, - ) -> Result<(String, Vec), Response> { - let mut parts: Vec = agent_cfg - .command - .split_whitespace() - .map(String::from) - .collect(); - if parts.is_empty() { - return Err(Response::Error { - code: "CONFIG_ERROR".into(), - message: format!("agent type '{agent_type}' has an empty command"), - }); - } - let command = parts.remove(0); - let command = if command == "shell" { - std::env::var("SHELL").unwrap_or_else(|_| "/bin/sh".into()) - } else { - command - }; - Ok((command, parts)) - } - - fn should_inject_prompt_via_stdin(agent_type: &str, interactive: bool, prompt: &str) -> bool { - !prompt.is_empty() && interactive && matches!(agent_type, "claude" | "terminal") - } - - fn resolved_prompt_flag(agent_type: &str, prompt_flag: Option<&str>) -> Option { - match (agent_type, prompt_flag) { - ("opencode", None) => Some("--prompt".to_string()), - (_, Some(flag)) => Some(flag.to_string()), - _ => None, - } - } - - /// Remove pending inputs and session handles for the given agent IDs, then kill their - /// PTY processes. Returns the extracted handles (for callers that need exit codes). - async fn kill_agents(&self, agent_ids: &[String]) -> Vec<(String, AgentHandle)> { - { - let mut pending_inputs = self.pending_initial_inputs.lock().await; - for id in agent_ids { - pending_inputs.remove(id); - } - } - let handles: Vec<(String, AgentHandle)> = { - let mut sessions = self.sessions.lock().await; - agent_ids - .iter() - .filter_map(|id| sessions.remove(id).map(|h| (id.clone(), h))) - .collect() - }; - for (id, handle) in &handles { - if let Err(e) = self.pty_host.kill(handle, Duration::from_secs(5)).await { - tracing::debug!(agent_id = id, "kill failed: {e}"); - } - } - handles - } - - /// On daemon restart, reconcile agents that appear alive in the manifest but have no - /// live process. Resumable agents (claude, codex, opencode) with a session_id get marked - /// suspended so the Swift side can auto-resume them. Others get marked Broken. - /// Called synchronously inside handle_init so state is correct before the first status read. - fn reconcile_agents_on_init(project_root: &str) { - let root = Path::new(project_root); - let Ok(m) = manifest::read_manifest(root) else { - return; - }; - let is_stale = |a: &AgentEntry| { - !a.suspended && matches!(a.status, AgentStatus::Streaming | AgentStatus::Waiting) - }; - let has_stale = m - .agents - .values() - .chain(m.worktrees.values().flat_map(|wt| wt.agents.values())) - .any(is_stale); - if !has_stale { - return; - } - let is_resumable = |t: &str| matches!(t, "claude" | "codex" | "opencode"); - let now = chrono::Utc::now(); - manifest::update_manifest(root, move |mut m| { - for agent in m.agents.values_mut().chain( - m.worktrees - .values_mut() - .flat_map(|wt| wt.agents.values_mut()), - ) { - if !agent.suspended - && matches!(agent.status, AgentStatus::Streaming | AgentStatus::Waiting) - { - if agent.session_id.is_some() && is_resumable(&agent.agent_type) { - agent.status = AgentStatus::Waiting; - agent.suspended = true; - agent.pid = None; - agent.suspended_at = Some(now); - } else { - agent.status = AgentStatus::Broken; - agent.completed_at = Some(now); - } - } - } - m - }) - .ok(); - } - - /// Scan the manifest for Running/Idle agents whose PID is dead, mark them Lost. - /// Called once per project on the first status request after daemon (re)start. 
- /// Note: Suspended agents are intentionally unaffected — they have no PID and are paused. - fn reap_stale_agents(project_root: &str) { - let root = Path::new(project_root); - let Ok(m) = manifest::read_manifest(root) else { - return; - }; - let needs_reap = |a: &AgentEntry| { - !a.suspended - && matches!(a.status, AgentStatus::Streaming | AgentStatus::Waiting) - && a.pid - .is_none_or(|pid| !daemon_lifecycle::is_process_alive(pid)) - }; - let has_stale = m - .agents - .values() - .chain(m.worktrees.values().flat_map(|wt| wt.agents.values())) - .any(needs_reap); - if !has_stale { - return; - } - manifest::update_manifest(root, move |mut m| { - let now = chrono::Utc::now(); - for agent in m.agents.values_mut().chain( - m.worktrees - .values_mut() - .flat_map(|wt| wt.agents.values_mut()), - ) { - if !agent.suspended - && matches!(agent.status, AgentStatus::Streaming | AgentStatus::Waiting) - && agent - .pid - .is_none_or(|pid| !daemon_lifecycle::is_process_alive(pid)) - { - agent.status = AgentStatus::Broken; - agent.completed_at = Some(now); - } - } - m - }) - .ok(); - } - - async fn rollback_worktree( - &self, - root_path: &Path, - worktree_id: Option<&str>, - branch: Option<&str>, - ) { - if let Some(wt_id) = worktree_id { - let wt_path = paths::worktree_path(root_path, wt_id); - git::remove_worktree(root_path, &wt_path).await.ok(); - } - if let Some(b) = branch { - git::delete_local_branch(root_path, b).await.ok(); - } - } - - fn agent_not_found(agent_id: &str) -> Response { - Response::Error { - code: "AGENT_NOT_FOUND".into(), - message: format!("no active session for agent {agent_id}"), - } - } - - fn error_response(e: &PuError) -> Response { - Response::Error { - code: e.code().into(), - message: e.to_string(), - } - } - - /// Read manifest from disk (off async runtime). - async fn read_manifest_async(&self, project_root: &str) -> Result { - let pr = project_root.to_string(); - tokio::task::spawn_blocking(move || manifest::read_manifest(Path::new(&pr))) - .await - .unwrap_or_else(|e| Err(PuError::Io(std::io::Error::other(e)))) - } - - // --- Status Push --- - - async fn handle_subscribe_status(&self, project_root: &str) -> Response { - self.ensure_status_channel(project_root).await; - Response::StatusSubscribed - } - - async fn ensure_status_channel(&self, project_root: &str) { - let mut channels = self.status_channels.lock().await; - channels - .entry(project_root.to_string()) - .or_insert_with(|| tokio::sync::broadcast::channel(64).0); - } - - /// Get a status broadcast receiver for a project (used by IPC server for streaming). - pub async fn subscribe_status( - &self, - project_root: &str, - ) -> tokio::sync::broadcast::Receiver<()> { - let mut channels = self.status_channels.lock().await; - let tx = channels - .entry(project_root.to_string()) - .or_insert_with(|| tokio::sync::broadcast::channel(64).0); - tx.subscribe() - } - - /// Notify all status subscribers that state has changed. - async fn notify_status_change(&self, project_root: &str) { - let channels = self.status_channels.lock().await; - if let Some(tx) = channels.get(project_root) { - let _ = tx.send(()); - } - } - - /// Compute a full status report for a project (used by status push and handle_status). 
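// Illustrative sketch (not part of engine.rs): how a server-side streaming task might
// consume the per-project status channel exposed by subscribe_status above. The task
// shape and the "forward to client" step are assumptions; the Engine methods used
// (subscribe_status, compute_full_status) are the ones defined in this file.
async fn stream_status_updates(engine: std::sync::Arc<Engine>, project_root: String) {
    let mut rx = engine.subscribe_status(&project_root).await;
    loop {
        match rx.recv().await {
            // A unit value only signals "something changed"; recompute the full report.
            Ok(()) => {
                if let Ok((_worktrees, _agents)) =
                    engine.compute_full_status(&project_root).await
                {
                    // serialize and forward the report to the connected client here
                }
            }
            // A lagged receiver just missed some notifications; the next recompute
            // catches up, so it is safe to keep polling.
            Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue,
            Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
        }
    }
}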
- pub async fn compute_full_status( - &self, - project_root: &str, - ) -> Result<(Vec, Vec), PuError> { - let m = self.read_manifest_async(project_root).await?; - let sessions = self.sessions.lock().await; - let mut agents: Vec = m - .agents - .values() - .map(|a| self.build_agent_status_report(a, &sessions, None)) - .collect(); - agents.sort_by_key(|a| a.started_at); - let worktrees: Vec = m - .worktrees - .into_values() - .map(|mut wt| { - for agent in wt.agents.values_mut() { - let (status, exit_code, _idle) = - self.live_agent_status_sync(&agent.id, agent, &sessions); - agent.status = status; - agent.exit_code = exit_code; - } - wt - }) - .collect(); - Ok((worktrees, agents)) - } - - // --- Template CRUD handlers --- - - async fn handle_list_templates(&self, project_root: &str) -> Response { - let pr = project_root.to_string(); - match tokio::task::spawn_blocking(move || { - let root = Path::new(&pr); - let templates = pu_core::template::list_templates(root); - let infos: Vec = templates - .into_iter() - .map(|t| TemplateInfo { - name: t.name, - description: t.description, - agent: t.agent, - source: t.source, - variables: pu_core::template::extract_variables(&t.body), - command: t.command, - }) - .collect(); - infos - }) - .await - { - Ok(templates) => Response::TemplateList { templates }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - async fn handle_get_template(&self, project_root: &str, name: &str) -> Response { - let pr = project_root.to_string(); - let tpl_name = name.to_string(); - match tokio::task::spawn_blocking(move || { - let root = Path::new(&pr); - pu_core::template::find_template(root, &tpl_name) - }) - .await - { - Ok(Some(t)) => Response::TemplateDetail { - name: t.name, - description: t.description, - agent: t.agent, - variables: pu_core::template::extract_variables(&t.body), - body: t.body, - source: t.source, - command: t.command, - }, - Ok(None) => Response::Error { - code: "NOT_FOUND".into(), - message: format!("template '{name}' not found"), - }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - #[allow(clippy::too_many_arguments)] - async fn handle_save_template( - &self, - project_root: &str, - name: &str, - description: &str, - agent: &str, - body: &str, - scope: &str, - command: Option, - ) -> Response { - let dir = match Self::resolve_scope_dir( - project_root, - scope, - paths::templates_dir, - paths::global_templates_dir, - ) { - Ok(d) => d, - Err(msg) => { - return Response::Error { - code: "IO_ERROR".into(), - message: msg, - }; - } - }; - let n = name.to_string(); - let d = description.to_string(); - let a = agent.to_string(); - let b = body.to_string(); - match tokio::task::spawn_blocking(move || { - pu_core::template::save_template_with_command(&dir, &n, &d, &a, &b, command.as_deref()) - }) - .await - { - Ok(Ok(())) => Response::Ok, - Ok(Err(e)) => Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to save template: {e}"), - }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - async fn handle_delete_template( - &self, - project_root: &str, - name: &str, - scope: &str, - ) -> Response { - let dir = match Self::resolve_scope_dir( - project_root, - scope, - paths::templates_dir, - paths::global_templates_dir, - ) { - Ok(d) => d, - Err(msg) => { - return Response::Error { - code: "IO_ERROR".into(), - message: 
msg, - }; - } - }; - let n = name.to_string(); - match tokio::task::spawn_blocking(move || pu_core::template::delete_template(&dir, &n)) - .await - { - Ok(Ok(_)) => Response::Ok, - Ok(Err(e)) => Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to delete template: {e}"), - }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - // --- Agent def CRUD handlers --- - - async fn handle_list_agent_defs(&self, project_root: &str) -> Response { - let pr = project_root.to_string(); - match tokio::task::spawn_blocking(move || { - let root = Path::new(&pr); - let defs = pu_core::agent_def::list_agent_defs(root); - let infos: Vec = defs - .into_iter() - .map(|d| AgentDefInfo { - name: d.name, - agent_type: d.agent_type, - template: d.template, - inline_prompt: d.inline_prompt, - tags: d.tags, - scope: d.scope, - available_in_command_dialog: d.available_in_command_dialog, - icon: d.icon, - command: d.command, - }) - .collect(); - infos - }) - .await - { - Ok(agent_defs) => Response::AgentDefList { agent_defs }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - async fn handle_get_agent_def(&self, project_root: &str, name: &str) -> Response { - let pr = project_root.to_string(); - let n = name.to_string(); - match tokio::task::spawn_blocking(move || { - pu_core::agent_def::find_agent_def(Path::new(&pr), &n) - }) - .await - { - Ok(Some(d)) => Response::AgentDefDetail { - name: d.name, - agent_type: d.agent_type, - template: d.template, - inline_prompt: d.inline_prompt, - tags: d.tags, - scope: d.scope, - available_in_command_dialog: d.available_in_command_dialog, - icon: d.icon, - command: d.command, - }, - Ok(None) => Response::Error { - code: "NOT_FOUND".into(), - message: format!("agent def '{name}' not found"), - }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - #[allow(clippy::too_many_arguments)] - async fn handle_save_agent_def( - &self, - project_root: &str, - name: &str, - agent_type: &str, - template: Option, - inline_prompt: Option, - tags: Vec, - scope: &str, - available_in_command_dialog: bool, - icon: Option, - command: Option, - ) -> Response { - let dir = match Self::resolve_scope_dir( - project_root, - scope, - paths::agents_dir, - paths::global_agents_dir, - ) { - Ok(d) => d, - Err(msg) => { - return Response::Error { - code: "IO_ERROR".into(), - message: msg, - }; - } - }; - let def = pu_core::agent_def::AgentDef { - name: name.to_string(), - agent_type: agent_type.to_string(), - template, - inline_prompt, - tags, - scope: scope.to_string(), - available_in_command_dialog, - icon, - command, - }; - match tokio::task::spawn_blocking(move || pu_core::agent_def::save_agent_def(&dir, &def)) - .await - { - Ok(Ok(())) => Response::Ok, - Ok(Err(e)) => Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to save agent def: {e}"), - }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - async fn handle_delete_agent_def( - &self, - project_root: &str, - name: &str, - scope: &str, - ) -> Response { - let dir = match Self::resolve_scope_dir( - project_root, - scope, - paths::agents_dir, - paths::global_agents_dir, - ) { - Ok(d) => d, - Err(msg) => { - return Response::Error { - code: "IO_ERROR".into(), - message: msg, - }; - } - }; - let n = name.to_string(); - match 
tokio::task::spawn_blocking(move || pu_core::agent_def::delete_agent_def(&dir, &n)) - .await - { - Ok(Ok(_)) => Response::Ok, - Ok(Err(e)) => Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to delete agent def: {e}"), - }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - // --- Swarm def CRUD handlers --- - - async fn handle_list_swarm_defs(&self, project_root: &str) -> Response { - let pr = project_root.to_string(); - match tokio::task::spawn_blocking(move || { - let root = Path::new(&pr); - let defs = pu_core::swarm_def::list_swarm_defs(root); - let infos: Vec = defs - .into_iter() - .map(|d| SwarmDefInfo { - name: d.name, - worktree_count: d.worktree_count, - worktree_template: d.worktree_template, - roster: d - .roster - .into_iter() - .map(|r| SwarmRosterEntryPayload { - agent_def: r.agent_def, - role: r.role, - quantity: r.quantity, - }) - .collect(), - include_terminal: d.include_terminal, - scope: d.scope, - }) - .collect(); - infos - }) - .await - { - Ok(swarm_defs) => Response::SwarmDefList { swarm_defs }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - async fn handle_get_swarm_def(&self, project_root: &str, name: &str) -> Response { - let pr = project_root.to_string(); - let n = name.to_string(); - match tokio::task::spawn_blocking(move || { - pu_core::swarm_def::find_swarm_def(Path::new(&pr), &n) - }) - .await - { - Ok(Some(d)) => Response::SwarmDefDetail { - name: d.name, - worktree_count: d.worktree_count, - worktree_template: d.worktree_template, - roster: d - .roster - .into_iter() - .map(|r| SwarmRosterEntryPayload { - agent_def: r.agent_def, - role: r.role, - quantity: r.quantity, - }) - .collect(), - include_terminal: d.include_terminal, - scope: d.scope, - }, - Ok(None) => Response::Error { - code: "NOT_FOUND".into(), - message: format!("swarm def '{name}' not found"), - }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - #[allow(clippy::too_many_arguments)] - async fn handle_save_swarm_def( - &self, - project_root: &str, - name: &str, - worktree_count: u32, - worktree_template: &str, - roster: Vec, - include_terminal: bool, - scope: &str, - ) -> Response { - let dir = match Self::resolve_scope_dir( - project_root, - scope, - paths::swarms_dir, - paths::global_swarms_dir, - ) { - Ok(d) => d, - Err(msg) => { - return Response::Error { - code: "IO_ERROR".into(), - message: msg, - }; - } - }; - let def = pu_core::swarm_def::SwarmDef { - name: name.to_string(), - worktree_count, - worktree_template: worktree_template.to_string(), - roster: roster - .into_iter() - .map(|r| pu_core::swarm_def::SwarmRosterEntry { - agent_def: r.agent_def, - role: r.role, - quantity: r.quantity, - }) - .collect(), - include_terminal, - scope: scope.to_string(), - }; - match tokio::task::spawn_blocking(move || pu_core::swarm_def::save_swarm_def(&dir, &def)) - .await - { - Ok(Ok(())) => Response::Ok, - Ok(Err(e)) => Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to save swarm def: {e}"), - }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - async fn handle_delete_swarm_def( - &self, - project_root: &str, - name: &str, - scope: &str, - ) -> Response { - let dir = match Self::resolve_scope_dir( - project_root, - scope, - paths::swarms_dir, - 
paths::global_swarms_dir, - ) { - Ok(d) => d, - Err(msg) => { - return Response::Error { - code: "IO_ERROR".into(), - message: msg, - }; - } - }; - let n = name.to_string(); - match tokio::task::spawn_blocking(move || pu_core::swarm_def::delete_swarm_def(&dir, &n)) - .await - { - Ok(Ok(_)) => Response::Ok, - Ok(Err(e)) => Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to delete swarm def: {e}"), - }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - // --- RunSwarm handler --- - - async fn handle_run_swarm( - &self, - project_root: &str, - swarm_name: &str, - vars: std::collections::HashMap, - ) -> Response { - // Read the swarm definition - let pr = project_root.to_string(); - let sn = swarm_name.to_string(); - let swarm_def = match tokio::task::spawn_blocking(move || { - pu_core::swarm_def::find_swarm_def(Path::new(&pr), &sn) - }) - .await - { - Ok(Some(def)) => def, - Ok(None) => { - return Response::Error { - code: "NOT_FOUND".into(), - message: format!("swarm def '{swarm_name}' not found"), - }; - } - Err(e) => { - return Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }; - } - }; - - // Pre-resolve all agent defs and their prompts once, before iterating worktrees. - let mut resolved_roster: Vec<(pu_core::agent_def::AgentDef, String, Option, u32)> = - Vec::new(); - for entry in &swarm_def.roster { - let pr2 = project_root.to_string(); - let ad_name = entry.agent_def.clone(); - let agent_def = match tokio::task::spawn_blocking(move || { - pu_core::agent_def::find_agent_def(Path::new(&pr2), &ad_name) - }) - .await - { - Ok(Some(def)) => def, - Ok(None) => { - return Response::Error { - code: "NOT_FOUND".into(), - message: format!( - "agent def '{}' referenced by swarm not found", - entry.agent_def - ), - }; - } - Err(e) => { - return Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }; - } - }; - - let (prompt, template_command) = if let Some(ref tpl_name) = agent_def.template { - let pr3 = project_root.to_string(); - let tn = tpl_name.clone(); - let vars_clone = vars.clone(); - match tokio::task::spawn_blocking(move || { - pu_core::template::find_template(Path::new(&pr3), &tn) - }) - .await - { - Ok(Some(tpl)) => { - let rendered = pu_core::template::render(&tpl, &vars_clone); - let cmd = pu_core::template::render_command(&tpl, &vars_clone); - (rendered, cmd) - } - Ok(None) => { - return Response::Error { - code: "NOT_FOUND".into(), - message: format!("template '{tpl_name}' not found"), - }; - } - Err(e) => { - return Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }; - } - } - } else { - (agent_def.inline_prompt.clone().unwrap_or_default(), None) - }; - - resolved_roster.push((agent_def, prompt, template_command, entry.quantity)); - } - - let mut spawned_agents = Vec::new(); - - for wt_index in 0..swarm_def.worktree_count { - let wt_name = if swarm_def.worktree_template.is_empty() { - format!("{swarm_name}-{wt_index}") - } else { - swarm_def - .worktree_template - .replace("{index}", &wt_index.to_string()) - }; - - let mut worktree_id: Option = None; - - for (agent_def, prompt, template_command, quantity) in &resolved_roster { - for q in 0..*quantity { - let agent_name = format!("{}-{}-{wt_index}-{q}", swarm_name, agent_def.name); - - // First agent creates the worktree; subsequent agents reuse it - let (spawn_name, spawn_worktree) = if 
worktree_id.is_some() { - (Some(agent_name), worktree_id.clone()) - } else { - (Some(wt_name.clone()), None) - }; - - // Agent def command takes precedence, then template command - let resolved_command = agent_def - .command - .clone() - .or_else(|| template_command.clone()); - - let resp = self - .handle_spawn(SpawnParams { - project_root: project_root.to_string(), - prompt: prompt.to_string(), - agent_type: agent_def.agent_type.clone(), - name: spawn_name, - base: None, - root: false, - worktree: spawn_worktree, - terminal_command: resolved_command, - no_auto: false, - extra_args: vec![], - plan_mode: false, - no_trigger: false, - trigger: None, - }) - .await; - - match resp { - Response::SpawnResult { - agent_id, - worktree_id: wt_id, - .. - } => { - spawned_agents.push(agent_id); - if worktree_id.is_none() { - worktree_id = wt_id; - } - } - Response::Error { code, message } => { - return Response::RunSwarmPartial { - spawned_agents, - error_code: code, - error_message: message, - }; - } - _ => {} - } - } - } - - // If include_terminal is set, spawn a bare terminal into this worktree - if swarm_def.include_terminal { - if let Some(ref wt_id) = worktree_id { - let term_name = format!("{swarm_name}-terminal-{wt_index}"); - let resp = self - .handle_spawn(SpawnParams { - project_root: project_root.to_string(), - prompt: String::new(), - agent_type: "terminal".into(), - name: Some(term_name), - base: None, - root: false, - worktree: Some(wt_id.clone()), - terminal_command: None, - no_auto: false, - extra_args: vec![], - plan_mode: false, - no_trigger: false, - trigger: None, - }) - .await; - if let Response::SpawnResult { agent_id, .. } = resp { - spawned_agents.push(agent_id); - } - } - } - } - - Response::RunSwarmResult { spawned_agents } - } - - // --- Schedule handlers --- - - async fn handle_list_schedules(&self, project_root: &str) -> Response { - let pr = project_root.to_string(); - match tokio::task::spawn_blocking(move || { - let root = Path::new(&pr); - let defs = pu_core::schedule_def::list_schedule_defs(root); - let infos: Vec = - defs.into_iter().map(Self::schedule_def_to_info).collect(); - infos - }) - .await - { - Ok(schedules) => Response::ScheduleList { schedules }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - async fn handle_get_schedule(&self, project_root: &str, name: &str) -> Response { - let pr = project_root.to_string(); - let n = name.to_string(); - match tokio::task::spawn_blocking(move || { - pu_core::schedule_def::find_schedule_def(Path::new(&pr), &n) - }) - .await - { - Ok(Some(d)) => Self::schedule_def_to_detail(d), - Ok(None) => Response::Error { - code: "NOT_FOUND".into(), - message: format!("schedule '{name}' not found"), - }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - #[allow(clippy::too_many_arguments)] - async fn handle_save_schedule( - &self, - project_root: &str, - name: &str, - enabled: bool, - recurrence: &str, - start_at: chrono::DateTime, - trigger: ScheduleTriggerPayload, - target: &str, - scope: &str, - root: bool, - agent_name: Option, - ) -> Response { - let dir = match Self::resolve_scope_dir( - project_root, - scope, - paths::schedules_dir, - paths::global_schedules_dir, - ) { - Ok(d) => d, - Err(msg) => { - return Response::Error { - code: "IO_ERROR".into(), - message: msg, - }; - } - }; - let rec = match Self::parse_recurrence(recurrence) { - Ok(r) => r, - Err(msg) => { - return 
Response::Error { - code: "INVALID_INPUT".into(), - message: msg, - }; - } - }; - let now = chrono::Utc::now(); - let next_run = if enabled { - pu_core::schedule_def::next_occurrence(start_at, &rec, now) - } else { - None - }; - let def = pu_core::schedule_def::ScheduleDef { - name: name.to_string(), - enabled, - recurrence: rec, - start_at, - next_run, - trigger: Self::payload_to_trigger(&trigger), - project_root: project_root.to_string(), - target: target.to_string(), - root, - agent_name, - scope: scope.to_string(), - created_at: now, - }; - match tokio::task::spawn_blocking(move || { - pu_core::schedule_def::save_schedule_def(&dir, &def) - }) - .await - { - Ok(Ok(())) => Response::Ok, - Ok(Err(e)) => Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to save schedule: {e}"), - }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - async fn handle_delete_schedule( - &self, - project_root: &str, - name: &str, - scope: &str, - ) -> Response { - let dir = match Self::resolve_scope_dir( - project_root, - scope, - paths::schedules_dir, - paths::global_schedules_dir, - ) { - Ok(d) => d, - Err(msg) => { - return Response::Error { - code: "IO_ERROR".into(), - message: msg, - }; - } - }; - let n = name.to_string(); - match tokio::task::spawn_blocking(move || { - pu_core::schedule_def::delete_schedule_def(&dir, &n) - }) - .await - { - Ok(Ok(_)) => Response::Ok, - Ok(Err(e)) => Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to delete schedule: {e}"), - }, - Err(e) => Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }, - } - } - - async fn handle_enable_schedule(&self, project_root: &str, name: &str) -> Response { - let pr = project_root.to_string(); - let n = name.to_string(); - tokio::task::spawn_blocking(move || { - let root = Path::new(&pr); - let mut def = match pu_core::schedule_def::find_schedule_def(root, &n) { - Some(d) => d, - None => { - return Response::Error { - code: "NOT_FOUND".into(), - message: format!("schedule '{n}' not found"), - }; - } - }; - def.enabled = true; - let now = chrono::Utc::now(); - def.next_run = - pu_core::schedule_def::next_occurrence(def.start_at, &def.recurrence, now); - let dir = paths::schedules_dir(root); - match pu_core::schedule_def::save_schedule_def(&dir, &def) { - Ok(()) => Response::Ok, - Err(e) => Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to save schedule: {e}"), - }, - } - }) - .await - .unwrap_or_else(|e| Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }) - } - - async fn handle_disable_schedule(&self, project_root: &str, name: &str) -> Response { - let pr = project_root.to_string(); - let n = name.to_string(); - tokio::task::spawn_blocking(move || { - let root = Path::new(&pr); - let mut def = match pu_core::schedule_def::find_schedule_def(root, &n) { - Some(d) => d, - None => { - return Response::Error { - code: "NOT_FOUND".into(), - message: format!("schedule '{n}' not found"), - }; - } - }; - def.enabled = false; - def.next_run = None; - let dir = paths::schedules_dir(root); - match pu_core::schedule_def::save_schedule_def(&dir, &def) { - Ok(()) => Response::Ok, - Err(e) => Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to save schedule: {e}"), - }, - } - }) - .await - .unwrap_or_else(|e| Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: 
{e}"), - }) - } - - fn schedule_def_to_info(d: pu_core::schedule_def::ScheduleDef) -> ScheduleInfo { - ScheduleInfo { - name: d.name, - enabled: d.enabled, - recurrence: Self::recurrence_to_string(&d.recurrence), - start_at: d.start_at, - next_run: d.next_run, - trigger: Self::trigger_to_payload(&d.trigger), - project_root: d.project_root, - target: d.target, - scope: d.scope, - root: d.root, - agent_name: d.agent_name, - created_at: d.created_at, - } - } - - fn schedule_def_to_detail(d: pu_core::schedule_def::ScheduleDef) -> Response { - Response::ScheduleDetail { - name: d.name, - enabled: d.enabled, - recurrence: Self::recurrence_to_string(&d.recurrence), - start_at: d.start_at, - next_run: d.next_run, - trigger: Self::trigger_to_payload(&d.trigger), - project_root: d.project_root, - target: d.target, - scope: d.scope, - root: d.root, - agent_name: d.agent_name, - created_at: d.created_at, - } - } - - fn recurrence_to_string(r: &pu_core::schedule_def::Recurrence) -> String { - match r { - pu_core::schedule_def::Recurrence::None => "none", - pu_core::schedule_def::Recurrence::Hourly => "hourly", - pu_core::schedule_def::Recurrence::Daily => "daily", - pu_core::schedule_def::Recurrence::Weekdays => "weekdays", - pu_core::schedule_def::Recurrence::Weekly => "weekly", - pu_core::schedule_def::Recurrence::Monthly => "monthly", - } - .to_string() - } - - fn parse_recurrence(s: &str) -> Result { - match s { - "none" => Ok(pu_core::schedule_def::Recurrence::None), - "hourly" => Ok(pu_core::schedule_def::Recurrence::Hourly), - "daily" => Ok(pu_core::schedule_def::Recurrence::Daily), - "weekdays" => Ok(pu_core::schedule_def::Recurrence::Weekdays), - "weekly" => Ok(pu_core::schedule_def::Recurrence::Weekly), - "monthly" => Ok(pu_core::schedule_def::Recurrence::Monthly), - other => Err(format!("unknown recurrence: {other}")), - } - } - - fn trigger_to_payload(t: &pu_core::schedule_def::ScheduleTrigger) -> ScheduleTriggerPayload { - match t { - pu_core::schedule_def::ScheduleTrigger::AgentDef { name } => { - ScheduleTriggerPayload::AgentDef { name: name.clone() } - } - pu_core::schedule_def::ScheduleTrigger::SwarmDef { name, vars } => { - ScheduleTriggerPayload::SwarmDef { - name: name.clone(), - vars: vars.clone(), - } - } - pu_core::schedule_def::ScheduleTrigger::InlinePrompt { prompt, agent } => { - ScheduleTriggerPayload::InlinePrompt { - prompt: prompt.clone(), - agent: agent.clone(), - } - } - } - } - - fn payload_to_trigger(p: &ScheduleTriggerPayload) -> pu_core::schedule_def::ScheduleTrigger { - match p { - ScheduleTriggerPayload::AgentDef { name } => { - pu_core::schedule_def::ScheduleTrigger::AgentDef { name: name.clone() } - } - ScheduleTriggerPayload::SwarmDef { name, vars } => { - pu_core::schedule_def::ScheduleTrigger::SwarmDef { - name: name.clone(), - vars: vars.clone(), - } - } - ScheduleTriggerPayload::InlinePrompt { prompt, agent } => { - pu_core::schedule_def::ScheduleTrigger::InlinePrompt { - prompt: prompt.clone(), - agent: agent.clone(), - } - } - } - } - - // --- Trigger CRUD handlers --- - - async fn handle_list_triggers(&self, project_root: &str) -> Response { - let pr = project_root.to_string(); - tokio::task::spawn_blocking(move || { - let defs = pu_core::trigger_def::list_trigger_defs(Path::new(&pr)); - let triggers: Vec<_> = defs - .into_iter() - .map(pu_core::protocol::TriggerInfo::from) - .collect(); - Response::TriggerList { triggers } - }) - .await - .unwrap_or_else(|e| Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - 
}) - } - - async fn handle_get_trigger(&self, project_root: &str, name: &str) -> Response { - let pr = project_root.to_string(); - let name = name.to_string(); - tokio::task::spawn_blocking(move || { - match pu_core::trigger_def::find_trigger_def(Path::new(&pr), &name) { - Some(def) => Response::TriggerDetail(pu_core::protocol::TriggerInfo::from(def)), - None => Response::Error { - code: "NOT_FOUND".into(), - message: format!("trigger not found: {name}"), - }, - } - }) - .await - .unwrap_or_else(|e| Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }) - } - - async fn handle_save_trigger(&self, params: SaveTriggerParams) -> Response { - let pr = params.project_root; - let name = params.name; - // Normalize hyphenated form to underscored form (consistent with handle_evaluate_gate) - let on = params.on.replace('-', "_"); - let scope = params.scope; - let description = params.description; - let sequence = params.sequence; - let variables = params.variables; - tokio::task::spawn_blocking(move || { - let event = match on.as_str() { - "agent_idle" => pu_core::trigger_def::TriggerEvent::AgentIdle, - "pre_commit" => pu_core::trigger_def::TriggerEvent::PreCommit, - "pre_push" => pu_core::trigger_def::TriggerEvent::PrePush, - other => { - return Response::Error { - code: "INVALID_ARGUMENT".into(), - message: format!("unknown trigger event: {other}"), - }; - } - }; - let actions: Vec = - sequence.into_iter().map(Into::into).collect(); - let def = pu_core::trigger_def::TriggerDef { - name: name.clone(), - description, - on: event, - sequence: actions, - variables, - scope: scope.clone(), - }; - let dir = match Self::resolve_scope_dir( - &pr, - &scope, - pu_core::paths::triggers_dir, - pu_core::paths::global_triggers_dir, - ) { - Ok(d) => d, - Err(e) => { - return Response::Error { - code: "IO_ERROR".into(), - message: e, - }; - } - }; - match pu_core::trigger_def::save_trigger_def(&dir, &def) { - Ok(()) => Response::Ok, - Err(e) => Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to save trigger: {e}"), - }, - } - }) - .await - .unwrap_or_else(|e| Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }) - } - - async fn handle_delete_trigger(&self, project_root: &str, name: &str, scope: &str) -> Response { - let pr = project_root.to_string(); - let name = name.to_string(); - let scope = scope.to_string(); - tokio::task::spawn_blocking(move || { - let dir = match Self::resolve_scope_dir( - &pr, - &scope, - pu_core::paths::triggers_dir, - pu_core::paths::global_triggers_dir, - ) { - Ok(d) => d, - Err(e) => { - return Response::Error { - code: "IO_ERROR".into(), - message: e, - }; - } - }; - match pu_core::trigger_def::delete_trigger_def(&dir, &name) { - Ok(true) => Response::Ok, - Ok(false) => Response::Error { - code: "NOT_FOUND".into(), - message: format!("trigger not found: {name}"), - }, - Err(e) => Response::Error { - code: "IO_ERROR".into(), - message: format!("failed to delete trigger: {e}"), - }, - } - }) - .await - .unwrap_or_else(|e| Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }) - } - - async fn handle_evaluate_gate( - &self, - event: &str, - project_root: &str, - worktree_path: &str, - ) -> Response { - // Normalize hyphenated form (from git hooks) to underscored form - let normalized_event = event.replace('-', "_"); - let trigger_event = match normalized_event.as_str() { - "pre_commit" => 
pu_core::trigger_def::TriggerEvent::PreCommit, - "pre_push" => pu_core::trigger_def::TriggerEvent::PrePush, - other => { - return Response::Error { - code: "INVALID_ARGUMENT".into(), - message: format!("unsupported gate event: {other}"), - }; - } - }; - - let triggers = { - let pr = project_root.to_string(); - let evt = trigger_event.clone(); - match tokio::task::spawn_blocking(move || { - pu_core::trigger_def::triggers_for_event(Path::new(&pr), &evt) - }) - .await - { - Ok(t) => t, - Err(e) => { - return Response::Error { - code: "INTERNAL_ERROR".into(), - message: format!("task join error: {e}"), - }; - } - } - }; - - if triggers.is_empty() { - return Response::GateResult { - passed: true, - output: String::new(), - }; - } - - let wt = worktree_path.to_string(); - match crate::gate::evaluate_trigger_gates(&triggers, Path::new(&wt)).await { - Ok(result) => Response::GateResult { - passed: result.passed, - output: result.output, - }, - Err(e) => Response::GateResult { - passed: false, - output: format!("gate evaluation error: {e}"), - }, - } - } - - // --- Scheduler --- - - /// Start a background task that periodically checks for due schedules and fires them. - pub fn start_scheduler(self: &Arc) { - let engine = Arc::clone(self); - tokio::spawn(async move { - let mut interval = tokio::time::interval(Duration::from_secs(30)); - interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - loop { - interval.tick().await; - engine.scheduler_tick().await; - } - }); - } - - async fn scheduler_tick(&self) { - let projects = self.registered_projects(); - for project_root in projects { - let defs = { - let pr = project_root.clone(); - match tokio::task::spawn_blocking(move || { - pu_core::schedule_def::list_schedule_defs(Path::new(&pr)) - }) - .await - { - Ok(d) => d, - Err(_) => continue, - } - }; - - let now = chrono::Utc::now(); - for def in defs { - if !def.enabled { - continue; - } - if let Some(next_run) = def.next_run { - if next_run <= now { - self.fire_schedule(&def).await; - self.advance_schedule(def, now).await; - } - } - } - - // Evaluate agent_idle triggers for active agents - self.evaluate_idle_triggers(&project_root).await; - } - } - - /// Check all agents with active trigger sequences and advance them when idle. - async fn evaluate_idle_triggers(&self, project_root: &str) { - let manifest = match self.read_manifest_async(project_root).await { - Ok(m) => m, - Err(_) => return, - }; - - // Collect candidate agents with their trigger name, seq index, and worktree path. - // Briefly hold the sessions lock to check live status, then release before I/O. - let candidates: Vec<(String, String, u32, Option)> = { - let sessions = self.sessions.lock().await; - let mut result = Vec::new(); - for agent in manifest.all_agents() { - if agent.trigger_state != Some(pu_core::types::TriggerState::Active) { - continue; - } - let trigger_name = match &agent.trigger_name { - Some(name) => name.clone(), - None => continue, // No bound trigger, skip - }; - let seq_index = agent.trigger_seq_index.unwrap_or(0); - let (status, _, _) = self.live_agent_status_sync(&agent.id, agent, &sessions); - if status != AgentStatus::Waiting { - continue; - } - let wt_path = match manifest.find_agent(&agent.id) { - Some(pu_core::types::AgentLocation::Worktree { worktree, .. 
}) => { - Some(std::path::PathBuf::from(&worktree.path)) - } - _ => None, - }; - result.push((agent.id.clone(), trigger_name, seq_index, wt_path)); - } - result - // sessions lock dropped here - }; - - if candidates.is_empty() { - return; - } - - // Load trigger defs once for this project - let pr = project_root.to_string(); - let idle_triggers = match tokio::task::spawn_blocking(move || { - pu_core::trigger_def::triggers_for_event( - Path::new(&pr), - &pu_core::trigger_def::TriggerEvent::AgentIdle, - ) - }) - .await - { - Ok(t) => t, - Err(_) => return, - }; - - // Index triggers by name for O(1) lookup - let trigger_map: std::collections::HashMap<&str, &pu_core::trigger_def::TriggerDef> = - idle_triggers.iter().map(|t| (t.name.as_str(), t)).collect(); - - for (agent_id, trigger_name, seq_index, wt_path) in &candidates { - let trigger = match trigger_map.get(trigger_name.as_str()) { - Some(t) => t, - None => { - // Trigger was removed since spawn — mark failed - self.update_trigger_state( - project_root, - agent_id, - pu_core::types::TriggerState::Failed, - None, - None, - ) - .await; - continue; - } - }; - - let sequence = &trigger.sequence; - let seq_index = *seq_index as usize; - if seq_index >= sequence.len() { - self.update_trigger_state( - project_root, - agent_id, - pu_core::types::TriggerState::Completed, - None, - None, - ) - .await; - continue; - } - - let action = &sequence[seq_index]; - let cwd = wt_path - .as_deref() - .unwrap_or_else(|| Path::new(project_root)); - - // If action has a gate, evaluate it first (no lock held) - if let Some(ref gate) = action.gate { - let resolved_run = - pu_core::trigger_def::substitute_variables(&gate.run, &trigger.variables); - - // Mark as Gating while the command runs - self.update_trigger_state( - project_root, - agent_id, - pu_core::types::TriggerState::Gating, - None, - None, - ) - .await; - - match crate::gate::run_gate_command(&resolved_run, cwd).await { - Ok((exit_code, stdout, stderr)) => { - let expect_exit = gate.expect_exit.unwrap_or(0); - if exit_code != expect_exit { - let max_retries = action - .max_retries - .unwrap_or(crate::gate::DEFAULT_GATE_MAX_RETRIES); - let manifest = self.read_manifest_async(project_root).await; - let attempts = manifest - .ok() - .and_then(|m| { - m.find_agent(agent_id).map(|loc| match loc { - pu_core::types::AgentLocation::Root(a) => a.gate_attempts, - pu_core::types::AgentLocation::Worktree { - agent, .. 
- } => agent.gate_attempts, - }) - }) - .flatten() - .unwrap_or(0); - - if attempts < max_retries { - let failure_msg = format!( - "\n\nGate '{resolved_run}' failed (exit {exit_code}, expected {expect_exit}):\n{stdout}{stderr}\nPlease fix the issues and try again.\n" - ); - if let Err(e) = self.inject_text(agent_id, &failure_msg).await { - tracing::warn!(agent_id, "failed to inject gate failure: {e}"); - } - self.update_trigger_state( - project_root, - agent_id, - pu_core::types::TriggerState::Active, - None, - Some(attempts + 1), - ) - .await; - } else { - self.update_trigger_state( - project_root, - agent_id, - pu_core::types::TriggerState::Failed, - None, - None, - ) - .await; - } - continue; - } - } - Err(e) => { - tracing::warn!(agent_id, gate = %resolved_run, "gate command error: {e}"); - self.update_trigger_state( - project_root, - agent_id, - pu_core::types::TriggerState::Failed, - None, - None, - ) - .await; - continue; - } - } - } - - // Inject text if present — only advance on success - if let Some(ref inject_text) = action.inject { - let resolved = - pu_core::trigger_def::substitute_variables(inject_text, &trigger.variables); - match self.inject_text(agent_id, &resolved).await { - Ok(true) => {} // success, proceed to advance - Ok(false) => { - tracing::warn!(agent_id, "inject_text: session not found, marking failed"); - self.update_trigger_state( - project_root, - agent_id, - pu_core::types::TriggerState::Failed, - None, - None, - ) - .await; - continue; - } - Err(e) => { - tracing::warn!(agent_id, "inject_text failed: {e}, marking failed"); - self.update_trigger_state( - project_root, - agent_id, - pu_core::types::TriggerState::Failed, - None, - None, - ) - .await; - continue; - } - } - } - - // Advance sequence index - let new_index = seq_index as u32 + 1; - let new_state = if new_index >= sequence.len() as u32 { - pu_core::types::TriggerState::Completed - } else { - pu_core::types::TriggerState::Active - }; - self.update_trigger_state(project_root, agent_id, new_state, Some(new_index), Some(0)) - .await; - } - } - - /// Inject text into an agent's PTY using chunked typing + Enter submission. - /// Returns `Ok(true)` on success, `Ok(false)` if the session was not found. 
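// Illustrative sketch (not part of engine.rs): the gate-retry decision applied above for
// a single trigger action, reduced to a pure function. GateOutcome and the parameter
// names are stand-ins for the pu_core trigger/agent state, not real types.
#[derive(Debug, PartialEq)]
enum GateOutcome {
    /// Gate passed (exit code matched expect_exit): continue to the inject step.
    Proceed,
    /// Gate failed but retries remain: re-prompt the agent and bump gate_attempts.
    Retry { next_attempts: u32 },
    /// Gate failed with the retry budget exhausted: mark the trigger Failed.
    GiveUp,
}

fn decide_gate_outcome(exit_code: i32, expect_exit: i32, attempts: u32, max_retries: u32) -> GateOutcome {
    if exit_code == expect_exit {
        GateOutcome::Proceed
    } else if attempts < max_retries {
        GateOutcome::Retry { next_attempts: attempts + 1 }
    } else {
        GateOutcome::GiveUp
    }
}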
- async fn inject_text(&self, agent_id: &str, text: &str) -> Result { - let fd = { - let sessions = self.sessions.lock().await; - sessions.get(agent_id).map(|handle| handle.master_fd()) - }; - match fd { - Some(fd) => { - self.pty_host - .write_chunked_submit(&fd, text.as_bytes()) - .await?; - Ok(true) - } - None => Ok(false), - } - } - - async fn update_trigger_state( - &self, - project_root: &str, - agent_id: &str, - state: pu_core::types::TriggerState, - seq_index: Option, - gate_attempts: Option, - ) { - let agent_id = agent_id.to_string(); - let pr = project_root.to_string(); - let result = tokio::task::spawn_blocking(move || { - manifest::update_manifest(Path::new(&pr), move |mut m| { - if let Some(agent) = m.find_agent_mut(&agent_id) { - agent.trigger_state = Some(state); - if let Some(idx) = seq_index { - agent.trigger_seq_index = Some(idx); - } - if let Some(attempts) = gate_attempts { - agent.gate_attempts = Some(attempts); - } - } - m - }) - }) - .await; - match result { - Ok(Ok(_)) => { - self.notify_status_change(project_root).await; - } - Ok(Err(e)) => { - tracing::warn!("failed to update trigger state in manifest: {e}"); - } - Err(e) => { - tracing::warn!("trigger state update task panicked: {e}"); - } - } - } - - async fn fire_schedule(&self, schedule: &pu_core::schedule_def::ScheduleDef) { - let result = match &schedule.trigger { - pu_core::schedule_def::ScheduleTrigger::AgentDef { name } => { - // Resolve agent def to get its type and prompt - let pr = schedule.project_root.clone(); - let project_path = Path::new(&pr); - if let Some(def) = pu_core::agent_def::find_agent_def(project_path, name) { - let empty_vars = std::collections::HashMap::new(); - let (prompt, template_command) = if let Some(ref ip) = def.inline_prompt { - (ip.clone(), None) - } else if let Some(ref tpl_name) = def.template { - match pu_core::template::find_template(project_path, tpl_name) { - Some(tpl) => { - let rendered = pu_core::template::render(&tpl, &empty_vars); - let cmd = pu_core::template::render_command(&tpl, &empty_vars); - (rendered, cmd) - } - None => ( - format!("Scheduled: agent def '{name}' (template not found)"), - None, - ), - } - } else { - (format!("Scheduled: run agent def '{name}'"), None) - }; - self.handle_request(Request::Spawn { - project_root: pr, - prompt, - agent: def.agent_type, - name: schedule.agent_name.clone(), - base: None, - root: schedule.root, - worktree: None, - command: def.command.or(template_command), - no_auto: false, - extra_args: vec![], - plan_mode: false, - no_trigger: false, - trigger: None, - }) - .await - } else { - Response::Error { - code: "NOT_FOUND".to_string(), - message: format!("agent def '{name}' not found"), - } - } - } - pu_core::schedule_def::ScheduleTrigger::SwarmDef { name, vars } => { - self.handle_request(Request::RunSwarm { - project_root: schedule.project_root.clone(), - swarm_name: name.clone(), - vars: vars.clone(), - }) - .await - } - pu_core::schedule_def::ScheduleTrigger::InlinePrompt { prompt, agent } => { - self.handle_request(Request::Spawn { - project_root: schedule.project_root.clone(), - prompt: prompt.clone(), - agent: agent.clone(), - name: schedule.agent_name.clone(), - base: None, - root: schedule.root, - worktree: None, - command: None, - no_auto: false, - extra_args: vec![], - plan_mode: false, - no_trigger: false, - trigger: None, - }) - .await - } - }; - - if let Response::Error { code, message } = result { - tracing::warn!( - schedule = schedule.name, - code, - message, - "scheduled task failed" - ); - } else { - 
tracing::info!(schedule = schedule.name, "scheduled task fired"); - } - } - - async fn advance_schedule( - &self, - mut schedule: pu_core::schedule_def::ScheduleDef, - now: chrono::DateTime, - ) { - let is_one_shot = schedule.recurrence == pu_core::schedule_def::Recurrence::None; - if is_one_shot { - schedule.enabled = false; - schedule.next_run = None; - } else { - schedule.next_run = pu_core::schedule_def::next_occurrence( - schedule.start_at, - &schedule.recurrence, - now, - ); - } - let pr = schedule.project_root.clone(); - let scope = schedule.scope.clone(); - let def = schedule; - if let Err(e) = tokio::task::spawn_blocking(move || { - let dir = if scope == "global" { - paths::global_schedules_dir()? - } else { - paths::schedules_dir(Path::new(&pr)) - }; - pu_core::schedule_def::save_schedule_def(&dir, &def) - }) - .await - .unwrap_or_else(|e| Err(std::io::Error::other(e))) - { - tracing::warn!(error = %e, "failed to advance schedule"); - } - } - - // --- Scope resolution helper --- - - fn resolve_scope_dir( - project_root: &str, - scope: &str, - local_fn: fn(&Path) -> std::path::PathBuf, - global_fn: fn() -> Result, - ) -> Result { - match scope { - "global" => global_fn().map_err(|e| e.to_string()), - "local" => Ok(local_fn(Path::new(project_root))), - other => Err(format!( - "unknown scope: {other} (expected 'local' or 'global')" - )), - } - } - - fn agent_pulse_entry( - &self, - agent: &AgentEntry, - sessions: &HashMap, - now: chrono::DateTime, - ) -> pu_core::protocol::AgentPulseEntry { - let (status, exit_code, idle_seconds) = - self.live_agent_status_sync(&agent.id, agent, sessions); - let runtime = (now - agent.started_at).num_seconds(); - let snippet = agent.prompt.as_ref().map(|p| { - let trimmed = p.trim(); - let truncated: String = trimmed.chars().take(77).collect(); - if truncated.len() < trimmed.len() { - format!("{truncated}...") - } else { - truncated - } - }); - pu_core::protocol::AgentPulseEntry { - id: agent.id.clone(), - name: agent.name.clone(), - agent_type: agent.agent_type.clone(), - status, - exit_code, - runtime_seconds: runtime, - idle_seconds, - prompt_snippet: snippet, - } - } - - async fn handle_pulse(&self, project_root: &str) -> Response { - let m = match self.read_manifest_async(project_root).await { - Ok(m) => m, - Err(e) => return Self::error_response(&e), - }; - - let sessions = self.sessions.lock().await; - let now = chrono::Utc::now(); - - // Build root-level agents - let root_agents: Vec = m - .agents - .values() - .map(|a| self.agent_pulse_entry(a, &sessions, now)) - .collect(); - - // Build worktree entries — collect all agent data in one lock acquisition - let active_worktrees: Vec<_> = m - .worktrees - .values() - .filter(|wt| wt.status == WorktreeStatus::Active) - .cloned() - .collect(); - - let wt_agents: Vec> = active_worktrees - .iter() - .map(|wt| { - wt.agents - .values() - .map(|a| self.agent_pulse_entry(a, &sessions, now)) - .collect() - }) - .collect(); - - // Drop sessions lock before shelling out to git - drop(sessions); - - let mut worktrees = Vec::new(); - for (wt, agents) in active_worktrees.iter().zip(wt_agents) { - let elapsed = (now - wt.created_at).num_seconds(); - - // Get git diff stats - let wt_path = std::path::PathBuf::from(&wt.path); - let (files_changed, insertions, deletions, diff_error) = if wt_path.exists() { - let base = wt.base_branch.as_deref(); - match git::diff_worktree(&wt_path, base, true).await { - Ok(output) => ( - output.files_changed, - output.insertions, - output.deletions, - None, - ), - Err(e) => (0, 
0, 0, Some(format!("{e}"))), - } - } else { - (0, 0, 0, None) - }; - - worktrees.push(pu_core::protocol::WorktreePulseEntry { - worktree_id: wt.id.clone(), - worktree_name: wt.name.clone(), - branch: wt.branch.clone(), - elapsed_seconds: elapsed, - agents, - files_changed, - insertions, - deletions, - diff_error, - }); - } - - Response::PulseReport { - worktrees, - root_agents, - } - } - - async fn handle_diff( - &self, - project_root: &str, - worktree_id: Option<&str>, - stat: bool, - ) -> Response { - let m = match self.read_manifest_async(project_root).await { - Ok(m) => m, - Err(e) => return Self::error_response(&e), - }; - - let worktrees: Vec = if let Some(wt_id) = worktree_id { - match m.worktrees.get(wt_id) { - Some(wt) => vec![wt.clone()], - None => { - return Response::Error { - code: "NOT_FOUND".into(), - message: format!("worktree '{wt_id}' not found"), - }; - } - } - } else { - m.worktrees - .into_values() - .filter(|wt| wt.status == WorktreeStatus::Active) - .collect() - }; - - if worktrees.is_empty() { - return Response::DiffResult { diffs: vec![] }; - } - - let is_targeted = worktree_id.is_some(); - let mut diffs = Vec::new(); - for wt in &worktrees { - let wt_path = std::path::PathBuf::from(&wt.path); - if !wt_path.exists() { - if is_targeted { - // Targeted query: report the error so callers can distinguish - // a deleted worktree from a clean one. - diffs.push(pu_core::protocol::WorktreeDiffEntry { - worktree_id: wt.id.clone(), - worktree_name: wt.name.clone(), - branch: wt.branch.clone(), - base_branch: wt.base_branch.clone(), - diff_output: String::new(), - files_changed: 0, - insertions: 0, - deletions: 0, - error: Some(format!("worktree directory not found: {}", wt.path)), - }); - } - // Bulk query: skip missing dirs (best-effort) - continue; - } - let base = wt.base_branch.as_deref(); - match git::diff_worktree(&wt_path, base, stat).await { - Ok(output) => { - diffs.push(pu_core::protocol::WorktreeDiffEntry { - worktree_id: wt.id.clone(), - worktree_name: wt.name.clone(), - branch: wt.branch.clone(), - base_branch: wt.base_branch.clone(), - diff_output: output.diff, - files_changed: output.files_changed, - insertions: output.insertions, - deletions: output.deletions, - error: None, - }); - } - Err(e) => { - tracing::warn!("failed to diff worktree {}: {}", wt.id, e); - diffs.push(pu_core::protocol::WorktreeDiffEntry { - worktree_id: wt.id.clone(), - worktree_name: wt.name.clone(), - branch: wt.branch.clone(), - base_branch: wt.base_branch.clone(), - diff_output: String::new(), - files_changed: 0, - insertions: 0, - deletions: 0, - error: Some(format!("{e}")), - }); - } - } - } - - Response::DiffResult { diffs } - } -} - -impl Drop for Engine { - fn drop(&mut self) { - // Kill all child processes so spawn_blocking reader/waitpid tasks can finish. - if let Ok(sessions) = self.sessions.try_lock() { - for handle in sessions.values() { - unsafe { - libc::kill(handle.pid as i32, libc::SIGKILL); - } - } - } - } -} - -/// Inject prompt text into a PTY and submit with Enter via chunked typing. -/// Returns `true` on success. -async fn inject_initial_prompt( - pty_host: &NativePtyHost, - master_fd: &Arc, - agent_id: &str, - prompt: &[u8], -) -> bool { - if prompt.is_empty() { - return true; - } - if let Err(e) = pty_host.write_chunked_submit(master_fd, prompt).await { - tracing::warn!("failed to inject initial prompt for {}: {}", agent_id, e); - return false; - } - true -} - -/// Repair a single Claude Code session JSONL file. 
-/// -/// Returns `true` if any repairs were made (a `.bak` backup is written). -fn repair_session_file(path: &Path) -> bool { - let Ok(content) = std::fs::read_to_string(path) else { - return false; - }; - - let mut lines: Vec = Vec::new(); - for raw_line in content.lines() { - if raw_line.trim().is_empty() { - continue; - } - match serde_json::from_str::(raw_line) { - Ok(v) => lines.push(v), - Err(_) => { - // Preserve unparseable lines as-is by wrapping in a raw marker - lines.push(serde_json::json!({"__raw": raw_line})); - } - } - } - - if lines.is_empty() { - return false; - } - - // Collect all "uuid" values into a set - let mut uuid_set: HashSet = HashSet::new(); - for entry in &lines { - if let Some(uuid) = entry.get("uuid").and_then(|v| v.as_str()) { - uuid_set.insert(uuid.to_string()); - } - } - - let mut modified = false; - - // Fix 1: Snapshot messageId collisions - // file-history-snapshot entries sometimes reuse a messageId that collides with - // a real message uuid. Nullify the messageId to prevent confusion. - for entry in &mut lines { - if entry.get("__raw").is_some() { - continue; - } - let is_snapshot = - entry.get("type").and_then(|v| v.as_str()) == Some("file-history-snapshot"); - if !is_snapshot { - continue; - } - if let Some(mid) = entry - .get("messageId") - .and_then(|v| v.as_str()) - .map(String::from) - { - if uuid_set.contains(&mid) { - entry["messageId"] = serde_json::Value::Null; - modified = true; - } - } - } - - // Fix 2: Broken parentUuid references — point to nearest preceding entry's uuid - // Fix 3: Disconnected roots — if >1 entry has parentUuid: null, stitch extras - let mut null_parent_count = 0; - let mut last_uuid: Option = None; - - for entry in &mut lines { - if entry.get("__raw").is_some() { - continue; - } - - let has_parent_uuid_field = entry.get("parentUuid").is_some(); - let parent_uuid_value = entry - .get("parentUuid") - .and_then(|v| v.as_str()) - .map(String::from); - - if has_parent_uuid_field { - match &parent_uuid_value { - Some(pu) if !uuid_set.contains(pu) => { - // Broken reference — point to nearest preceding uuid - if let Some(ref prev) = last_uuid { - entry["parentUuid"] = serde_json::Value::String(prev.clone()); - modified = true; - } - } - None => { - // parentUuid is null — this is a root - null_parent_count += 1; - if null_parent_count > 1 { - // Stitch disconnected root to nearest preceding uuid - if let Some(ref prev) = last_uuid { - entry["parentUuid"] = serde_json::Value::String(prev.clone()); - modified = true; - } - } - } - _ => {} - } - } - - // Track the most recent uuid for stitching - if let Some(uuid) = entry.get("uuid").and_then(|v| v.as_str()) { - last_uuid = Some(uuid.to_string()); - } - } - - if !modified { - return false; - } - - // Write backup - let backup = path.with_extension("jsonl.bak"); - let _ = std::fs::write(&backup, &content); - - // Write repaired file - let mut output = String::new(); - for entry in &lines { - if let Some(raw) = entry.get("__raw").and_then(|v| v.as_str()) { - output.push_str(raw); - } else { - output.push_str(&serde_json::to_string(entry).unwrap_or_default()); - } - output.push('\n'); - } - let _ = std::fs::write(path, &output); - - true -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_helpers::init_and_spawn; - use pu_core::protocol::{Request, Response}; - use tempfile::TempDir; - - #[tokio::test(flavor = "current_thread")] - async fn given_spawned_agent_should_return_attach_handles() { - let (engine, agent_id, _tmp) = init_and_spawn().await; - - let handles = 
engine.get_attach_handles(&agent_id).await; - assert!( - handles.is_some(), - "expected attach handles for spawned agent" - ); - - let (buffer, _fd, _exit_rx) = handles.unwrap(); - // Buffer exists and has a valid offset - let _ = buffer.current_offset(); - } - - #[tokio::test(flavor = "current_thread")] - async fn given_unknown_agent_should_return_none() { - let engine = Engine::new(); - let handles = engine.get_attach_handles("ag-nonexistent").await; - assert!(handles.is_none()); - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 2)] - async fn given_spawn_with_prompt_should_inject_in_background() { - let tmp = TempDir::new().unwrap(); - let project_root = tmp.path().join("project"); - std::fs::create_dir_all(&project_root).unwrap(); - let pr = project_root.to_string_lossy().to_string(); - - let engine = Engine::new(); - engine - .handle_request(Request::Init { - project_root: pr.clone(), - }) - .await; - - let resp = engine - .handle_request(Request::Spawn { - project_root: pr, - prompt: "hello world".into(), - agent: "terminal".into(), - name: None, - base: None, - root: true, - worktree: None, - command: None, - no_auto: false, - extra_args: vec![], - plan_mode: false, - no_trigger: false, - trigger: None, - }) - .await; - - let agent_id = match resp { - Response::SpawnResult { agent_id, .. } => agent_id, - other => panic!("expected SpawnResult, got {other:?}"), - }; - - // The background task should eventually drain pending_initial_inputs. - // Allow up to 5s for the injection to complete (includes readiness - // timeout + chunked write delays). - let deadline = tokio::time::Instant::now() + Duration::from_secs(5); - loop { - let still_pending = engine - .pending_initial_inputs - .lock() - .await - .contains_key(&agent_id); - if !still_pending { - break; - } - if tokio::time::Instant::now() >= deadline { - panic!("prompt was never consumed from pending_initial_inputs"); - } - tokio::time::sleep(Duration::from_millis(50)).await; - } - } - - #[test] - fn given_claude_prompt_should_inject_via_stdin() { - assert!(Engine::should_inject_prompt_via_stdin( - "claude", true, "hello" - )); - } - - #[test] - fn given_codex_prompt_should_not_inject_via_stdin() { - assert!(!Engine::should_inject_prompt_via_stdin( - "codex", true, "hello" - )); - } - - #[test] - fn given_non_interactive_agent_should_not_inject_via_stdin() { - assert!(!Engine::should_inject_prompt_via_stdin( - "terminal", false, "hello" - )); - } - - #[test] - fn given_opencode_without_configured_flag_should_use_prompt_flag() { - assert_eq!( - Engine::resolved_prompt_flag("opencode", None), - Some("--prompt".to_string()) - ); - } - - #[test] - fn given_codex_without_configured_flag_should_use_positional_prompt() { - assert_eq!(Engine::resolved_prompt_flag("codex", None), None); - } - - #[test] - fn given_configured_prompt_flag_should_be_preserved() { - assert_eq!( - Engine::resolved_prompt_flag("codex", Some("--prompt")), - Some("--prompt".to_string()) - ); - } - - #[test] - fn given_claude_build_resume_with_default_launch_args_should_include_yolo() { - // given - let engine = Engine::new(); - let agent_cfg = pu_core::types::AgentConfig { - name: "claude".into(), - command: "claude".into(), - prompt_flag: None, - interactive: true, - launch_args: None, // use defaults - }; - - // when - let (cmd, args, sid) = engine - .build_resume_command("claude", &agent_cfg, Some("sess-123")) - .unwrap(); - - // then - assert_eq!(cmd, "claude"); - assert!(args.contains(&"--dangerously-skip-permissions".to_string())); - 
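// i.e. with default launch args the assembled invocation is roughly:
//   claude --dangerously-skip-permissions --resume sess-123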
assert!(args.contains(&"--resume".to_string())); - assert!(args.contains(&"sess-123".to_string())); - assert_eq!(sid, Some("sess-123".to_string())); - } - - #[test] - fn given_claude_build_resume_with_empty_launch_args_should_omit_yolo() { - // given: user configured launchArgs: [] to disable auto-mode - let engine = Engine::new(); - let agent_cfg = pu_core::types::AgentConfig { - name: "claude".into(), - command: "claude".into(), - prompt_flag: None, - interactive: true, - launch_args: Some(vec![]), - }; - - // when - let (cmd, args, _) = engine - .build_resume_command("claude", &agent_cfg, Some("sess-456")) - .unwrap(); - - // then - assert_eq!(cmd, "claude"); - assert!(!args.contains(&"--dangerously-skip-permissions".to_string())); - assert!(args.contains(&"--resume".to_string())); - } - - #[test] - fn given_codex_build_resume_with_custom_launch_args_should_place_before_subcommand() { - // given - let engine = Engine::new(); - let agent_cfg = pu_core::types::AgentConfig { - name: "codex".into(), - command: "codex".into(), - prompt_flag: None, - interactive: true, - launch_args: Some(vec!["--approval-mode=full-auto".into()]), - }; - - // when - let (cmd, args, _) = engine - .build_resume_command("codex", &agent_cfg, None) - .unwrap(); - - // then — top-level flags must precede the subcommand - assert_eq!(cmd, "codex"); - assert_eq!(args, vec!["--approval-mode=full-auto", "resume", "--last"]); - } - - #[test] - fn given_codex_build_resume_with_defaults_should_place_full_auto_before_subcommand() { - // given - let engine = Engine::new(); - let agent_cfg = pu_core::types::AgentConfig { - name: "codex".into(), - command: "codex".into(), - prompt_flag: None, - interactive: true, - launch_args: None, // use defaults - }; - - // when - let (cmd, args, _) = engine - .build_resume_command("codex", &agent_cfg, None) - .unwrap(); - - // then — --full-auto is a top-level flag, must come before `resume` - assert_eq!(cmd, "codex"); - assert_eq!(args, vec!["--full-auto", "resume", "--last"]); - } - - /// Test the no_auto + launch_args interaction logic used in handle_spawn. - /// When no_auto is true and launch_args is None (defaults), launch args should be empty. - /// When no_auto is true but launch_args is explicitly configured, they should be preserved. 
- #[test] - fn given_no_auto_with_default_launch_args_should_produce_empty() { - let agent_cfg = pu_core::types::AgentConfig { - name: "claude".into(), - command: "claude".into(), - prompt_flag: None, - interactive: true, - launch_args: None, - }; - let no_auto = true; - let launch_args = if no_auto && agent_cfg.launch_args.is_none() { - Vec::new() - } else { - pu_core::types::resolved_launch_args("claude", agent_cfg.launch_args.as_deref()) - }; - assert!( - launch_args.is_empty(), - "--no-auto should skip default launch args" - ); - } - - #[test] - fn given_no_auto_with_explicit_launch_args_should_preserve_them() { - let agent_cfg = pu_core::types::AgentConfig { - name: "claude".into(), - command: "claude".into(), - prompt_flag: None, - interactive: true, - launch_args: Some(vec!["--verbose".into()]), - }; - let no_auto = true; - let launch_args = if no_auto && agent_cfg.launch_args.is_none() { - Vec::new() - } else { - pu_core::types::resolved_launch_args("claude", agent_cfg.launch_args.as_deref()) - }; - assert_eq!( - launch_args, - vec!["--verbose"], - "--no-auto should not affect explicit launch args" - ); - } - - #[test] - fn given_snapshot_collision_should_nullify_message_id() { - // given: a session file where a file-history-snapshot reuses a real message uuid - let tmp = TempDir::new().unwrap(); - let path = tmp.path().join("session.jsonl"); - let content = [ - r#"{"uuid":"u1","type":"summary","parentUuid":null}"#, - r#"{"uuid":"u2","type":"assistant","message":{"content":"hello"}}"#, - r#"{"type":"file-history-snapshot","messageId":"u2","data":{}}"#, - ] - .join("\n") - + "\n"; - std::fs::write(&path, &content).unwrap(); - - // when - let repaired = repair_session_file(&path); - - // then - assert!(repaired); - let result = std::fs::read_to_string(&path).unwrap(); - let lines: Vec = result - .lines() - .map(|l| serde_json::from_str(l).unwrap()) - .collect(); - // The snapshot's messageId should be null - assert_eq!(lines[2]["messageId"], serde_json::Value::Null); - // Backup should exist - assert!(tmp.path().join("session.jsonl.bak").exists()); - } - - #[test] - fn given_broken_parent_uuid_should_fix_reference() { - // given: a session file where an entry's parentUuid points to a non-existent uuid - let tmp = TempDir::new().unwrap(); - let path = tmp.path().join("session.jsonl"); - let content = [ - r#"{"uuid":"u1","type":"summary","parentUuid":null}"#, - r#"{"uuid":"u2","parentUuid":"u1","type":"assistant"}"#, - r#"{"uuid":"u3","parentUuid":"DOES_NOT_EXIST","type":"assistant"}"#, - ] - .join("\n") - + "\n"; - std::fs::write(&path, &content).unwrap(); - - // when - let repaired = repair_session_file(&path); - - // then - assert!(repaired); - let result = std::fs::read_to_string(&path).unwrap(); - let lines: Vec = result - .lines() - .map(|l| serde_json::from_str(l).unwrap()) - .collect(); - // u3's parentUuid should now point to u2 (nearest preceding) - assert_eq!(lines[2]["parentUuid"], "u2"); - } - - #[test] - fn given_disconnected_root_should_stitch() { - // given: a session file with two entries having parentUuid: null (disconnected roots) - let tmp = TempDir::new().unwrap(); - let path = tmp.path().join("session.jsonl"); - let content = [ - r#"{"uuid":"u1","type":"summary","parentUuid":null}"#, - r#"{"uuid":"u2","parentUuid":"u1","type":"assistant"}"#, - r#"{"uuid":"u3","parentUuid":null,"type":"assistant"}"#, - ] - .join("\n") - + "\n"; - std::fs::write(&path, &content).unwrap(); - - // when - let repaired = repair_session_file(&path); - - // then - assert!(repaired); - let 
result = std::fs::read_to_string(&path).unwrap(); - let lines: Vec = result - .lines() - .map(|l| serde_json::from_str(l).unwrap()) - .collect(); - // u3's parentUuid should now point to u2 (nearest preceding) - assert_eq!(lines[2]["parentUuid"], "u2"); - } - - // --- build_resume_command tests --- - - fn dummy_agent_cfg(name: &str) -> pu_core::types::AgentConfig { - pu_core::types::AgentConfig { - name: name.into(), - command: name.into(), - prompt_flag: None, - interactive: true, - launch_args: None, - } - } - - #[test] - fn given_claude_resume_should_use_bypass_and_resume() { - let engine = Engine::new(); - let cfg = dummy_agent_cfg("claude"); - let (cmd, args, sid) = engine - .build_resume_command("claude", &cfg, Some("sess-1")) - .unwrap(); - assert_eq!(cmd, "claude"); - assert!(args.contains(&"--dangerously-skip-permissions".to_string())); - assert!(args.contains(&"--resume".to_string())); - assert!(args.contains(&"sess-1".to_string())); - assert_eq!(sid, Some("sess-1".to_string())); - } - - #[test] - fn given_codex_resume_should_use_full_auto() { - let engine = Engine::new(); - let cfg = dummy_agent_cfg("codex"); - let (cmd, args, _) = engine.build_resume_command("codex", &cfg, None).unwrap(); - assert_eq!(cmd, "codex"); - assert!(args.contains(&"--full-auto".to_string())); - assert!(args.contains(&"resume".to_string())); - assert!(args.contains(&"--last".to_string())); - } - - #[test] - fn given_opencode_resume_should_use_continue() { - let engine = Engine::new(); - let cfg = dummy_agent_cfg("opencode"); - let (cmd, args, _) = engine.build_resume_command("opencode", &cfg, None).unwrap(); - assert_eq!(cmd, "opencode"); - assert!(args.contains(&"--continue".to_string())); - assert!(!args.contains(&"--agent".to_string())); - } - - #[test] - fn given_intact_file_should_not_modify() { - // given: a perfectly valid session file - let tmp = TempDir::new().unwrap(); - let path = tmp.path().join("session.jsonl"); - let content = [ - r#"{"uuid":"u1","type":"summary","parentUuid":null}"#, - r#"{"uuid":"u2","parentUuid":"u1","type":"assistant"}"#, - r#"{"uuid":"u3","parentUuid":"u2","type":"assistant"}"#, - ] - .join("\n") - + "\n"; - std::fs::write(&path, &content).unwrap(); - - // when - let repaired = repair_session_file(&path); - - // then: no changes needed - assert!(!repaired); - // No backup file should be created - assert!(!tmp.path().join("session.jsonl.bak").exists()); - } -} diff --git a/crates/pu-engine/src/engine/agent_lifecycle.rs b/crates/pu-engine/src/engine/agent_lifecycle.rs new file mode 100644 index 0000000..e0e6485 --- /dev/null +++ b/crates/pu-engine/src/engine/agent_lifecycle.rs @@ -0,0 +1,516 @@ +use std::collections::HashMap; +use std::path::Path; +use std::time::Duration; + +use pu_core::config; +use pu_core::error::PuError; +use pu_core::manifest; +use pu_core::protocol::{KillTarget, Response, SuspendTarget}; +use pu_core::types::AgentStatus; + +use crate::pty_manager::{AgentHandle, SpawnConfig}; + +use super::Engine; + +impl Engine { + pub(super) async fn handle_kill( + &self, + project_root: &str, + target: KillTarget, + exclude: &[String], + ) -> Response { + let m = match self.read_manifest_async(project_root).await { + Ok(m) => m, + Err(e) => return Self::error_response(&e), + }; + + let all_ids: Vec = match &target { + KillTarget::Agent(id) => vec![id.clone()], + KillTarget::Worktree(wt_id) => match m.worktrees.get(wt_id) { + Some(wt) => wt.agents.keys().cloned().collect(), + None => { + return Response::Error { + code: "WORKTREE_NOT_FOUND".into(), + message: 
format!("worktree {wt_id} not found"), + }; + } + }, + KillTarget::All => { + let mut ids: Vec = m.agents.keys().cloned().collect(); + for wt in m.worktrees.values() { + ids.extend(wt.agents.keys().cloned()); + } + ids + } + KillTarget::AllWorktrees => { + let mut ids: Vec = Vec::new(); + for wt in m.worktrees.values() { + ids.extend(wt.agents.keys().cloned()); + } + ids + } + }; + + // Apply exclusions (self-protection + root-protection) + let (agent_ids, skipped): (Vec, Vec) = + all_ids.into_iter().partition(|id| !exclude.contains(id)); + + // Kill agents: remove pending inputs, extract handles, kill PTY processes. + let handles_killed = self.kill_agents(&agent_ids).await; + let exit_codes: HashMap> = handles_killed + .iter() + .map(|(id, handle)| (id.clone(), *handle.exit_rx.borrow())) + .collect(); + + // Update manifest: remove all targeted agents (off async runtime) + let killed = agent_ids.clone(); + let pr = project_root.to_string(); + let killed_for_manifest = killed.clone(); + tokio::task::spawn_blocking(move || { + manifest::update_manifest(Path::new(&pr), move |mut m| { + for id in &killed_for_manifest { + m.agents.shift_remove(id); + for wt in m.worktrees.values_mut() { + wt.agents.shift_remove(id); + } + } + m + }) + .ok(); + }) + .await + .ok(); + + self.notify_status_change(project_root).await; + + Response::KillResult { + killed, + exit_codes, + skipped, + } + } + + pub(super) async fn handle_rename( + &self, + project_root: &str, + agent_id: &str, + name: &str, + ) -> Response { + let pr = project_root.to_string(); + let aid = agent_id.to_string(); + let new_name = name.to_string(); + let new_name2 = new_name.clone(); + + let result = tokio::task::spawn_blocking(move || { + manifest::update_manifest(Path::new(&pr), |mut m| { + if let Some(agent) = m.find_agent_mut(&aid) { + agent.name = new_name.clone(); + } + m + }) + }) + .await; + + match result { + Ok(Ok(updated)) => { + let found = updated.find_agent(agent_id).is_some(); + if found { + self.notify_status_change(project_root).await; + Response::RenameResult { + agent_id: agent_id.to_string(), + name: new_name2, + } + } else { + Self::agent_not_found(agent_id) + } + } + Ok(Err(e)) => Self::error_response(&e), + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("rename task failed: {e}"), + }, + } + } + + pub(super) async fn handle_assign_trigger( + &self, + project_root: &str, + agent_id: &str, + trigger_name: &str, + ) -> Response { + let pr = project_root.to_string(); + let tn = trigger_name.to_string(); + + let trigger = tokio::task::spawn_blocking(move || { + pu_core::trigger_def::triggers_for_event( + Path::new(&pr), + &pu_core::trigger_def::TriggerEvent::AgentIdle, + ) + .into_iter() + .find(|t| t.name == tn) + }) + .await; + + let trigger = match trigger { + Ok(Some(t)) => t, + Ok(None) => { + return Response::Error { + code: "NOT_FOUND".into(), + message: format!("trigger '{trigger_name}' not found"), + }; + } + Err(e) => { + return Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("trigger lookup failed: {e}"), + }; + } + }; + + let sequence_len = trigger.sequence.len() as u32; + if sequence_len == 0 { + return Response::Error { + code: "INVALID_TRIGGER".into(), + message: format!("trigger '{trigger_name}' has empty sequence"), + }; + } + + // Verify the agent exists in the manifest before assigning + let pr_check = project_root.to_string(); + let aid_check = agent_id.to_string(); + let agent_exists = tokio::task::spawn_blocking(move || { + 
manifest::read_manifest(Path::new(&pr_check)) + .map(|m| m.find_agent(&aid_check).is_some()) + }) + .await; + + match agent_exists { + Ok(Ok(false)) | Ok(Err(_)) => { + return Response::Error { + code: "NOT_FOUND".into(), + message: format!("agent '{agent_id}' not found"), + }; + } + Err(e) => { + return Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("agent lookup failed: {e}"), + }; + } + Ok(Ok(true)) => {} // proceed + } + + let pr2 = project_root.to_string(); + let aid2 = agent_id.to_string(); + let tn2 = trigger_name.to_string(); + let result = tokio::task::spawn_blocking(move || { + manifest::update_manifest(Path::new(&pr2), |mut m| { + if let Some(agent) = m.find_agent_mut(&aid2) { + agent.trigger_name = Some(tn2.clone()); + agent.trigger_state = Some(pu_core::types::TriggerState::Active); + agent.trigger_seq_index = Some(0); + agent.trigger_total = Some(sequence_len); + agent.gate_attempts = Some(0); + } + m + }) + }) + .await; + + match result { + Ok(Ok(_)) => { + self.notify_status_change(project_root).await; + Response::AssignTriggerResult { + agent_id: agent_id.to_string(), + trigger_name: trigger_name.to_string(), + sequence_len, + } + } + Ok(Err(e)) => Self::error_response(&e), + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("assign trigger task failed: {e}"), + }, + } + } + + pub(super) async fn handle_suspend( + &self, + project_root: &str, + target: SuspendTarget, + ) -> Response { + let m = match self.read_manifest_async(project_root).await { + Ok(m) => m, + Err(e) => return Self::error_response(&e), + }; + + // Collect suspendable agents — must be alive and not already suspended. + let agent_ids: Vec = match &target { + SuspendTarget::Agent(id) => match m.find_agent(id) { + Some(loc) => { + let agent = match loc { + pu_core::types::AgentLocation::Root(a) => a, + pu_core::types::AgentLocation::Worktree { agent, .. } => agent, + }; + if !agent.status.is_alive() || agent.suspended { + return Response::SuspendResult { suspended: vec![] }; + } + vec![id.clone()] + } + None => return Self::agent_not_found(id), + }, + SuspendTarget::All => m + .all_agents() + .into_iter() + .filter(|a| a.status.is_alive() && !a.suspended) + .map(|a| a.id.clone()) + .collect(), + }; + + if agent_ids.is_empty() { + return Response::SuspendResult { suspended: vec![] }; + } + + self.kill_agents(&agent_ids).await; + + // Update manifest: mark as suspended, clear pid, set suspended_at. + // Status stays as-is (Waiting); suspended flag is metadata. + let suspended = agent_ids.clone(); + let pr = project_root.to_string(); + let suspended_for_manifest = suspended.clone(); + tokio::task::spawn_blocking(move || { + manifest::update_manifest(Path::new(&pr), move |mut m| { + let now = chrono::Utc::now(); + for id in &suspended_for_manifest { + if let Some(agent) = m.find_agent_mut(id) { + agent.status = AgentStatus::Waiting; + agent.suspended = true; + agent.pid = None; + agent.suspended_at = Some(now); + } + } + m + }) + .ok(); + }) + .await + .ok(); + + self.notify_status_change(project_root).await; + + Response::SuspendResult { suspended } + } + + pub(super) async fn handle_resume(&self, project_root: &str, agent_id: &str) -> Response { + let root_path = Path::new(project_root); + + // 1. 
Read manifest, find the suspended agent + let m = match self.read_manifest_async(project_root).await { + Ok(m) => m, + Err(e) => return Self::error_response(&e), + }; + + let (agent_entry, _worktree_id, cwd) = match m.find_agent(agent_id) { + Some(pu_core::types::AgentLocation::Root(a)) => { + (a.clone(), None::, project_root.to_string()) + } + Some(pu_core::types::AgentLocation::Worktree { worktree, agent }) => ( + agent.clone(), + Some(worktree.id.clone()), + worktree.path.clone(), + ), + None => return Self::agent_not_found(agent_id), + }; + + if !agent_entry.suspended { + return Response::Error { + code: "INVALID_STATE".into(), + message: "agent is not suspended".into(), + }; + } + + // 2. Load agent config + let cfg = match config::load_config_strict(root_path) { + Ok(c) => c, + Err(e) => { + return Response::Error { + code: "CONFIG_ERROR".into(), + message: format!("failed to load config: {e}"), + }; + } + }; + let agent_cfg = match config::resolve_agent(&cfg, &agent_entry.agent_type) { + Some(c) => c.clone(), + None => { + return Response::Error { + code: "INVALID_ARGUMENT".into(), + message: format!("unknown agent type: {}", agent_entry.agent_type), + }; + } + }; + + // 3. Repair corrupted session files before resume (claude-code#24304) + if let Some(ref sid) = agent_entry.session_id { + let cwd_clone = cwd.clone(); + let sid_clone = sid.clone(); + tokio::task::spawn_blocking(move || { + Self::repair_session_files(&cwd_clone, &sid_clone); + }) + .await + .ok(); + } + let effective_session_id = agent_entry.session_id.clone(); + + // 4. Construct resume command based on agent type + let (command, args, session_id) = match self.build_resume_command( + &agent_entry.agent_type, + &agent_cfg, + effective_session_id.as_deref(), + ) { + Ok(result) => result, + Err(response) => return response, + }; + + // 5. Spawn PTY process + let mut env = self.agent_env().await; + env.push(("PU_AGENT_ID".into(), agent_id.to_string())); + let spawn_config = SpawnConfig { + command, + args, + cwd, + env, + env_remove: vec!["CLAUDECODE".into()], + cols: 120, + rows: 40, + }; + + let handle = match self.pty_host.spawn(spawn_config).await { + Ok(h) => h, + Err(e) => { + return Response::Error { + code: "RESUME_FAILED".into(), + message: format!("failed to spawn process: {e}"), + }; + } + }; + + let pid = handle.pid; + + // Store handle in session map BEFORE writing manifest. + // ManifestWatcher in Swift fires on manifest write and immediately + // tries to attach — the session must already be in the map. + self.sessions + .lock() + .await + .insert(agent_id.to_string(), handle); + + // 6. 
Update manifest: Suspended → Running, new PID + let aid = agent_id.to_string(); + let sid = session_id.clone(); + let pr = project_root.to_string(); + let manifest_result = tokio::task::spawn_blocking(move || { + manifest::update_manifest(Path::new(&pr), move |mut m| { + if let Some(agent) = m.find_agent_mut(&aid) { + agent.status = AgentStatus::Streaming; + agent.suspended = false; + agent.pid = Some(pid); + agent.completed_at = None; + agent.suspended_at = None; + if let Some(ref s) = sid { + agent.session_id = Some(s.clone()); + } + } + m + }) + }) + .await + .unwrap_or_else(|e| Err(PuError::Io(std::io::Error::other(e)))); + + if let Err(e) = manifest_result { + // Rollback: remove session and kill process + if let Some(handle) = self.sessions.lock().await.remove(agent_id) { + self.pty_host + .kill(&handle, Duration::from_secs(2)) + .await + .ok(); + } + return Response::Error { + code: "RESUME_FAILED".into(), + message: format!("failed to update manifest: {e}"), + }; + } + + self.notify_status_change(project_root).await; + + Response::ResumeResult { + agent_id: agent_id.to_string(), + status: AgentStatus::Streaming, + } + } + + /// Construct the resume command for a given agent type. + /// Returns Ok((command, args, session_id)) or Err(Response) on failure. + #[allow(clippy::result_large_err)] + pub(super) fn build_resume_command( + &self, + agent_type: &str, + agent_cfg: &pu_core::types::AgentConfig, + session_id: Option<&str>, + ) -> Result<(String, Vec, Option), Response> { + let launch_args = + pu_core::types::resolved_launch_args(agent_type, agent_cfg.launch_args.as_deref()); + match agent_type { + "claude" => { + let sid = session_id.ok_or_else(|| Response::Error { + code: "RESUME_FAILED".into(), + message: "cannot resume Claude agent: no session_id preserved".into(), + })?; + let mut args = launch_args; + args.push("--resume".into()); + args.push(sid.to_string()); + Ok(("claude".into(), args, Some(sid.to_string()))) + } + "codex" => { + // Top-level flags (e.g. --full-auto) must precede the subcommand + let mut args = launch_args; + args.push("resume".into()); + args.push("--last".into()); + Ok(("codex".into(), args, None)) + } + "opencode" => { + let mut args = vec!["--continue".into()]; + args.extend(launch_args); + Ok(("opencode".into(), args, None)) + } + _ => { + // Terminal / unknown: fresh shell in same directory + let (command, args) = Self::parse_agent_command(agent_cfg, agent_type)?; + Ok((command, args, None)) + } + } + } + + /// Remove pending inputs and session handles for the given agent IDs, then kill their + /// PTY processes. Returns the extracted handles (for callers that need exit codes). 
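For orientation, the shapes build_resume_command produces per agent type (with default launch args per resolved_launch_args; illustrative, not exhaustive):

// claude   -> ("claude",   [default launch args.., "--resume", "<session-id>"], Some(sid)); a session_id is required
// codex    -> ("codex",    ["--full-auto", "resume", "--last"], None); top-level flags precede the subcommand
// opencode -> ("opencode", ["--continue", default launch args..], None)
// other    -> the configured command split into program + args, no session id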
+ pub(super) async fn kill_agents(&self, agent_ids: &[String]) -> Vec<(String, AgentHandle)> { + { + let mut pending_inputs = self.pending_initial_inputs.lock().await; + for id in agent_ids { + pending_inputs.remove(id); + } + } + let handles: Vec<(String, AgentHandle)> = { + let mut sessions = self.sessions.lock().await; + agent_ids + .iter() + .filter_map(|id| sessions.remove(id).map(|h| (id.clone(), h))) + .collect() + }; + for (id, handle) in &handles { + if let Err(e) = self.pty_host.kill(handle, Duration::from_secs(5)).await { + tracing::debug!(agent_id = id, "kill failed: {e}"); + } + } + handles + } +} diff --git a/crates/pu-engine/src/engine/definitions/agent_defs.rs b/crates/pu-engine/src/engine/definitions/agent_defs.rs new file mode 100644 index 0000000..f48f942 --- /dev/null +++ b/crates/pu-engine/src/engine/definitions/agent_defs.rs @@ -0,0 +1,163 @@ +use std::path::Path; + +use pu_core::paths; +use pu_core::protocol::{AgentDefInfo, Response}; + +use super::super::Engine; + +impl Engine { + pub(in crate::engine) async fn handle_list_agent_defs(&self, project_root: &str) -> Response { + let pr = project_root.to_string(); + match tokio::task::spawn_blocking(move || { + let root = Path::new(&pr); + let defs = pu_core::agent_def::list_agent_defs(root); + let infos: Vec = defs + .into_iter() + .map(|d| AgentDefInfo { + name: d.name, + agent_type: d.agent_type, + template: d.template, + inline_prompt: d.inline_prompt, + tags: d.tags, + scope: d.scope, + available_in_command_dialog: d.available_in_command_dialog, + icon: d.icon, + command: d.command, + }) + .collect(); + infos + }) + .await + { + Ok(agent_defs) => Response::AgentDefList { agent_defs }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + pub(in crate::engine) async fn handle_get_agent_def( + &self, + project_root: &str, + name: &str, + ) -> Response { + let pr = project_root.to_string(); + let n = name.to_string(); + match tokio::task::spawn_blocking(move || { + pu_core::agent_def::find_agent_def(Path::new(&pr), &n) + }) + .await + { + Ok(Some(d)) => Response::AgentDefDetail { + name: d.name, + agent_type: d.agent_type, + template: d.template, + inline_prompt: d.inline_prompt, + tags: d.tags, + scope: d.scope, + available_in_command_dialog: d.available_in_command_dialog, + icon: d.icon, + command: d.command, + }, + Ok(None) => Response::Error { + code: "NOT_FOUND".into(), + message: format!("agent def '{name}' not found"), + }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + #[allow(clippy::too_many_arguments)] + pub(in crate::engine) async fn handle_save_agent_def( + &self, + project_root: &str, + name: &str, + agent_type: &str, + template: Option, + inline_prompt: Option, + tags: Vec, + scope: &str, + available_in_command_dialog: bool, + icon: Option, + command: Option, + ) -> Response { + let dir = match Self::resolve_scope_dir( + project_root, + scope, + paths::agents_dir, + paths::global_agents_dir, + ) { + Ok(d) => d, + Err(msg) => { + return Response::Error { + code: "IO_ERROR".into(), + message: msg, + }; + } + }; + let def = pu_core::agent_def::AgentDef { + name: name.to_string(), + agent_type: agent_type.to_string(), + template, + inline_prompt, + tags, + scope: scope.to_string(), + available_in_command_dialog, + icon, + command, + }; + match tokio::task::spawn_blocking(move || pu_core::agent_def::save_agent_def(&dir, &def)) + .await + { + Ok(Ok(())) => 
Response::Ok, + Ok(Err(e)) => Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to save agent def: {e}"), + }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + pub(in crate::engine) async fn handle_delete_agent_def( + &self, + project_root: &str, + name: &str, + scope: &str, + ) -> Response { + let dir = match Self::resolve_scope_dir( + project_root, + scope, + paths::agents_dir, + paths::global_agents_dir, + ) { + Ok(d) => d, + Err(msg) => { + return Response::Error { + code: "IO_ERROR".into(), + message: msg, + }; + } + }; + let n = name.to_string(); + match tokio::task::spawn_blocking(move || pu_core::agent_def::delete_agent_def(&dir, &n)) + .await + { + Ok(Ok(_)) => Response::Ok, + Ok(Err(e)) => Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to delete agent def: {e}"), + }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } +} diff --git a/crates/pu-engine/src/engine/definitions/mod.rs b/crates/pu-engine/src/engine/definitions/mod.rs new file mode 100644 index 0000000..8b54272 --- /dev/null +++ b/crates/pu-engine/src/engine/definitions/mod.rs @@ -0,0 +1,5 @@ +mod agent_defs; +mod schedules; +mod swarm_defs; +mod templates; +mod triggers; diff --git a/crates/pu-engine/src/engine/definitions/schedules.rs b/crates/pu-engine/src/engine/definitions/schedules.rs new file mode 100644 index 0000000..5df9658 --- /dev/null +++ b/crates/pu-engine/src/engine/definitions/schedules.rs @@ -0,0 +1,345 @@ +use std::path::Path; + +use pu_core::paths; +use pu_core::protocol::{Response, ScheduleInfo, ScheduleTriggerPayload}; + +use super::super::Engine; + +impl Engine { + pub(in crate::engine) async fn handle_list_schedules(&self, project_root: &str) -> Response { + let pr = project_root.to_string(); + match tokio::task::spawn_blocking(move || { + let root = Path::new(&pr); + let defs = pu_core::schedule_def::list_schedule_defs(root); + let infos: Vec = + defs.into_iter().map(Self::schedule_def_to_info).collect(); + infos + }) + .await + { + Ok(schedules) => Response::ScheduleList { schedules }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + pub(in crate::engine) async fn handle_get_schedule( + &self, + project_root: &str, + name: &str, + ) -> Response { + let pr = project_root.to_string(); + let n = name.to_string(); + match tokio::task::spawn_blocking(move || { + pu_core::schedule_def::find_schedule_def(Path::new(&pr), &n) + }) + .await + { + Ok(Some(d)) => Self::schedule_def_to_detail(d), + Ok(None) => Response::Error { + code: "NOT_FOUND".into(), + message: format!("schedule '{name}' not found"), + }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + #[allow(clippy::too_many_arguments)] + pub(in crate::engine) async fn handle_save_schedule( + &self, + project_root: &str, + name: &str, + enabled: bool, + recurrence: &str, + start_at: chrono::DateTime, + trigger: ScheduleTriggerPayload, + target: &str, + scope: &str, + root: bool, + agent_name: Option, + ) -> Response { + let dir = match Self::resolve_scope_dir( + project_root, + scope, + paths::schedules_dir, + paths::global_schedules_dir, + ) { + Ok(d) => d, + Err(msg) => { + return Response::Error { + code: "IO_ERROR".into(), + message: msg, + }; + } + }; + let rec = match Self::parse_recurrence(recurrence) { + 
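// accepted recurrence strings: none | hourly | daily | weekdays | weekly | monthly (see parse_recurrence below)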
Ok(r) => r, + Err(msg) => { + return Response::Error { + code: "INVALID_INPUT".into(), + message: msg, + }; + } + }; + let now = chrono::Utc::now(); + let next_run = if enabled { + pu_core::schedule_def::next_occurrence(start_at, &rec, now) + } else { + None + }; + let def = pu_core::schedule_def::ScheduleDef { + name: name.to_string(), + enabled, + recurrence: rec, + start_at, + next_run, + trigger: Self::payload_to_trigger(&trigger), + project_root: project_root.to_string(), + target: target.to_string(), + root, + agent_name, + scope: scope.to_string(), + created_at: now, + }; + match tokio::task::spawn_blocking(move || { + pu_core::schedule_def::save_schedule_def(&dir, &def) + }) + .await + { + Ok(Ok(())) => Response::Ok, + Ok(Err(e)) => Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to save schedule: {e}"), + }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + pub(in crate::engine) async fn handle_delete_schedule( + &self, + project_root: &str, + name: &str, + scope: &str, + ) -> Response { + let dir = match Self::resolve_scope_dir( + project_root, + scope, + paths::schedules_dir, + paths::global_schedules_dir, + ) { + Ok(d) => d, + Err(msg) => { + return Response::Error { + code: "IO_ERROR".into(), + message: msg, + }; + } + }; + let n = name.to_string(); + match tokio::task::spawn_blocking(move || { + pu_core::schedule_def::delete_schedule_def(&dir, &n) + }) + .await + { + Ok(Ok(_)) => Response::Ok, + Ok(Err(e)) => Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to delete schedule: {e}"), + }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + pub(in crate::engine) async fn handle_enable_schedule( + &self, + project_root: &str, + name: &str, + ) -> Response { + let pr = project_root.to_string(); + let n = name.to_string(); + tokio::task::spawn_blocking(move || { + let root = Path::new(&pr); + let mut def = match pu_core::schedule_def::find_schedule_def(root, &n) { + Some(d) => d, + None => { + return Response::Error { + code: "NOT_FOUND".into(), + message: format!("schedule '{n}' not found"), + }; + } + }; + def.enabled = true; + let now = chrono::Utc::now(); + def.next_run = + pu_core::schedule_def::next_occurrence(def.start_at, &def.recurrence, now); + let dir = paths::schedules_dir(root); + match pu_core::schedule_def::save_schedule_def(&dir, &def) { + Ok(()) => Response::Ok, + Err(e) => Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to save schedule: {e}"), + }, + } + }) + .await + .unwrap_or_else(|e| Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }) + } + + pub(in crate::engine) async fn handle_disable_schedule( + &self, + project_root: &str, + name: &str, + ) -> Response { + let pr = project_root.to_string(); + let n = name.to_string(); + tokio::task::spawn_blocking(move || { + let root = Path::new(&pr); + let mut def = match pu_core::schedule_def::find_schedule_def(root, &n) { + Some(d) => d, + None => { + return Response::Error { + code: "NOT_FOUND".into(), + message: format!("schedule '{n}' not found"), + }; + } + }; + def.enabled = false; + def.next_run = None; + let dir = paths::schedules_dir(root); + match pu_core::schedule_def::save_schedule_def(&dir, &def) { + Ok(()) => Response::Ok, + Err(e) => Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to save schedule: {e}"), + }, + } + 
}) + .await + .unwrap_or_else(|e| Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }) + } + + pub(in crate::engine) fn schedule_def_to_info( + d: pu_core::schedule_def::ScheduleDef, + ) -> ScheduleInfo { + ScheduleInfo { + name: d.name, + enabled: d.enabled, + recurrence: Self::recurrence_to_string(&d.recurrence), + start_at: d.start_at, + next_run: d.next_run, + trigger: Self::trigger_to_payload(&d.trigger), + project_root: d.project_root, + target: d.target, + scope: d.scope, + root: d.root, + agent_name: d.agent_name, + created_at: d.created_at, + } + } + + pub(in crate::engine) fn schedule_def_to_detail( + d: pu_core::schedule_def::ScheduleDef, + ) -> Response { + Response::ScheduleDetail { + name: d.name, + enabled: d.enabled, + recurrence: Self::recurrence_to_string(&d.recurrence), + start_at: d.start_at, + next_run: d.next_run, + trigger: Self::trigger_to_payload(&d.trigger), + project_root: d.project_root, + target: d.target, + scope: d.scope, + root: d.root, + agent_name: d.agent_name, + created_at: d.created_at, + } + } + + pub(in crate::engine) fn recurrence_to_string(r: &pu_core::schedule_def::Recurrence) -> String { + match r { + pu_core::schedule_def::Recurrence::None => "none", + pu_core::schedule_def::Recurrence::Hourly => "hourly", + pu_core::schedule_def::Recurrence::Daily => "daily", + pu_core::schedule_def::Recurrence::Weekdays => "weekdays", + pu_core::schedule_def::Recurrence::Weekly => "weekly", + pu_core::schedule_def::Recurrence::Monthly => "monthly", + } + .to_string() + } + + pub(in crate::engine) fn parse_recurrence( + s: &str, + ) -> Result { + match s { + "none" => Ok(pu_core::schedule_def::Recurrence::None), + "hourly" => Ok(pu_core::schedule_def::Recurrence::Hourly), + "daily" => Ok(pu_core::schedule_def::Recurrence::Daily), + "weekdays" => Ok(pu_core::schedule_def::Recurrence::Weekdays), + "weekly" => Ok(pu_core::schedule_def::Recurrence::Weekly), + "monthly" => Ok(pu_core::schedule_def::Recurrence::Monthly), + other => Err(format!("unknown recurrence: {other}")), + } + } + + pub(in crate::engine) fn trigger_to_payload( + t: &pu_core::schedule_def::ScheduleTrigger, + ) -> ScheduleTriggerPayload { + match t { + pu_core::schedule_def::ScheduleTrigger::AgentDef { name } => { + ScheduleTriggerPayload::AgentDef { name: name.clone() } + } + pu_core::schedule_def::ScheduleTrigger::SwarmDef { name, vars } => { + ScheduleTriggerPayload::SwarmDef { + name: name.clone(), + vars: vars.clone(), + } + } + pu_core::schedule_def::ScheduleTrigger::InlinePrompt { prompt, agent } => { + ScheduleTriggerPayload::InlinePrompt { + prompt: prompt.clone(), + agent: agent.clone(), + } + } + } + } + + pub(in crate::engine) fn payload_to_trigger( + p: &ScheduleTriggerPayload, + ) -> pu_core::schedule_def::ScheduleTrigger { + match p { + ScheduleTriggerPayload::AgentDef { name } => { + pu_core::schedule_def::ScheduleTrigger::AgentDef { name: name.clone() } + } + ScheduleTriggerPayload::SwarmDef { name, vars } => { + pu_core::schedule_def::ScheduleTrigger::SwarmDef { + name: name.clone(), + vars: vars.clone(), + } + } + ScheduleTriggerPayload::InlinePrompt { prompt, agent } => { + pu_core::schedule_def::ScheduleTrigger::InlinePrompt { + prompt: prompt.clone(), + agent: agent.clone(), + } + } + } + } +} diff --git a/crates/pu-engine/src/engine/definitions/swarm_defs.rs b/crates/pu-engine/src/engine/definitions/swarm_defs.rs new file mode 100644 index 0000000..0e114e9 --- /dev/null +++ b/crates/pu-engine/src/engine/definitions/swarm_defs.rs 
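The swarm_defs module below expands a swarm definition into worktrees plus a roster of agents; a rough illustration of the naming scheme handle_run_swarm applies (hypothetical swarm named review with worktree_count = 2, an empty worktree_template, and a reviewer x2 roster):

// worktree names: review-0, review-1 (worktree_template with "{index}" substituted when one is set)
// the first spawn in each worktree is named after the worktree and creates it;
// subsequent roster spawns reuse it under names like review-reviewer-0-1
// optional terminals: review-terminal-0, review-terminal-1 (when include_terminal is set)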
@@ -0,0 +1,370 @@ +use std::path::Path; + +use pu_core::paths; +use pu_core::protocol::{Response, SwarmDefInfo, SwarmRosterEntryPayload}; + +use super::super::{Engine, SpawnParams}; + +impl Engine { + pub(in crate::engine) async fn handle_list_swarm_defs(&self, project_root: &str) -> Response { + let pr = project_root.to_string(); + match tokio::task::spawn_blocking(move || { + let root = Path::new(&pr); + let defs = pu_core::swarm_def::list_swarm_defs(root); + let infos: Vec = defs + .into_iter() + .map(|d| SwarmDefInfo { + name: d.name, + worktree_count: d.worktree_count, + worktree_template: d.worktree_template, + roster: d + .roster + .into_iter() + .map(|r| SwarmRosterEntryPayload { + agent_def: r.agent_def, + role: r.role, + quantity: r.quantity, + }) + .collect(), + include_terminal: d.include_terminal, + scope: d.scope, + }) + .collect(); + infos + }) + .await + { + Ok(swarm_defs) => Response::SwarmDefList { swarm_defs }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + pub(in crate::engine) async fn handle_get_swarm_def( + &self, + project_root: &str, + name: &str, + ) -> Response { + let pr = project_root.to_string(); + let n = name.to_string(); + match tokio::task::spawn_blocking(move || { + pu_core::swarm_def::find_swarm_def(Path::new(&pr), &n) + }) + .await + { + Ok(Some(d)) => Response::SwarmDefDetail { + name: d.name, + worktree_count: d.worktree_count, + worktree_template: d.worktree_template, + roster: d + .roster + .into_iter() + .map(|r| SwarmRosterEntryPayload { + agent_def: r.agent_def, + role: r.role, + quantity: r.quantity, + }) + .collect(), + include_terminal: d.include_terminal, + scope: d.scope, + }, + Ok(None) => Response::Error { + code: "NOT_FOUND".into(), + message: format!("swarm def '{name}' not found"), + }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + #[allow(clippy::too_many_arguments)] + pub(in crate::engine) async fn handle_save_swarm_def( + &self, + project_root: &str, + name: &str, + worktree_count: u32, + worktree_template: &str, + roster: Vec, + include_terminal: bool, + scope: &str, + ) -> Response { + let dir = match Self::resolve_scope_dir( + project_root, + scope, + paths::swarms_dir, + paths::global_swarms_dir, + ) { + Ok(d) => d, + Err(msg) => { + return Response::Error { + code: "IO_ERROR".into(), + message: msg, + }; + } + }; + let def = pu_core::swarm_def::SwarmDef { + name: name.to_string(), + worktree_count, + worktree_template: worktree_template.to_string(), + roster: roster + .into_iter() + .map(|r| pu_core::swarm_def::SwarmRosterEntry { + agent_def: r.agent_def, + role: r.role, + quantity: r.quantity, + }) + .collect(), + include_terminal, + scope: scope.to_string(), + }; + match tokio::task::spawn_blocking(move || pu_core::swarm_def::save_swarm_def(&dir, &def)) + .await + { + Ok(Ok(())) => Response::Ok, + Ok(Err(e)) => Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to save swarm def: {e}"), + }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + pub(in crate::engine) async fn handle_delete_swarm_def( + &self, + project_root: &str, + name: &str, + scope: &str, + ) -> Response { + let dir = match Self::resolve_scope_dir( + project_root, + scope, + paths::swarms_dir, + paths::global_swarms_dir, + ) { + Ok(d) => d, + Err(msg) => { + return Response::Error { + code: "IO_ERROR".into(), + 
message: msg, + }; + } + }; + let n = name.to_string(); + match tokio::task::spawn_blocking(move || pu_core::swarm_def::delete_swarm_def(&dir, &n)) + .await + { + Ok(Ok(_)) => Response::Ok, + Ok(Err(e)) => Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to delete swarm def: {e}"), + }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + // --- RunSwarm handler --- + + pub(in crate::engine) async fn handle_run_swarm( + &self, + project_root: &str, + swarm_name: &str, + vars: std::collections::HashMap, + ) -> Response { + // Read the swarm definition + let pr = project_root.to_string(); + let sn = swarm_name.to_string(); + let swarm_def = match tokio::task::spawn_blocking(move || { + pu_core::swarm_def::find_swarm_def(Path::new(&pr), &sn) + }) + .await + { + Ok(Some(def)) => def, + Ok(None) => { + return Response::Error { + code: "NOT_FOUND".into(), + message: format!("swarm def '{swarm_name}' not found"), + }; + } + Err(e) => { + return Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }; + } + }; + + // Pre-resolve all agent defs and their prompts once, before iterating worktrees. + let mut resolved_roster: Vec<(pu_core::agent_def::AgentDef, String, Option, u32)> = + Vec::new(); + for entry in &swarm_def.roster { + let pr2 = project_root.to_string(); + let ad_name = entry.agent_def.clone(); + let agent_def = match tokio::task::spawn_blocking(move || { + pu_core::agent_def::find_agent_def(Path::new(&pr2), &ad_name) + }) + .await + { + Ok(Some(def)) => def, + Ok(None) => { + return Response::Error { + code: "NOT_FOUND".into(), + message: format!( + "agent def '{}' referenced by swarm not found", + entry.agent_def + ), + }; + } + Err(e) => { + return Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }; + } + }; + + let (prompt, template_command) = if let Some(ref tpl_name) = agent_def.template { + let pr3 = project_root.to_string(); + let tn = tpl_name.clone(); + let vars_clone = vars.clone(); + match tokio::task::spawn_blocking(move || { + pu_core::template::find_template(Path::new(&pr3), &tn) + }) + .await + { + Ok(Some(tpl)) => { + let rendered = pu_core::template::render(&tpl, &vars_clone); + let cmd = pu_core::template::render_command(&tpl, &vars_clone); + (rendered, cmd) + } + Ok(None) => { + return Response::Error { + code: "NOT_FOUND".into(), + message: format!("template '{tpl_name}' not found"), + }; + } + Err(e) => { + return Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }; + } + } + } else { + (agent_def.inline_prompt.clone().unwrap_or_default(), None) + }; + + resolved_roster.push((agent_def, prompt, template_command, entry.quantity)); + } + + let mut spawned_agents = Vec::new(); + + for wt_index in 0..swarm_def.worktree_count { + let wt_name = if swarm_def.worktree_template.is_empty() { + format!("{swarm_name}-{wt_index}") + } else { + swarm_def + .worktree_template + .replace("{index}", &wt_index.to_string()) + }; + + let mut worktree_id: Option = None; + + for (agent_def, prompt, template_command, quantity) in &resolved_roster { + for q in 0..*quantity { + let agent_name = format!("{}-{}-{wt_index}-{q}", swarm_name, agent_def.name); + + // First agent creates the worktree; subsequent agents reuse it + let (spawn_name, spawn_worktree) = if worktree_id.is_some() { + (Some(agent_name), worktree_id.clone()) + } else { + (Some(wt_name.clone()), 
None) + }; + + // Agent def command takes precedence, then template command + let resolved_command = agent_def + .command + .clone() + .or_else(|| template_command.clone()); + + let resp = self + .handle_spawn(SpawnParams { + project_root: project_root.to_string(), + prompt: prompt.to_string(), + agent_type: agent_def.agent_type.clone(), + name: spawn_name, + base: None, + root: false, + worktree: spawn_worktree, + terminal_command: resolved_command, + no_auto: false, + extra_args: vec![], + plan_mode: false, + no_trigger: false, + trigger: None, + }) + .await; + + match resp { + Response::SpawnResult { + agent_id, + worktree_id: wt_id, + .. + } => { + spawned_agents.push(agent_id); + if worktree_id.is_none() { + worktree_id = wt_id; + } + } + Response::Error { code, message } => { + return Response::RunSwarmPartial { + spawned_agents, + error_code: code, + error_message: message, + }; + } + _ => {} + } + } + } + + // If include_terminal is set, spawn a bare terminal into this worktree + if swarm_def.include_terminal { + if let Some(ref wt_id) = worktree_id { + let term_name = format!("{swarm_name}-terminal-{wt_index}"); + let resp = self + .handle_spawn(SpawnParams { + project_root: project_root.to_string(), + prompt: String::new(), + agent_type: "terminal".into(), + name: Some(term_name), + base: None, + root: false, + worktree: Some(wt_id.clone()), + terminal_command: None, + no_auto: false, + extra_args: vec![], + plan_mode: false, + no_trigger: false, + trigger: None, + }) + .await; + if let Response::SpawnResult { agent_id, .. } = resp { + spawned_agents.push(agent_id); + } + } + } + } + + Response::RunSwarmResult { spawned_agents } + } +} diff --git a/crates/pu-engine/src/engine/definitions/templates.rs b/crates/pu-engine/src/engine/definitions/templates.rs new file mode 100644 index 0000000..4b9694f --- /dev/null +++ b/crates/pu-engine/src/engine/definitions/templates.rs @@ -0,0 +1,151 @@ +use std::path::Path; + +use pu_core::paths; +use pu_core::protocol::{Response, TemplateInfo}; + +use super::super::Engine; + +impl Engine { + pub(in crate::engine) async fn handle_list_templates(&self, project_root: &str) -> Response { + let pr = project_root.to_string(); + match tokio::task::spawn_blocking(move || { + let root = Path::new(&pr); + let templates = pu_core::template::list_templates(root); + let infos: Vec = templates + .into_iter() + .map(|t| TemplateInfo { + name: t.name, + description: t.description, + agent: t.agent, + source: t.source, + variables: pu_core::template::extract_variables(&t.body), + command: t.command, + }) + .collect(); + infos + }) + .await + { + Ok(templates) => Response::TemplateList { templates }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + pub(in crate::engine) async fn handle_get_template( + &self, + project_root: &str, + name: &str, + ) -> Response { + let pr = project_root.to_string(); + let tpl_name = name.to_string(); + match tokio::task::spawn_blocking(move || { + let root = Path::new(&pr); + pu_core::template::find_template(root, &tpl_name) + }) + .await + { + Ok(Some(t)) => Response::TemplateDetail { + name: t.name, + description: t.description, + agent: t.agent, + variables: pu_core::template::extract_variables(&t.body), + body: t.body, + source: t.source, + command: t.command, + }, + Ok(None) => Response::Error { + code: "NOT_FOUND".into(), + message: format!("template '{name}' not found"), + }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: 
format!("task join error: {e}"), + }, + } + } + + #[allow(clippy::too_many_arguments)] + pub(in crate::engine) async fn handle_save_template( + &self, + project_root: &str, + name: &str, + description: &str, + agent: &str, + body: &str, + scope: &str, + command: Option, + ) -> Response { + let dir = match Self::resolve_scope_dir( + project_root, + scope, + paths::templates_dir, + paths::global_templates_dir, + ) { + Ok(d) => d, + Err(msg) => { + return Response::Error { + code: "IO_ERROR".into(), + message: msg, + }; + } + }; + let n = name.to_string(); + let d = description.to_string(); + let a = agent.to_string(); + let b = body.to_string(); + match tokio::task::spawn_blocking(move || { + pu_core::template::save_template_with_command(&dir, &n, &d, &a, &b, command.as_deref()) + }) + .await + { + Ok(Ok(())) => Response::Ok, + Ok(Err(e)) => Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to save template: {e}"), + }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } + + pub(in crate::engine) async fn handle_delete_template( + &self, + project_root: &str, + name: &str, + scope: &str, + ) -> Response { + let dir = match Self::resolve_scope_dir( + project_root, + scope, + paths::templates_dir, + paths::global_templates_dir, + ) { + Ok(d) => d, + Err(msg) => { + return Response::Error { + code: "IO_ERROR".into(), + message: msg, + }; + } + }; + let n = name.to_string(); + match tokio::task::spawn_blocking(move || pu_core::template::delete_template(&dir, &n)) + .await + { + Ok(Ok(_)) => Response::Ok, + Ok(Err(e)) => Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to delete template: {e}"), + }, + Err(e) => Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }, + } + } +} diff --git a/crates/pu-engine/src/engine/definitions/triggers.rs b/crates/pu-engine/src/engine/definitions/triggers.rs new file mode 100644 index 0000000..12ddb97 --- /dev/null +++ b/crates/pu-engine/src/engine/definitions/triggers.rs @@ -0,0 +1,210 @@ +use std::path::Path; + +use pu_core::protocol::Response; + +use super::super::{Engine, SaveTriggerParams}; + +impl Engine { + pub(in crate::engine) async fn handle_list_triggers(&self, project_root: &str) -> Response { + let pr = project_root.to_string(); + tokio::task::spawn_blocking(move || { + let defs = pu_core::trigger_def::list_trigger_defs(Path::new(&pr)); + let triggers: Vec<_> = defs + .into_iter() + .map(pu_core::protocol::TriggerInfo::from) + .collect(); + Response::TriggerList { triggers } + }) + .await + .unwrap_or_else(|e| Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }) + } + + pub(in crate::engine) async fn handle_get_trigger( + &self, + project_root: &str, + name: &str, + ) -> Response { + let pr = project_root.to_string(); + let name = name.to_string(); + tokio::task::spawn_blocking(move || { + match pu_core::trigger_def::find_trigger_def(Path::new(&pr), &name) { + Some(def) => Response::TriggerDetail(pu_core::protocol::TriggerInfo::from(def)), + None => Response::Error { + code: "NOT_FOUND".into(), + message: format!("trigger not found: {name}"), + }, + } + }) + .await + .unwrap_or_else(|e| Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }) + } + + pub(in crate::engine) async fn handle_save_trigger( + &self, + params: SaveTriggerParams, + ) -> Response { + let pr = params.project_root; + let name = 
params.name; + // Normalize hyphenated form to underscored form (consistent with handle_evaluate_gate) + let on = params.on.replace('-', "_"); + let scope = params.scope; + let description = params.description; + let sequence = params.sequence; + let variables = params.variables; + tokio::task::spawn_blocking(move || { + let event = match on.as_str() { + "agent_idle" => pu_core::trigger_def::TriggerEvent::AgentIdle, + "pre_commit" => pu_core::trigger_def::TriggerEvent::PreCommit, + "pre_push" => pu_core::trigger_def::TriggerEvent::PrePush, + other => { + return Response::Error { + code: "INVALID_ARGUMENT".into(), + message: format!("unknown trigger event: {other}"), + }; + } + }; + let actions: Vec = + sequence.into_iter().map(Into::into).collect(); + let def = pu_core::trigger_def::TriggerDef { + name: name.clone(), + description, + on: event, + sequence: actions, + variables, + scope: scope.clone(), + }; + let dir = match Self::resolve_scope_dir( + &pr, + &scope, + pu_core::paths::triggers_dir, + pu_core::paths::global_triggers_dir, + ) { + Ok(d) => d, + Err(e) => { + return Response::Error { + code: "IO_ERROR".into(), + message: e, + }; + } + }; + match pu_core::trigger_def::save_trigger_def(&dir, &def) { + Ok(()) => Response::Ok, + Err(e) => Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to save trigger: {e}"), + }, + } + }) + .await + .unwrap_or_else(|e| Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }) + } + + pub(in crate::engine) async fn handle_delete_trigger( + &self, + project_root: &str, + name: &str, + scope: &str, + ) -> Response { + let pr = project_root.to_string(); + let name = name.to_string(); + let scope = scope.to_string(); + tokio::task::spawn_blocking(move || { + let dir = match Self::resolve_scope_dir( + &pr, + &scope, + pu_core::paths::triggers_dir, + pu_core::paths::global_triggers_dir, + ) { + Ok(d) => d, + Err(e) => { + return Response::Error { + code: "IO_ERROR".into(), + message: e, + }; + } + }; + match pu_core::trigger_def::delete_trigger_def(&dir, &name) { + Ok(true) => Response::Ok, + Ok(false) => Response::Error { + code: "NOT_FOUND".into(), + message: format!("trigger not found: {name}"), + }, + Err(e) => Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to delete trigger: {e}"), + }, + } + }) + .await + .unwrap_or_else(|e| Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }) + } + + pub(in crate::engine) async fn handle_evaluate_gate( + &self, + event: &str, + project_root: &str, + worktree_path: &str, + ) -> Response { + // Normalize hyphenated form (from git hooks) to underscored form + let normalized_event = event.replace('-', "_"); + let trigger_event = match normalized_event.as_str() { + "pre_commit" => pu_core::trigger_def::TriggerEvent::PreCommit, + "pre_push" => pu_core::trigger_def::TriggerEvent::PrePush, + other => { + return Response::Error { + code: "INVALID_ARGUMENT".into(), + message: format!("unsupported gate event: {other}"), + }; + } + }; + + let triggers = { + let pr = project_root.to_string(); + let evt = trigger_event.clone(); + match tokio::task::spawn_blocking(move || { + pu_core::trigger_def::triggers_for_event(Path::new(&pr), &evt) + }) + .await + { + Ok(t) => t, + Err(e) => { + return Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }; + } + } + }; + + if triggers.is_empty() { + return Response::GateResult { + passed: true, + output: 
String::new(), + }; + } + + let wt = worktree_path.to_string(); + match crate::gate::evaluate_trigger_gates(&triggers, Path::new(&wt)).await { + Ok(result) => Response::GateResult { + passed: result.passed, + output: result.output, + }, + Err(e) => Response::GateResult { + passed: false, + output: format!("gate evaluation error: {e}"), + }, + } + } +} diff --git a/crates/pu-engine/src/engine/helpers.rs b/crates/pu-engine/src/engine/helpers.rs new file mode 100644 index 0000000..07e9800 --- /dev/null +++ b/crates/pu-engine/src/engine/helpers.rs @@ -0,0 +1,191 @@ +use std::path::Path; + +use pu_core::error::PuError; +use pu_core::manifest; +use pu_core::protocol::Response; +use pu_core::types::{AgentEntry, AgentStatus, Manifest}; + +use crate::daemon_lifecycle; + +use super::Engine; + +impl Engine { + /// Parse an agent config's command string into (program, args), resolving + /// the "shell" sentinel to the user's login shell. + #[allow(clippy::result_large_err)] + pub(super) fn parse_agent_command( + agent_cfg: &pu_core::types::AgentConfig, + agent_type: &str, + ) -> Result<(String, Vec), Response> { + let mut parts: Vec = agent_cfg + .command + .split_whitespace() + .map(String::from) + .collect(); + if parts.is_empty() { + return Err(Response::Error { + code: "CONFIG_ERROR".into(), + message: format!("agent type '{agent_type}' has an empty command"), + }); + } + let command = parts.remove(0); + let command = if command == "shell" { + std::env::var("SHELL").unwrap_or_else(|_| "/bin/sh".into()) + } else { + command + }; + Ok((command, parts)) + } + + pub(super) fn should_inject_prompt_via_stdin( + agent_type: &str, + interactive: bool, + prompt: &str, + ) -> bool { + !prompt.is_empty() && interactive && matches!(agent_type, "claude" | "terminal") + } + + pub(super) fn resolved_prompt_flag( + agent_type: &str, + prompt_flag: Option<&str>, + ) -> Option { + match (agent_type, prompt_flag) { + ("opencode", None) => Some("--prompt".to_string()), + (_, Some(flag)) => Some(flag.to_string()), + _ => None, + } + } + + /// On daemon restart, reconcile agents that appear alive in the manifest but have no + /// live process. Resumable agents (claude, codex, opencode) with a session_id get marked + /// suspended so the Swift side can auto-resume them. Others get marked Broken. + /// Called synchronously inside handle_init so state is correct before the first status read. 
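+ /// Illustrative outcomes (a sketch of the rules below, not an exhaustive list): a `claude` agent left in
+ /// `Streaming` with a recorded `session_id` becomes `suspended: true` / `Waiting` so it can be auto-resumed,
+ /// while a `terminal` agent stuck in the same state has no resume path and is marked `Broken`.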
+ pub(super) fn reconcile_agents_on_init(project_root: &str) { + let root = Path::new(project_root); + let Ok(m) = manifest::read_manifest(root) else { + return; + }; + let is_stale = |a: &AgentEntry| { + !a.suspended && matches!(a.status, AgentStatus::Streaming | AgentStatus::Waiting) + }; + let has_stale = m + .agents + .values() + .chain(m.worktrees.values().flat_map(|wt| wt.agents.values())) + .any(is_stale); + if !has_stale { + return; + } + let is_resumable = |t: &str| matches!(t, "claude" | "codex" | "opencode"); + let now = chrono::Utc::now(); + manifest::update_manifest(root, move |mut m| { + for agent in m.agents.values_mut().chain( + m.worktrees + .values_mut() + .flat_map(|wt| wt.agents.values_mut()), + ) { + if !agent.suspended + && matches!(agent.status, AgentStatus::Streaming | AgentStatus::Waiting) + { + if agent.session_id.is_some() && is_resumable(&agent.agent_type) { + agent.status = AgentStatus::Waiting; + agent.suspended = true; + agent.pid = None; + agent.suspended_at = Some(now); + } else { + agent.status = AgentStatus::Broken; + agent.completed_at = Some(now); + } + } + } + m + }) + .ok(); + } + + /// Scan the manifest for Running/Idle agents whose PID is dead, mark them Lost. + /// Called once per project on the first status request after daemon (re)start. + /// Note: Suspended agents are intentionally unaffected — they have no PID and are paused. + pub(super) fn reap_stale_agents(project_root: &str) { + let root = Path::new(project_root); + let Ok(m) = manifest::read_manifest(root) else { + return; + }; + let needs_reap = |a: &AgentEntry| { + !a.suspended + && matches!(a.status, AgentStatus::Streaming | AgentStatus::Waiting) + && a.pid + .is_none_or(|pid| !daemon_lifecycle::is_process_alive(pid)) + }; + let has_stale = m + .agents + .values() + .chain(m.worktrees.values().flat_map(|wt| wt.agents.values())) + .any(needs_reap); + if !has_stale { + return; + } + manifest::update_manifest(root, move |mut m| { + let now = chrono::Utc::now(); + for agent in m.agents.values_mut().chain( + m.worktrees + .values_mut() + .flat_map(|wt| wt.agents.values_mut()), + ) { + if !agent.suspended + && matches!(agent.status, AgentStatus::Streaming | AgentStatus::Waiting) + && agent + .pid + .is_none_or(|pid| !daemon_lifecycle::is_process_alive(pid)) + { + agent.status = AgentStatus::Broken; + agent.completed_at = Some(now); + } + } + m + }) + .ok(); + } + + pub(super) fn agent_not_found(agent_id: &str) -> Response { + Response::Error { + code: "AGENT_NOT_FOUND".into(), + message: format!("no active session for agent {agent_id}"), + } + } + + pub(super) fn error_response(e: &PuError) -> Response { + Response::Error { + code: e.code().into(), + message: e.to_string(), + } + } + + /// Read manifest from disk (off async runtime). 
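+ /// Runs the blocking read on `spawn_blocking` so manifest file I/O never stalls the async runtime;
+ /// a join failure surfaces as `PuError::Io`. Usage sketch (hypothetical caller, error handling elided):
+ /// ```ignore
+ /// let manifest = engine.read_manifest_async(project_root).await?;
+ /// ```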
+ pub(super) async fn read_manifest_async( + &self, + project_root: &str, + ) -> Result { + let pr = project_root.to_string(); + tokio::task::spawn_blocking(move || manifest::read_manifest(Path::new(&pr))) + .await + .unwrap_or_else(|e| Err(PuError::Io(std::io::Error::other(e)))) + } + + // --- Scope resolution helper --- + + pub(super) fn resolve_scope_dir( + project_root: &str, + scope: &str, + local_fn: fn(&Path) -> std::path::PathBuf, + global_fn: fn() -> Result, + ) -> Result { + match scope { + "global" => global_fn().map_err(|e| e.to_string()), + "local" => Ok(local_fn(Path::new(project_root))), + other => Err(format!( + "unknown scope: {other} (expected 'local' or 'global')" + )), + } + } +} diff --git a/crates/pu-engine/src/engine/mod.rs b/crates/pu-engine/src/engine/mod.rs new file mode 100644 index 0000000..fefeb62 --- /dev/null +++ b/crates/pu-engine/src/engine/mod.rs @@ -0,0 +1,1136 @@ +mod agent_lifecycle; +mod definitions; +mod helpers; +mod pty_operations; +mod scheduler; +mod session_repair; +mod spawn; +mod status; +mod subscriptions; +mod trigger_executor; +mod worktree_ops; + +#[cfg(test)] +use session_repair::repair_session_file; + +use std::collections::{HashMap, HashSet}; +use std::io::Write; +use std::path::Path; +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use tokio::sync::Mutex; + +use pu_core::config; +use pu_core::paths; +use pu_core::protocol::{AgentConfigInfo, GridCommand, PROTOCOL_VERSION, Request, Response}; +use pu_core::types::Manifest; +use tokio::sync::OnceCell; + +use crate::pty_manager::{AgentHandle, NativePtyHost}; + +/// Parameters for spawning an agent, extracted to avoid too many positional args. +pub(super) struct SpawnParams { + project_root: String, + prompt: String, + agent_type: String, + name: Option, + base: Option, + root: bool, + worktree: Option, + terminal_command: Option, + /// Skip auto-mode launch args for this spawn. One-off override; + /// does not affect resume (resume always reads from config). + no_auto: bool, + /// Extra CLI args from --agent-args, appended after launch args. + extra_args: Vec, + plan_mode: bool, + no_trigger: bool, + /// Name of trigger to bind (from --trigger flag) + trigger: Option, +} + +pub(super) struct SaveTriggerParams { + project_root: String, + name: String, + description: Option, + on: String, + sequence: Vec, + variables: std::collections::HashMap, + scope: String, +} + +pub struct Engine { + start_time: Instant, + pty_host: NativePtyHost, + sessions: Arc>>, + pending_initial_inputs: Arc>>>, + login_env: Arc>>, + reaped_projects: Arc>>, + /// Per-project broadcast channels for grid commands. + grid_channels: Arc>>>, + /// Per-project broadcast channels for status push updates. + status_channels: Arc>>>, + /// Projects that have been initialized or used — scheduler scans these. + registered_projects: Arc>>, +} + +impl Default for Engine { + fn default() -> Self { + Self::new() + } +} + +/// Build a `ConfigReport` response from a loaded config. +/// Filters out the terminal agent (no launch args to configure). 
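+ /// Illustrative mapping (values inferred from the launch-args tests in this module): a `claude` entry with
+ /// `launch_args: None` reports resolved defaults that include `--dangerously-skip-permissions`, while an
+ /// explicit `launch_args: Some(vec![])` resolves to an empty list.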
+fn config_to_report(cfg: &pu_core::types::Config) -> Response { + let agents = cfg + .agents + .iter() + .filter(|(name, _)| name.as_str() != "terminal") + .map(|(name, ac)| { + let resolved = pu_core::types::resolved_launch_args(name, ac.launch_args.as_deref()); + AgentConfigInfo { + name: ac.name.clone(), + command: ac.command.clone(), + launch_args: ac.launch_args.clone(), + resolved_launch_args: resolved, + interactive: ac.interactive, + } + }) + .collect(); + Response::ConfigReport { + default_agent: cfg.default_agent.clone(), + agents, + } +} + +impl Engine { + pub fn new() -> Self { + Self { + start_time: Instant::now(), + pty_host: NativePtyHost::new(), + sessions: Arc::new(Mutex::new(HashMap::new())), + pending_initial_inputs: Arc::new(Mutex::new(HashMap::new())), + login_env: Arc::new(OnceCell::new()), + reaped_projects: Arc::new(std::sync::Mutex::new(HashSet::new())), + grid_channels: Arc::new(Mutex::new(HashMap::new())), + status_channels: Arc::new(Mutex::new(HashMap::new())), + registered_projects: Arc::new(std::sync::Mutex::new(HashSet::new())), + } + } + + /// Start a background task that periodically removes session handles for + /// processes that have exited naturally, and cleans up broadcast channels + /// with no subscribers. Without this, HashMap entries leak. + pub fn start_session_reaper(self: &Arc) { + let sessions = self.sessions.clone(); + let pending_initial_inputs = self.pending_initial_inputs.clone(); + let grid_channels = self.grid_channels.clone(); + let status_channels = self.status_channels.clone(); + tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_secs(30)); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + loop { + interval.tick().await; + + // Reap dead sessions + let dead_ids: Vec = { + let mut sessions = sessions.lock().await; + let dead: Vec = sessions + .iter() + .filter(|(_, handle)| handle.exit_rx.borrow().is_some()) + .map(|(id, _)| id.clone()) + .collect(); + for id in &dead { + sessions.remove(id); + } + dead + }; + if !dead_ids.is_empty() { + let mut pending_initial_inputs = pending_initial_inputs.lock().await; + for id in &dead_ids { + pending_initial_inputs.remove(id); + } + tracing::debug!(count = dead_ids.len(), "reaped dead session handles"); + } + + // Clean up grid channels with no subscribers + { + let mut channels = grid_channels.lock().await; + channels.retain(|_, tx| tx.receiver_count() > 0); + } + + // Clean up status channels with no subscribers + { + let mut channels = status_channels.lock().await; + channels.retain(|_, tx| tx.receiver_count() > 0); + } + } + }); + } + + async fn resolve_login_env() -> Vec<(String, String)> { + let shell = std::env::var("SHELL").unwrap_or_else(|_| "/bin/zsh".into()); + match tokio::process::Command::new(&shell) + .args(["-li", "-c", "env -0"]) + .stdin(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) + .output() + .await + { + Ok(output) if output.status.success() => output + .stdout + .split(|&b| b == 0) + .filter_map(|entry| { + let s = std::str::from_utf8(entry).ok()?; + let (k, v) = s.split_once('=')?; + if k.is_empty() { + return None; + } + Some((k.to_string(), v.to_string())) + }) + .collect(), + // Fallback: use the daemon's own env + _ => std::env::vars().collect(), + } + } + + fn register_project(&self, project_root: &str) { + if !project_root.is_empty() { + if let Ok(mut projects) = self.registered_projects.lock() { + projects.insert(project_root.to_string()); + } + } + } + + pub fn 
registered_projects(&self) -> Vec { + self.registered_projects + .lock() + .map(|p| p.iter().cloned().collect()) + .unwrap_or_default() + } + + pub async fn handle_request(&self, request: Request) -> Response { + // Register project for any project-scoped request + match &request { + Request::Init { project_root } + | Request::Spawn { project_root, .. } + | Request::CreateWorktree { project_root, .. } + | Request::Status { project_root, .. } + | Request::Kill { project_root, .. } + | Request::ListTemplates { project_root } + | Request::ListAgentDefs { project_root } + | Request::ListSwarmDefs { project_root } + | Request::ListSchedules { project_root } + | Request::SaveSchedule { project_root, .. } + | Request::EnableSchedule { project_root, .. } + | Request::DisableSchedule { project_root, .. } + | Request::ListTriggers { project_root } + | Request::SaveTrigger { project_root, .. } + | Request::EvaluateGate { project_root, .. } + | Request::Diff { project_root, .. } + | Request::GetConfig { project_root } + | Request::UpdateAgentConfig { project_root, .. } + | Request::Pulse { project_root, .. } + | Request::AssignTrigger { project_root, .. } => { + self.register_project(project_root); + } + _ => {} + } + + match request { + Request::Health => self.handle_health().await, + Request::Init { project_root } => self.handle_init(&project_root).await, + Request::Rename { + project_root, + agent_id, + name, + } => self.handle_rename(&project_root, &agent_id, &name).await, + Request::AssignTrigger { + project_root, + agent_id, + trigger_name, + } => { + self.handle_assign_trigger(&project_root, &agent_id, &trigger_name) + .await + } + Request::GetConfig { project_root } => self.handle_get_config(&project_root).await, + Request::UpdateAgentConfig { + project_root, + agent_name, + launch_args, + } => { + self.handle_update_agent_config(&project_root, &agent_name, launch_args) + .await + } + Request::Shutdown => Response::ShuttingDown, + Request::Status { + project_root, + agent_id, + } => self.handle_status(&project_root, agent_id.as_deref()).await, + Request::SpawnShell { cwd } => self.handle_spawn_shell(&cwd).await, + Request::Spawn { + project_root, + prompt, + agent, + name, + base, + root, + worktree, + command, + no_auto, + extra_args, + plan_mode, + no_trigger, + trigger, + } => { + self.handle_spawn(SpawnParams { + project_root, + prompt, + agent_type: agent, + name, + base, + root, + worktree, + terminal_command: command, + no_auto, + extra_args, + plan_mode, + no_trigger, + trigger, + }) + .await + } + Request::CreateWorktree { + project_root, + name, + base, + } => self.handle_create_worktree(&project_root, name, base).await, + Request::Kill { + project_root, + target, + exclude, + } => self.handle_kill(&project_root, target, &exclude).await, + Request::Suspend { + project_root, + target, + } => self.handle_suspend(&project_root, target).await, + Request::Resume { + project_root, + agent_id, + } => self.handle_resume(&project_root, &agent_id).await, + Request::Logs { agent_id, tail } => self.handle_logs(&agent_id, tail).await, + Request::Attach { agent_id } => self.handle_attach(&agent_id).await, + Request::Input { + agent_id, + data, + submit, + } => self.handle_input(&agent_id, &data, submit).await, + Request::Resize { + agent_id, + cols, + rows, + } => self.handle_resize(&agent_id, cols, rows).await, + Request::SubscribeGrid { project_root } => { + self.handle_subscribe_grid(&project_root).await + } + Request::SubscribeStatus { project_root } => { + 
self.handle_subscribe_status(&project_root).await + } + Request::GridCommand { + project_root, + command, + } => self.handle_grid_command(&project_root, command).await, + Request::DeleteWorktree { + project_root, + worktree_id, + } => { + self.handle_delete_worktree(&project_root, &worktree_id) + .await + } + // Template CRUD + Request::ListTemplates { project_root } => { + self.handle_list_templates(&project_root).await + } + Request::GetTemplate { project_root, name } => { + self.handle_get_template(&project_root, &name).await + } + Request::SaveTemplate { + project_root, + name, + description, + agent, + body, + scope, + command, + } => { + self.handle_save_template( + &project_root, + &name, + &description, + &agent, + &body, + &scope, + command, + ) + .await + } + Request::DeleteTemplate { + project_root, + name, + scope, + } => { + self.handle_delete_template(&project_root, &name, &scope) + .await + } + // Agent def CRUD + Request::ListAgentDefs { project_root } => { + self.handle_list_agent_defs(&project_root).await + } + Request::GetAgentDef { project_root, name } => { + self.handle_get_agent_def(&project_root, &name).await + } + Request::SaveAgentDef { + project_root, + name, + agent_type, + template, + inline_prompt, + tags, + scope, + available_in_command_dialog, + icon, + command, + } => { + self.handle_save_agent_def( + &project_root, + &name, + &agent_type, + template, + inline_prompt, + tags, + &scope, + available_in_command_dialog, + icon, + command, + ) + .await + } + Request::DeleteAgentDef { + project_root, + name, + scope, + } => { + self.handle_delete_agent_def(&project_root, &name, &scope) + .await + } + // Swarm def CRUD + Request::ListSwarmDefs { project_root } => { + self.handle_list_swarm_defs(&project_root).await + } + Request::GetSwarmDef { project_root, name } => { + self.handle_get_swarm_def(&project_root, &name).await + } + Request::SaveSwarmDef { + project_root, + name, + worktree_count, + worktree_template, + roster, + include_terminal, + scope, + } => { + self.handle_save_swarm_def( + &project_root, + &name, + worktree_count, + &worktree_template, + roster, + include_terminal, + &scope, + ) + .await + } + Request::DeleteSwarmDef { + project_root, + name, + scope, + } => { + self.handle_delete_swarm_def(&project_root, &name, &scope) + .await + } + // Execution + Request::RunSwarm { + project_root, + swarm_name, + vars, + } => { + self.handle_run_swarm(&project_root, &swarm_name, vars) + .await + } + // Schedule CRUD + Request::ListSchedules { project_root } => { + self.handle_list_schedules(&project_root).await + } + Request::GetSchedule { project_root, name } => { + self.handle_get_schedule(&project_root, &name).await + } + Request::SaveSchedule { + project_root, + name, + enabled, + recurrence, + start_at, + trigger, + target, + scope, + root, + agent_name, + } => { + self.handle_save_schedule( + &project_root, + &name, + enabled, + &recurrence, + start_at, + trigger, + &target, + &scope, + root, + agent_name, + ) + .await + } + Request::DeleteSchedule { + project_root, + name, + scope, + } => { + self.handle_delete_schedule(&project_root, &name, &scope) + .await + } + Request::EnableSchedule { project_root, name } => { + self.handle_enable_schedule(&project_root, &name).await + } + Request::DisableSchedule { project_root, name } => { + self.handle_disable_schedule(&project_root, &name).await + } + // Trigger CRUD + Request::ListTriggers { project_root } => { + self.handle_list_triggers(&project_root).await + } + Request::GetTrigger { project_root, name } 
=> { + self.handle_get_trigger(&project_root, &name).await + } + Request::SaveTrigger { + project_root, + name, + description, + on, + sequence, + variables, + scope, + } => { + self.handle_save_trigger(SaveTriggerParams { + project_root, + name, + description, + on, + sequence, + variables, + scope, + }) + .await + } + Request::DeleteTrigger { + project_root, + name, + scope, + } => { + self.handle_delete_trigger(&project_root, &name, &scope) + .await + } + Request::EvaluateGate { + event, + project_root, + worktree_path, + } => { + self.handle_evaluate_gate(&event, &project_root, &worktree_path) + .await + } + Request::Diff { + project_root, + worktree_id, + stat, + } => { + self.handle_diff(&project_root, worktree_id.as_deref(), stat) + .await + } + Request::Pulse { project_root } => self.handle_pulse(&project_root).await, + } + } + + async fn handle_health(&self) -> Response { + let sessions = self.sessions.lock().await; + Response::HealthReport { + pid: std::process::id(), + uptime_seconds: self.start_time.elapsed().as_secs(), + protocol_version: PROTOCOL_VERSION, + projects: vec![], + agent_count: sessions.len(), + } + } + + async fn handle_init(&self, project_root: &str) -> Response { + let project_root = project_root.to_string(); + tokio::task::spawn_blocking(move || { + let root = Path::new(&project_root); + let pu_dir = paths::pu_dir(root); + + if let Err(e) = std::fs::create_dir_all(&pu_dir) { + return Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to create .pu directory: {e}"), + }; + } + + // Atomic check-and-create via O_EXCL — prevents TOCTOU race + let manifest_path = paths::manifest_path(root); + let file = match std::fs::OpenOptions::new() + .write(true) + .create_new(true) + .open(&manifest_path) + { + Ok(f) => f, + Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => { + Self::reconcile_agents_on_init(&project_root); + return Response::InitResult { created: false }; + } + Err(e) => { + return Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to create manifest: {e}"), + }; + } + }; + + let m = Manifest::new(project_root.clone()); + let content = match serde_json::to_string_pretty(&m) { + Ok(c) => c + "\n", + Err(e) => { + let _ = std::fs::remove_file(&manifest_path); + return Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to serialize manifest: {e}"), + }; + } + }; + let mut file = file; + if let Err(e) = file + .write_all(content.as_bytes()) + .and_then(|_| file.sync_all()) + { + let _ = std::fs::remove_file(&manifest_path); + return Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to write manifest: {e}"), + }; + } + + if let Err(e) = config::write_default_config(root) { + return Response::Error { + code: "IO_ERROR".into(), + message: format!("failed to write config: {e}"), + }; + } + + Response::InitResult { created: true } + }) + .await + .unwrap_or_else(|e| Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }) + } + + async fn handle_get_config(&self, project_root: &str) -> Response { + let pr = project_root.to_string(); + tokio::task::spawn_blocking(move || { + let root = Path::new(&pr); + match config::load_config_strict(root) { + Ok(cfg) => config_to_report(&cfg), + Err(e) => Response::Error { + code: e.code().into(), + message: e.to_string(), + }, + } + }) + .await + .unwrap_or_else(|e| Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }) + } + + async fn 
handle_update_agent_config( + &self, + project_root: &str, + agent_name: &str, + launch_args: Option>, + ) -> Response { + let pr = project_root.to_string(); + let name = agent_name.to_string(); + tokio::task::spawn_blocking(move || { + let root = Path::new(&pr); + match config::update_agent_config(root, &name, launch_args) { + Ok(cfg) => config_to_report(&cfg), + Err(e) => Response::Error { + code: e.code().into(), + message: e.to_string(), + }, + } + }) + .await + .unwrap_or_else(|e| Response::Error { + code: "INTERNAL_ERROR".into(), + message: format!("task join error: {e}"), + }) + } +} + +impl Drop for Engine { + fn drop(&mut self) { + // Kill all child processes so spawn_blocking reader/waitpid tasks can finish. + if let Ok(sessions) = self.sessions.try_lock() { + for handle in sessions.values() { + unsafe { + libc::kill(handle.pid as i32, libc::SIGKILL); + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_helpers::init_and_spawn; + use pu_core::protocol::{Request, Response}; + use tempfile::TempDir; + + #[tokio::test(flavor = "current_thread")] + async fn given_spawned_agent_should_return_attach_handles() { + let (engine, agent_id, _tmp) = init_and_spawn().await; + + let handles = engine.get_attach_handles(&agent_id).await; + assert!( + handles.is_some(), + "expected attach handles for spawned agent" + ); + + let (buffer, _fd, _exit_rx) = handles.unwrap(); + // Buffer exists and has a valid offset + let _ = buffer.current_offset(); + } + + #[tokio::test(flavor = "current_thread")] + async fn given_unknown_agent_should_return_none() { + let engine = Engine::new(); + let handles = engine.get_attach_handles("ag-nonexistent").await; + assert!(handles.is_none()); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn given_spawn_with_prompt_should_inject_in_background() { + let tmp = TempDir::new().unwrap(); + let project_root = tmp.path().join("project"); + std::fs::create_dir_all(&project_root).unwrap(); + let pr = project_root.to_string_lossy().to_string(); + + let engine = Engine::new(); + engine + .handle_request(Request::Init { + project_root: pr.clone(), + }) + .await; + + let resp = engine + .handle_request(Request::Spawn { + project_root: pr, + prompt: "hello world".into(), + agent: "terminal".into(), + name: None, + base: None, + root: true, + worktree: None, + command: None, + no_auto: false, + extra_args: vec![], + plan_mode: false, + no_trigger: false, + trigger: None, + }) + .await; + + let agent_id = match resp { + Response::SpawnResult { agent_id, .. } => agent_id, + other => panic!("expected SpawnResult, got {other:?}"), + }; + + // The background task should eventually drain pending_initial_inputs. + // Allow up to 5s for the injection to complete (includes readiness + // timeout + chunked write delays). 
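+ // The loop below polls pending_initial_inputs every 50ms; the spawn-time injection task removes the
+ // entry once the prompt is written (or the process exits first), which serves as the completion signal here.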
+ let deadline = tokio::time::Instant::now() + Duration::from_secs(5); + loop { + let still_pending = engine + .pending_initial_inputs + .lock() + .await + .contains_key(&agent_id); + if !still_pending { + break; + } + if tokio::time::Instant::now() >= deadline { + panic!("prompt was never consumed from pending_initial_inputs"); + } + tokio::time::sleep(Duration::from_millis(50)).await; + } + } + + #[test] + fn given_claude_prompt_should_inject_via_stdin() { + assert!(Engine::should_inject_prompt_via_stdin( + "claude", true, "hello" + )); + } + + #[test] + fn given_codex_prompt_should_not_inject_via_stdin() { + assert!(!Engine::should_inject_prompt_via_stdin( + "codex", true, "hello" + )); + } + + #[test] + fn given_non_interactive_agent_should_not_inject_via_stdin() { + assert!(!Engine::should_inject_prompt_via_stdin( + "terminal", false, "hello" + )); + } + + #[test] + fn given_opencode_without_configured_flag_should_use_prompt_flag() { + assert_eq!( + Engine::resolved_prompt_flag("opencode", None), + Some("--prompt".to_string()) + ); + } + + #[test] + fn given_codex_without_configured_flag_should_use_positional_prompt() { + assert_eq!(Engine::resolved_prompt_flag("codex", None), None); + } + + #[test] + fn given_configured_prompt_flag_should_be_preserved() { + assert_eq!( + Engine::resolved_prompt_flag("codex", Some("--prompt")), + Some("--prompt".to_string()) + ); + } + + #[test] + fn given_claude_build_resume_with_default_launch_args_should_include_yolo() { + // given + let engine = Engine::new(); + let agent_cfg = pu_core::types::AgentConfig { + name: "claude".into(), + command: "claude".into(), + prompt_flag: None, + interactive: true, + launch_args: None, // use defaults + }; + + // when + let (cmd, args, sid) = engine + .build_resume_command("claude", &agent_cfg, Some("sess-123")) + .unwrap(); + + // then + assert_eq!(cmd, "claude"); + assert!(args.contains(&"--dangerously-skip-permissions".to_string())); + assert!(args.contains(&"--resume".to_string())); + assert!(args.contains(&"sess-123".to_string())); + assert_eq!(sid, Some("sess-123".to_string())); + } + + #[test] + fn given_claude_build_resume_with_empty_launch_args_should_omit_yolo() { + // given: user configured launchArgs: [] to disable auto-mode + let engine = Engine::new(); + let agent_cfg = pu_core::types::AgentConfig { + name: "claude".into(), + command: "claude".into(), + prompt_flag: None, + interactive: true, + launch_args: Some(vec![]), + }; + + // when + let (cmd, args, _) = engine + .build_resume_command("claude", &agent_cfg, Some("sess-456")) + .unwrap(); + + // then + assert_eq!(cmd, "claude"); + assert!(!args.contains(&"--dangerously-skip-permissions".to_string())); + assert!(args.contains(&"--resume".to_string())); + } + + #[test] + fn given_codex_build_resume_with_custom_launch_args_should_place_before_subcommand() { + // given + let engine = Engine::new(); + let agent_cfg = pu_core::types::AgentConfig { + name: "codex".into(), + command: "codex".into(), + prompt_flag: None, + interactive: true, + launch_args: Some(vec!["--approval-mode=full-auto".into()]), + }; + + // when + let (cmd, args, _) = engine + .build_resume_command("codex", &agent_cfg, None) + .unwrap(); + + // then — top-level flags must precede the subcommand + assert_eq!(cmd, "codex"); + assert_eq!(args, vec!["--approval-mode=full-auto", "resume", "--last"]); + } + + #[test] + fn given_codex_build_resume_with_defaults_should_place_full_auto_before_subcommand() { + // given + let engine = Engine::new(); + let agent_cfg = 
pu_core::types::AgentConfig { + name: "codex".into(), + command: "codex".into(), + prompt_flag: None, + interactive: true, + launch_args: None, // use defaults + }; + + // when + let (cmd, args, _) = engine + .build_resume_command("codex", &agent_cfg, None) + .unwrap(); + + // then — --full-auto is a top-level flag, must come before `resume` + assert_eq!(cmd, "codex"); + assert_eq!(args, vec!["--full-auto", "resume", "--last"]); + } + + /// Test the no_auto + launch_args interaction logic used in handle_spawn. + /// When no_auto is true and launch_args is None (defaults), launch args should be empty. + /// When no_auto is true but launch_args is explicitly configured, they should be preserved. + #[test] + fn given_no_auto_with_default_launch_args_should_produce_empty() { + let agent_cfg = pu_core::types::AgentConfig { + name: "claude".into(), + command: "claude".into(), + prompt_flag: None, + interactive: true, + launch_args: None, + }; + let no_auto = true; + let launch_args = if no_auto && agent_cfg.launch_args.is_none() { + Vec::new() + } else { + pu_core::types::resolved_launch_args("claude", agent_cfg.launch_args.as_deref()) + }; + assert!( + launch_args.is_empty(), + "--no-auto should skip default launch args" + ); + } + + #[test] + fn given_no_auto_with_explicit_launch_args_should_preserve_them() { + let agent_cfg = pu_core::types::AgentConfig { + name: "claude".into(), + command: "claude".into(), + prompt_flag: None, + interactive: true, + launch_args: Some(vec!["--verbose".into()]), + }; + let no_auto = true; + let launch_args = if no_auto && agent_cfg.launch_args.is_none() { + Vec::new() + } else { + pu_core::types::resolved_launch_args("claude", agent_cfg.launch_args.as_deref()) + }; + assert_eq!( + launch_args, + vec!["--verbose"], + "--no-auto should not affect explicit launch args" + ); + } + + #[test] + fn given_snapshot_collision_should_nullify_message_id() { + // given: a session file where a file-history-snapshot reuses a real message uuid + let tmp = TempDir::new().unwrap(); + let path = tmp.path().join("session.jsonl"); + let content = [ + r#"{"uuid":"u1","type":"summary","parentUuid":null}"#, + r#"{"uuid":"u2","type":"assistant","message":{"content":"hello"}}"#, + r#"{"type":"file-history-snapshot","messageId":"u2","data":{}}"#, + ] + .join("\n") + + "\n"; + std::fs::write(&path, &content).unwrap(); + + // when + let repaired = repair_session_file(&path); + + // then + assert!(repaired); + let result = std::fs::read_to_string(&path).unwrap(); + let lines: Vec = result + .lines() + .map(|l| serde_json::from_str(l).unwrap()) + .collect(); + // The snapshot's messageId should be null + assert_eq!(lines[2]["messageId"], serde_json::Value::Null); + // Backup should exist + assert!(tmp.path().join("session.jsonl.bak").exists()); + } + + #[test] + fn given_broken_parent_uuid_should_fix_reference() { + // given: a session file where an entry's parentUuid points to a non-existent uuid + let tmp = TempDir::new().unwrap(); + let path = tmp.path().join("session.jsonl"); + let content = [ + r#"{"uuid":"u1","type":"summary","parentUuid":null}"#, + r#"{"uuid":"u2","parentUuid":"u1","type":"assistant"}"#, + r#"{"uuid":"u3","parentUuid":"DOES_NOT_EXIST","type":"assistant"}"#, + ] + .join("\n") + + "\n"; + std::fs::write(&path, &content).unwrap(); + + // when + let repaired = repair_session_file(&path); + + // then + assert!(repaired); + let result = std::fs::read_to_string(&path).unwrap(); + let lines: Vec = result + .lines() + .map(|l| serde_json::from_str(l).unwrap()) + .collect(); + 
// u3's parentUuid should now point to u2 (nearest preceding) + assert_eq!(lines[2]["parentUuid"], "u2"); + } + + #[test] + fn given_disconnected_root_should_stitch() { + // given: a session file with two entries having parentUuid: null (disconnected roots) + let tmp = TempDir::new().unwrap(); + let path = tmp.path().join("session.jsonl"); + let content = [ + r#"{"uuid":"u1","type":"summary","parentUuid":null}"#, + r#"{"uuid":"u2","parentUuid":"u1","type":"assistant"}"#, + r#"{"uuid":"u3","parentUuid":null,"type":"assistant"}"#, + ] + .join("\n") + + "\n"; + std::fs::write(&path, &content).unwrap(); + + // when + let repaired = repair_session_file(&path); + + // then + assert!(repaired); + let result = std::fs::read_to_string(&path).unwrap(); + let lines: Vec = result + .lines() + .map(|l| serde_json::from_str(l).unwrap()) + .collect(); + // u3's parentUuid should now point to u2 (nearest preceding) + assert_eq!(lines[2]["parentUuid"], "u2"); + } + + // --- build_resume_command tests --- + + fn dummy_agent_cfg(name: &str) -> pu_core::types::AgentConfig { + pu_core::types::AgentConfig { + name: name.into(), + command: name.into(), + prompt_flag: None, + interactive: true, + launch_args: None, + } + } + + #[test] + fn given_claude_resume_should_use_bypass_and_resume() { + let engine = Engine::new(); + let cfg = dummy_agent_cfg("claude"); + let (cmd, args, sid) = engine + .build_resume_command("claude", &cfg, Some("sess-1")) + .unwrap(); + assert_eq!(cmd, "claude"); + assert!(args.contains(&"--dangerously-skip-permissions".to_string())); + assert!(args.contains(&"--resume".to_string())); + assert!(args.contains(&"sess-1".to_string())); + assert_eq!(sid, Some("sess-1".to_string())); + } + + #[test] + fn given_codex_resume_should_use_full_auto() { + let engine = Engine::new(); + let cfg = dummy_agent_cfg("codex"); + let (cmd, args, _) = engine.build_resume_command("codex", &cfg, None).unwrap(); + assert_eq!(cmd, "codex"); + assert!(args.contains(&"--full-auto".to_string())); + assert!(args.contains(&"resume".to_string())); + assert!(args.contains(&"--last".to_string())); + } + + #[test] + fn given_opencode_resume_should_use_continue() { + let engine = Engine::new(); + let cfg = dummy_agent_cfg("opencode"); + let (cmd, args, _) = engine.build_resume_command("opencode", &cfg, None).unwrap(); + assert_eq!(cmd, "opencode"); + assert!(args.contains(&"--continue".to_string())); + assert!(!args.contains(&"--agent".to_string())); + } + + #[test] + fn given_intact_file_should_not_modify() { + // given: a perfectly valid session file + let tmp = TempDir::new().unwrap(); + let path = tmp.path().join("session.jsonl"); + let content = [ + r#"{"uuid":"u1","type":"summary","parentUuid":null}"#, + r#"{"uuid":"u2","parentUuid":"u1","type":"assistant"}"#, + r#"{"uuid":"u3","parentUuid":"u2","type":"assistant"}"#, + ] + .join("\n") + + "\n"; + std::fs::write(&path, &content).unwrap(); + + // when + let repaired = repair_session_file(&path); + + // then: no changes needed + assert!(!repaired); + // No backup file should be created + assert!(!tmp.path().join("session.jsonl.bak").exists()); + } +} diff --git a/crates/pu-engine/src/engine/pty_operations.rs b/crates/pu-engine/src/engine/pty_operations.rs new file mode 100644 index 0000000..fc50403 --- /dev/null +++ b/crates/pu-engine/src/engine/pty_operations.rs @@ -0,0 +1,157 @@ +use std::os::fd::OwnedFd; +use std::sync::Arc; + +use pu_core::paths; +use pu_core::protocol::Response; + +use crate::output_buffer::OutputBuffer; + +use super::Engine; + +impl Engine { + 
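+ /// Serves the last `tail` bytes straight from the in-memory output buffer (no disk read), decoded
+ /// lossily so stray non-UTF-8 bytes degrade to replacement characters instead of failing the request.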
pub(super) async fn handle_logs(&self, agent_id: &str, tail: usize) -> Response { + let buf = { + let sessions = self.sessions.lock().await; + match sessions.get(agent_id) { + Some(handle) => handle.output_buffer.clone(), + None => return Self::agent_not_found(agent_id), + } + }; + let data = buf.read_tail(tail); + let text = String::from_utf8_lossy(&data); + if let std::borrow::Cow::Owned(_) = &text { + tracing::warn!( + agent_id, + "logs output contained non-UTF-8 bytes (lossy conversion applied)" + ); + } + Response::LogsResult { + agent_id: agent_id.to_string(), + data: text.into_owned(), + } + } + + pub(super) async fn handle_attach(&self, agent_id: &str) -> Response { + let sessions = self.sessions.lock().await; + match sessions.get(agent_id) { + Some(handle) => Response::AttachReady { + buffered_bytes: handle.output_buffer.len(), + }, + None => Self::agent_not_found(agent_id), + } + } + + pub(super) async fn handle_input(&self, agent_id: &str, data: &[u8], submit: bool) -> Response { + // Clone the fd Arc under the lock, then drop the lock before the blocking write + let master_fd = { + let sessions = self.sessions.lock().await; + match sessions.get(agent_id) { + Some(handle) => handle.master_fd(), + None => return Self::agent_not_found(agent_id), + } + }; + let result = if submit { + self.pty_host.write_chunked_submit(&master_fd, data).await + } else { + self.pty_host.write_to_fd(&master_fd, data).await + }; + match result { + Ok(()) => Response::Ok, + Err(e) => Response::Error { + code: "IO_ERROR".into(), + message: format!("write failed: {e}"), + }, + } + } + + pub(super) async fn handle_resize(&self, agent_id: &str, cols: u16, rows: u16) -> Response { + // Clone the fd Arc under the lock, then drop the lock before the blocking ioctl + let master_fd = { + let sessions = self.sessions.lock().await; + match sessions.get(agent_id) { + Some(handle) => handle.master_fd(), + None => return Self::agent_not_found(agent_id), + } + }; + match self.pty_host.resize_fd(&master_fd, cols, rows).await { + Ok(()) => Response::Ok, + Err(e) => Response::Error { + code: "IO_ERROR".into(), + message: format!("resize failed: {e}"), + }, + } + } + + /// Write data to a PTY fd via the pty host (avoids duplicating unsafe write logic). + pub async fn write_to_pty(&self, fd: &Arc, data: &[u8]) -> Result<(), std::io::Error> { + self.pty_host.write_to_fd(fd, data).await + } + + /// Resize a PTY fd via the pty host (avoids duplicating unsafe ioctl logic). + pub async fn resize_pty( + &self, + fd: &Arc, + cols: u16, + rows: u16, + ) -> Result<(), std::io::Error> { + self.pty_host.resize_fd(fd, cols, rows).await + } + + /// Return the output buffer, master PTY fd, and exit receiver for an agent, + /// if it has an active session. + pub async fn get_attach_handles( + &self, + agent_id: &str, + ) -> Option<( + Arc, + Arc, + tokio::sync::watch::Receiver>, + )> { + let sessions = self.sessions.lock().await; + sessions + .get(agent_id) + .map(|h| (h.output_buffer.clone(), h.master_fd(), h.exit_rx.clone())) + } + + /// Build the full environment for spawned agents. + /// Starts from the user's login shell env, then overrides PATH + /// (prepends ~/.pu/bin + fallback dirs), TERM, and COLORTERM. 
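+ /// Resulting PATH shape, roughly: `~/.pu/bin:<login PATH>:<missing fallbacks>` — the fallback dirs
+ /// (`~/.local/bin`, `~/.cargo/bin`, `/usr/local/bin`, `/opt/homebrew/bin`) are appended only when the
+ /// login PATH doesn't already list them, and `~/.pu/bin` is prepended last so it wins lookups.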
+ pub(super) async fn agent_env(&self) -> Vec<(String, String)> { + let login_env = self.login_env.get_or_init(Self::resolve_login_env).await; + let mut env = login_env.clone(); + + // Extract login PATH for augmentation + let login_path = env + .iter() + .find(|(k, _)| k == "PATH") + .map(|(_, v)| v.clone()) + .unwrap_or_default(); + + // Append common fallback dirs (guards against missing-binary issues) + let home = std::env::var("HOME").unwrap_or_default(); + let fallbacks = [ + format!("{home}/.local/bin"), + format!("{home}/.cargo/bin"), + "/usr/local/bin".to_string(), + "/opt/homebrew/bin".to_string(), + ]; + let mut path = login_path; + for dir in fallbacks { + if !path.split(':').any(|p| p == dir) { + path = format!("{path}:{dir}"); + } + } + // Prepend ~/.pu/bin + if let Ok(pu_dir) = paths::global_pu_dir() { + path = format!("{}:{}", pu_dir.join("bin").display(), path); + } + + // Override PATH, TERM, COLORTERM in the env + env.retain(|(k, _)| k != "PATH" && k != "TERM" && k != "COLORTERM"); + env.push(("PATH".into(), path)); + env.push(("TERM".into(), "xterm-256color".into())); + env.push(("COLORTERM".into(), "truecolor".into())); + + env + } +} diff --git a/crates/pu-engine/src/engine/scheduler.rs b/crates/pu-engine/src/engine/scheduler.rs new file mode 100644 index 0000000..b579f37 --- /dev/null +++ b/crates/pu-engine/src/engine/scheduler.rs @@ -0,0 +1,178 @@ +use std::path::Path; +use std::sync::Arc; +use std::time::Duration; + +use pu_core::paths; +use pu_core::protocol::{Request, Response}; + +use super::Engine; + +impl Engine { + /// Start a background task that periodically checks for due schedules and fires them. + pub fn start_scheduler(self: &Arc) { + let engine = Arc::clone(self); + tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_secs(30)); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + loop { + interval.tick().await; + engine.scheduler_tick().await; + } + }); + } + + async fn scheduler_tick(&self) { + let projects = self.registered_projects(); + for project_root in projects { + let defs = { + let pr = project_root.clone(); + match tokio::task::spawn_blocking(move || { + pu_core::schedule_def::list_schedule_defs(Path::new(&pr)) + }) + .await + { + Ok(d) => d, + Err(_) => continue, + } + }; + + let now = chrono::Utc::now(); + for def in defs { + if !def.enabled { + continue; + } + if let Some(next_run) = def.next_run { + if next_run <= now { + self.fire_schedule(&def).await; + self.advance_schedule(def, now).await; + } + } + } + + // Evaluate agent_idle triggers for active agents + self.evaluate_idle_triggers(&project_root).await; + } + } + + async fn fire_schedule(&self, schedule: &pu_core::schedule_def::ScheduleDef) { + let result = match &schedule.trigger { + pu_core::schedule_def::ScheduleTrigger::AgentDef { name } => { + // Resolve agent def to get its type and prompt + let pr = schedule.project_root.clone(); + let project_path = Path::new(&pr); + if let Some(def) = pu_core::agent_def::find_agent_def(project_path, name) { + let empty_vars = std::collections::HashMap::new(); + let (prompt, template_command) = if let Some(ref ip) = def.inline_prompt { + (ip.clone(), None) + } else if let Some(ref tpl_name) = def.template { + match pu_core::template::find_template(project_path, tpl_name) { + Some(tpl) => { + let rendered = pu_core::template::render(&tpl, &empty_vars); + let cmd = pu_core::template::render_command(&tpl, &empty_vars); + (rendered, cmd) + } + None => ( + format!("Scheduled: agent def 
'{name}' (template not found)"), + None, + ), + } + } else { + (format!("Scheduled: run agent def '{name}'"), None) + }; + self.handle_request(Request::Spawn { + project_root: pr, + prompt, + agent: def.agent_type, + name: schedule.agent_name.clone(), + base: None, + root: schedule.root, + worktree: None, + command: def.command.or(template_command), + no_auto: false, + extra_args: vec![], + plan_mode: false, + no_trigger: false, + trigger: None, + }) + .await + } else { + Response::Error { + code: "NOT_FOUND".to_string(), + message: format!("agent def '{name}' not found"), + } + } + } + pu_core::schedule_def::ScheduleTrigger::SwarmDef { name, vars } => { + self.handle_request(Request::RunSwarm { + project_root: schedule.project_root.clone(), + swarm_name: name.clone(), + vars: vars.clone(), + }) + .await + } + pu_core::schedule_def::ScheduleTrigger::InlinePrompt { prompt, agent } => { + self.handle_request(Request::Spawn { + project_root: schedule.project_root.clone(), + prompt: prompt.clone(), + agent: agent.clone(), + name: schedule.agent_name.clone(), + base: None, + root: schedule.root, + worktree: None, + command: None, + no_auto: false, + extra_args: vec![], + plan_mode: false, + no_trigger: false, + trigger: None, + }) + .await + } + }; + + if let Response::Error { code, message } = result { + tracing::warn!( + schedule = schedule.name, + code, + message, + "scheduled task failed" + ); + } else { + tracing::info!(schedule = schedule.name, "scheduled task fired"); + } + } + + async fn advance_schedule( + &self, + mut schedule: pu_core::schedule_def::ScheduleDef, + now: chrono::DateTime, + ) { + let is_one_shot = schedule.recurrence == pu_core::schedule_def::Recurrence::None; + if is_one_shot { + schedule.enabled = false; + schedule.next_run = None; + } else { + schedule.next_run = pu_core::schedule_def::next_occurrence( + schedule.start_at, + &schedule.recurrence, + now, + ); + } + let pr = schedule.project_root.clone(); + let scope = schedule.scope.clone(); + let def = schedule; + if let Err(e) = tokio::task::spawn_blocking(move || { + let dir = if scope == "global" { + paths::global_schedules_dir()? + } else { + paths::schedules_dir(Path::new(&pr)) + }; + pu_core::schedule_def::save_schedule_def(&dir, &def) + }) + .await + .unwrap_or_else(|e| Err(std::io::Error::other(e))) + { + tracing::warn!(error = %e, "failed to advance schedule"); + } + } +} diff --git a/crates/pu-engine/src/engine/session_repair.rs b/crates/pu-engine/src/engine/session_repair.rs new file mode 100644 index 0000000..c4d8c64 --- /dev/null +++ b/crates/pu-engine/src/engine/session_repair.rs @@ -0,0 +1,232 @@ +use std::collections::HashSet; +use std::io::BufRead; +use std::os::fd::OwnedFd; +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use crate::pty_manager::NativePtyHost; + +use super::Engine; + +impl Engine { + /// Scan Claude Code's session directory for the latest continuation of a session. + /// Claude Code stores sessions at `~/.claude/projects/{escaped-cwd}/{uuid}.jsonl`. + /// Resolve the sessions directory for a given working directory. + pub(super) fn sessions_dir_for(cwd: &str) -> Option { + let home = std::env::var("HOME").ok()?; + let escaped: String = cwd + .chars() + .map(|c| { + if c.is_ascii_alphanumeric() || c == '-' { + c + } else { + '-' + } + }) + .collect(); + Some( + PathBuf::from(&home) + .join(".claude") + .join("projects") + .join(&escaped), + ) + } + + /// Repair corrupted Claude Code session JSONL files for the given session. 
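+ /// Repairs are applied in place; whenever a fix is made, the pre-repair content is first written to a
+ /// sibling `.jsonl.bak` backup.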
+ /// + /// Fixes three known corruption patterns (claude-code#24304): + /// 1. Snapshot `messageId` collisions — `file-history-snapshot` entries sharing UUIDs with real messages + /// 2. Broken `parentUuid` references — entries pointing to non-existent UUIDs + /// 3. Disconnected compaction roots — multiple `parentUuid: null` entries splitting the conversation + pub(super) fn repair_session_files(cwd: &str, session_id: &str) { + let Some(sessions_dir) = Self::sessions_dir_for(cwd) else { + return; + }; + if !sessions_dir.is_dir() { + return; + } + + // Repair the original session file + let original = sessions_dir.join(format!("{session_id}.jsonl")); + if original.is_file() { + repair_session_file(&original); + } + + // Repair continuation files that chain back to the original session + let Ok(entries) = std::fs::read_dir(&sessions_dir) else { + return; + }; + for entry in entries.flatten() { + let path = entry.path(); + if path.extension().and_then(|e| e.to_str()) != Some("jsonl") { + continue; + } + // Skip the original — already repaired + if path.file_stem().and_then(|s| s.to_str()) == Some(session_id) { + continue; + } + // Only repair continuation files (first line has non-null parentUuid) + let Ok(file) = std::fs::File::open(&path) else { + continue; + }; + let mut reader = std::io::BufReader::new(file); + let mut first_line = String::new(); + if BufRead::read_line(&mut reader, &mut first_line).is_err() { + continue; + } + let Ok(value) = serde_json::from_str::(&first_line) else { + continue; + }; + if value.get("parentUuid").and_then(|v| v.as_str()).is_some() { + repair_session_file(&path); + } + } + } +} + +/// Inject prompt text into a PTY and submit with Enter via chunked typing. +/// Returns `true` on success. +pub(super) async fn inject_initial_prompt( + pty_host: &NativePtyHost, + master_fd: &Arc, + agent_id: &str, + prompt: &[u8], +) -> bool { + if prompt.is_empty() { + return true; + } + if let Err(e) = pty_host.write_chunked_submit(master_fd, prompt).await { + tracing::warn!("failed to inject initial prompt for {}: {}", agent_id, e); + return false; + } + true +} + +/// Repair a single Claude Code session JSONL file. +/// +/// Returns `true` if any repairs were made (a `.bak` backup is written). +pub(super) fn repair_session_file(path: &Path) -> bool { + let Ok(content) = std::fs::read_to_string(path) else { + return false; + }; + + let mut lines: Vec = Vec::new(); + for raw_line in content.lines() { + if raw_line.trim().is_empty() { + continue; + } + match serde_json::from_str::(raw_line) { + Ok(v) => lines.push(v), + Err(_) => { + // Preserve unparseable lines as-is by wrapping in a raw marker + lines.push(serde_json::json!({"__raw": raw_line})); + } + } + } + + if lines.is_empty() { + return false; + } + + // Collect all "uuid" values into a set + let mut uuid_set: HashSet = HashSet::new(); + for entry in &lines { + if let Some(uuid) = entry.get("uuid").and_then(|v| v.as_str()) { + uuid_set.insert(uuid.to_string()); + } + } + + let mut modified = false; + + // Fix 1: Snapshot messageId collisions + // file-history-snapshot entries sometimes reuse a messageId that collides with + // a real message uuid. Nullify the messageId to prevent confusion. 
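+ // Concrete case (the same shape exercised by the snapshot-collision unit test): a real message
+ // {"uuid":"u2",...} followed by {"type":"file-history-snapshot","messageId":"u2",...} has the snapshot's
+ // messageId rewritten to null.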
+ for entry in &mut lines { + if entry.get("__raw").is_some() { + continue; + } + let is_snapshot = + entry.get("type").and_then(|v| v.as_str()) == Some("file-history-snapshot"); + if !is_snapshot { + continue; + } + if let Some(mid) = entry + .get("messageId") + .and_then(|v| v.as_str()) + .map(String::from) + { + if uuid_set.contains(&mid) { + entry["messageId"] = serde_json::Value::Null; + modified = true; + } + } + } + + // Fix 2: Broken parentUuid references — point to nearest preceding entry's uuid + // Fix 3: Disconnected roots — if >1 entry has parentUuid: null, stitch extras + let mut null_parent_count = 0; + let mut last_uuid: Option = None; + + for entry in &mut lines { + if entry.get("__raw").is_some() { + continue; + } + + let has_parent_uuid_field = entry.get("parentUuid").is_some(); + let parent_uuid_value = entry + .get("parentUuid") + .and_then(|v| v.as_str()) + .map(String::from); + + if has_parent_uuid_field { + match &parent_uuid_value { + Some(pu) if !uuid_set.contains(pu) => { + // Broken reference — point to nearest preceding uuid + if let Some(ref prev) = last_uuid { + entry["parentUuid"] = serde_json::Value::String(prev.clone()); + modified = true; + } + } + None => { + // parentUuid is null — this is a root + null_parent_count += 1; + if null_parent_count > 1 { + // Stitch disconnected root to nearest preceding uuid + if let Some(ref prev) = last_uuid { + entry["parentUuid"] = serde_json::Value::String(prev.clone()); + modified = true; + } + } + } + _ => {} + } + } + + // Track the most recent uuid for stitching + if let Some(uuid) = entry.get("uuid").and_then(|v| v.as_str()) { + last_uuid = Some(uuid.to_string()); + } + } + + if !modified { + return false; + } + + // Write backup + let backup = path.with_extension("jsonl.bak"); + let _ = std::fs::write(&backup, &content); + + // Write repaired file + let mut output = String::new(); + for entry in &lines { + if let Some(raw) = entry.get("__raw").and_then(|v| v.as_str()) { + output.push_str(raw); + } else { + output.push_str(&serde_json::to_string(entry).unwrap_or_default()); + } + output.push('\n'); + } + let _ = std::fs::write(path, &output); + + true +} diff --git a/crates/pu-engine/src/engine/spawn.rs b/crates/pu-engine/src/engine/spawn.rs new file mode 100644 index 0000000..f29f0f1 --- /dev/null +++ b/crates/pu-engine/src/engine/spawn.rs @@ -0,0 +1,519 @@ +use std::path::Path; +use std::time::Duration; + +use indexmap::IndexMap; +use pu_core::config; +use pu_core::manifest; +use pu_core::paths; +use pu_core::protocol::Response; +use pu_core::types::{AgentEntry, AgentStatus, WorktreeEntry, WorktreeStatus}; + +use super::SpawnParams; +use super::session_repair::inject_initial_prompt; +use crate::git; +use crate::pty_manager::{NativePtyHost, SpawnConfig}; + +use super::Engine; + +impl Engine { + pub(super) async fn handle_spawn_shell(&self, cwd: &str) -> Response { + let shell = std::env::var("SHELL").unwrap_or_else(|_| "/bin/zsh".to_string()); + let agent_id = pu_core::id::agent_id(); + + let env = self.agent_env().await; + let spawn_config = SpawnConfig { + command: shell, + args: vec!["-l".to_string()], + cwd: cwd.to_string(), + env, + env_remove: vec![], + cols: 120, + rows: 40, + }; + + let handle = match self.pty_host.spawn(spawn_config).await { + Ok(h) => h, + Err(e) => { + return Response::Error { + code: "SPAWN_FAILED".into(), + message: format!("failed to spawn shell: {e}"), + }; + } + }; + + // Start exit monitor (cleans up session map when shell exits) + let exit_rx = handle.exit_rx.clone(); + let 
sessions = self.sessions.clone(); + let aid = agent_id.clone(); + tokio::spawn(async move { + let mut rx = exit_rx; + while rx.changed().await.is_ok() { + if rx.borrow().is_some() { + break; + } + } + sessions.lock().await.remove(&aid); + }); + + self.sessions.lock().await.insert(agent_id.clone(), handle); + + Response::SpawnResult { + worktree_id: None, + agent_id, + status: AgentStatus::Streaming, + } + } + + pub(super) async fn handle_spawn(&self, params: SpawnParams) -> Response { + let SpawnParams { + project_root, + prompt, + agent_type, + name, + base, + root, + worktree, + terminal_command, + no_auto, + extra_args, + plan_mode, + no_trigger, + trigger: trigger_param, + } = params; + let root_path = Path::new(&project_root); + + // Ensure initialized + if !paths::manifest_path(root_path).exists() { + return Response::Error { + code: "NOT_INITIALIZED".into(), + message: "not initialized — run `pu init` first".into(), + }; + } + + // Resolve agent config (strict: surface YAML parse errors) + let cfg = match config::load_config_strict(root_path) { + Ok(c) => c, + Err(e) => { + return Response::Error { + code: "CONFIG_ERROR".into(), + message: format!("failed to load config: {e}"), + }; + } + }; + let agent_cfg = match config::resolve_agent(&cfg, &agent_type) { + Some(c) => c.clone(), + None => { + return Response::Error { + code: "INVALID_ARGUMENT".into(), + message: format!("unknown agent type: {agent_type}"), + }; + } + }; + + let agent_id = pu_core::id::agent_id(); + let creating_new_worktree = !root && worktree.is_none(); + let agent_name = if creating_new_worktree { + // Worktree spawns require a user-provided name (becomes the branch slug) + let Some(raw) = name else { + return Response::Error { + code: "INVALID_ARGUMENT".into(), + message: "worktree spawn requires a name".into(), + }; + }; + let normalized = pu_core::id::normalize_worktree_name(&raw); + if normalized.is_empty() { + return Response::Error { + code: "INVALID_ARGUMENT".into(), + message: "worktree spawn requires a name".into(), + }; + } + normalized + } else { + // Root agents and existing-worktree agents get auto-generated names + name.unwrap_or_else(pu_core::id::root_agent_name) + }; + let base_branch = match base { + Some(b) => b, + None => git::resolve_base_ref(root_path, "HEAD") + .await + .unwrap_or_else(|_| "HEAD".into()), + }; + + // Normalize empty command to None + let terminal_command = terminal_command.filter(|c| !c.is_empty()); + + // Plan mode requires a prompt-driven agent that understands EnterPlanMode. + // Reject early for terminal agents or terminal_command spawns where the + // prefix would be meaningless or actively harmful. + if plan_mode + && (prompt.is_empty() || terminal_command.is_some() || agent_type == "terminal") + { + return Response::Error { + code: "INVALID_ARGUMENT".into(), + message: "plan mode requires a prompt-driven non-terminal agent".into(), + }; + } + + // When plan_mode is active, prefix the prompt with instructions to enter plan mode. + // This keeps bypass permissions as the base while guiding the agent into plan mode + // via its own tool (EnterPlanMode) rather than conflicting CLI flags. + let prompt = if plan_mode { + format!( + "[PLAN MODE] You MUST call the EnterPlanMode tool immediately before doing anything else. \ + Do not read files, do not explore — call EnterPlanMode first. 
\ + Once in plan mode, research and plan before making changes.\n\n{prompt}" + ) + } else { + prompt.to_string() + }; + let prompt = &prompt; + + // When a terminal command is set, it becomes the PTY process directly + let (command, args, session_id, inject_prompt_via_stdin) = if let Some(ref cmd) = + terminal_command + { + let has_metacharacters = cmd.contains('|') + || cmd.contains("&&") + || cmd.contains(';') + || cmd.contains('>') + || cmd.contains('<') + || cmd.contains('$'); + + let (cmd_bin, cmd_args) = if has_metacharacters { + let shell = std::env::var("SHELL").unwrap_or_else(|_| "/bin/sh".to_string()); + (shell, vec!["-c".to_string(), cmd.clone()]) + } else { + let parts: Vec<&str> = cmd.split_whitespace().collect(); + if parts.is_empty() { + // Shouldn't happen after filter, but handle gracefully + let shell = std::env::var("SHELL").unwrap_or_else(|_| "/bin/sh".to_string()); + (shell, vec![]) + } else { + ( + parts[0].to_string(), + parts[1..].iter().map(ToString::to_string).collect(), + ) + } + }; + (cmd_bin, cmd_args, None, false) + } else { + // Standard agent flow + let (command, cmd_args) = match Self::parse_agent_command(&agent_cfg, &agent_type) { + Ok(v) => v, + Err(e) => return e, + }; + let mut args = cmd_args; + + // Add agent-type-specific launch args from config (or defaults). + // --no-auto skips only the built-in defaults; explicit user-configured + // launchArgs are always applied. + let launch_args = if no_auto && agent_cfg.launch_args.is_none() { + Vec::new() + } else { + pu_core::types::resolved_launch_args(&agent_type, agent_cfg.launch_args.as_deref()) + }; + if launch_args.is_empty() && agent_cfg.launch_args.is_some() { + tracing::info!(agent_type, "auto-mode disabled via config (launchArgs: [])"); + } + for arg in launch_args.into_iter().rev() { + if !args.iter().any(|a| a == &arg) { + args.insert(0, arg); + } + } + + // Append extra args from --agent-args (always applied, even with --no-auto) + args.extend(extra_args.iter().cloned()); + + // Generate session ID for claude agents (enables resume via --resume) + let session_id = if agent_type == "claude" { + let id = pu_core::id::session_id(); + args.push("--session-id".into()); + args.push(id.clone()); + Some(id) + } else { + None + }; + + // Claude prompt via argv can stall first render in some terminals; keep stdin injection + // for Claude (and terminal agent). Codex/OpenCode accept startup prompts via CLI args. + let inject_prompt_via_stdin = + Self::should_inject_prompt_via_stdin(&agent_type, agent_cfg.interactive, prompt); + if !inject_prompt_via_stdin && !prompt.is_empty() { + let prompt_flag = + Self::resolved_prompt_flag(&agent_type, agent_cfg.prompt_flag.as_deref()); + if let Some(flag) = prompt_flag { + args.push(flag); + args.push(prompt.to_string()); + } else { + // Default prompt style is positional (for example codex [PROMPT]). 
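+ // Illustrative final invocation (exact flags depend on the configured launch args), e.g.
+ // `codex --full-auto "summarize the diff"` — the prompt rides as the trailing positional argument.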
+ args.push(prompt.to_string()); + } + } + + (command, args, session_id, inject_prompt_via_stdin) + }; + + // Determine working directory + let (cwd, worktree_id) = if root || worktree.is_some() { + // Spawn in project root or existing worktree + let wt_id = worktree.clone(); + let dir = if let Some(ref wt) = worktree { + paths::worktree_path(root_path, wt) + .to_string_lossy() + .to_string() + } else { + project_root.to_string() + }; + (dir, wt_id) + } else { + // Create new worktree + let wt_id = pu_core::id::worktree_id(); + let wt_path = paths::worktree_path(root_path, &wt_id); + let branch = format!("pu/{agent_name}"); + + if let Err(e) = git::create_worktree(root_path, &wt_path, &branch, &base_branch).await { + return Response::Error { + code: "SPAWN_FAILED".into(), + message: format!("failed to create worktree: {e}"), + }; + } + + // Install git hooks for trigger gate enforcement + if let Err(e) = git::install_hooks(&wt_path, root_path).await { + tracing::warn!("failed to install git hooks in worktree: {e}"); + } + + // Copy env files (e.g., .env, .env.local) into new worktree + for env_file in &cfg.env_files { + let src = root_path.join(env_file); + let dst = wt_path.join(env_file); + match tokio::fs::copy(&src, &dst).await { + Ok(_) => {} + Err(e) if e.kind() == std::io::ErrorKind::NotFound && !src.exists() => {} // source doesn't exist, skip + Err(e) => tracing::warn!("failed to copy {env_file} to worktree: {e}"), + } + } + + (wt_path.to_string_lossy().to_string(), Some(wt_id)) + }; + + // Spawn PTY process + let mut env = self.agent_env().await; + env.push(("PU_AGENT_ID".into(), agent_id.clone())); + let spawn_config = SpawnConfig { + command, + args, + cwd: cwd.clone(), + env, + env_remove: vec!["CLAUDECODE".into()], + cols: 120, + rows: 40, + }; + + // Track whether we created a new worktree (for rollback on failure) + let created_worktree = !root && worktree.is_none() && worktree_id.is_some(); + let rollback_branch = if created_worktree { + Some(format!("pu/{agent_name}")) + } else { + None + }; + + let handle = match self.pty_host.spawn(spawn_config).await { + Ok(h) => h, + Err(e) => { + if created_worktree { + self.rollback_worktree( + root_path, + worktree_id.as_deref(), + rollback_branch.as_deref(), + ) + .await; + } + return Response::Error { + code: "SPAWN_FAILED".into(), + message: format!("failed to spawn process: {e}"), + }; + } + }; + + if inject_prompt_via_stdin { + let prompt_bytes = prompt.as_bytes().to_vec(); + let pending = self.pending_initial_inputs.clone(); + pending + .lock() + .await + .insert(agent_id.clone(), prompt_bytes.clone()); + + let output_buffer = handle.output_buffer.clone(); + let master_fd = handle.master_fd(); + let mut exit_rx = handle.exit_rx.clone(); + let pty_host = NativePtyHost::new(); + let aid = agent_id.clone(); + + tokio::spawn(async move { + let mut watcher = output_buffer.subscribe(); + let timeout = tokio::time::sleep(Duration::from_millis(1800)); + tokio::pin!(timeout); + + loop { + tokio::select! 
{ + _ = &mut timeout => { + // Fallback or quiet-period expired — inject now + break; + } + Ok(()) = exit_rx.changed() => { + // Process exited before we could inject — abort + pending.lock().await.remove(&aid); + tracing::debug!(agent_id = %aid, "prompt injection aborted: process exited"); + return; + } + Ok(()) = watcher.changed() => { + // Got output — reset to a 450ms quiet period + timeout + .as_mut() + .reset(tokio::time::Instant::now() + Duration::from_millis(450)); + } + } + } + + // Inject the prompt + if inject_initial_prompt(&pty_host, &master_fd, &aid, &prompt_bytes).await { + tracing::debug!(agent_id = %aid, "prompt injected at spawn time"); + } else { + tracing::warn!(agent_id = %aid, "failed to inject prompt at spawn time"); + } + pending.lock().await.remove(&aid); + }); + } + + let pid = handle.pid; + + // Store handle in session map BEFORE writing manifest. + // ManifestWatcher in Swift fires on manifest write and immediately + // tries to attach — the session must already be in the map. + self.sessions.lock().await.insert(agent_id.clone(), handle); + + // Bind trigger if explicitly specified via --trigger + let (trigger_name, trigger_total) = if no_trigger { + (None, None) + } else if let Some(ref name) = trigger_param { + let pr = project_root.to_string(); + let name_clone = name.clone(); + let found = tokio::task::spawn_blocking(move || { + let triggers = pu_core::trigger_def::triggers_for_event( + Path::new(&pr), + &pu_core::trigger_def::TriggerEvent::AgentIdle, + ); + triggers + .into_iter() + .find(|t| t.name == name_clone) + .map(|t| { + let len = t.sequence.len() as u32; + (t.name, len) + }) + }) + .await + .unwrap_or(None); + match found { + Some((tname, total)) if total > 0 => (Some(tname), Some(total)), + Some(_) => { + return Response::Error { + code: "INVALID_TRIGGER".into(), + message: format!("trigger '{name}' has empty sequence"), + }; + } + None => { + return Response::Error { + code: "NOT_FOUND".into(), + message: format!("trigger '{name}' not found"), + }; + } + } + } else { + (None, None) + }; + + // Update manifest + let agent_entry = AgentEntry { + id: agent_id.clone(), + name: agent_name.clone(), + agent_type, + status: AgentStatus::Streaming, + prompt: Some(prompt.to_string()), + started_at: chrono::Utc::now(), + completed_at: None, + exit_code: None, + error: None, + pid: Some(pid), + session_id, + suspended_at: None, + suspended: false, + command: terminal_command, + plan_mode, + trigger_seq_index: trigger_name.as_ref().map(|_| 0), + trigger_state: trigger_name + .as_ref() + .map(|_| pu_core::types::TriggerState::Active), + trigger_total, + gate_attempts: trigger_name.as_ref().map(|_| 0), + no_trigger, + trigger_name: trigger_name.clone(), + }; + + let wt_id_for_manifest = worktree_id.clone(); + let agent_id_clone = agent_id.clone(); + let manifest_result = manifest::update_manifest(root_path, move |mut m| { + if let Some(ref wt_id) = wt_id_for_manifest { + // Add or update worktree entry + let wt_entry = m + .worktrees + .entry(wt_id.clone()) + .or_insert_with(|| WorktreeEntry { + id: wt_id.clone(), + name: agent_name.clone(), + path: cwd.clone(), + branch: format!("pu/{agent_name}"), + base_branch: Some(base_branch.clone()), + status: WorktreeStatus::Active, + agents: IndexMap::new(), + created_at: chrono::Utc::now(), + merged_at: None, + }); + wt_entry.agents.insert(agent_id_clone, agent_entry); + } else { + m.agents.insert(agent_id_clone, agent_entry); + } + m + }); + + if let Err(e) = manifest_result { + // Rollback: remove session and kill 
process + if let Some(handle) = self.sessions.lock().await.remove(&agent_id) { + self.pty_host + .kill(&handle, Duration::from_secs(2)) + .await + .ok(); + } + if created_worktree { + self.rollback_worktree( + root_path, + worktree_id.as_deref(), + rollback_branch.as_deref(), + ) + .await; + } + return Response::Error { + code: "SPAWN_FAILED".into(), + message: format!("failed to update manifest: {e}"), + }; + } + + self.notify_status_change(&project_root).await; + + Response::SpawnResult { + worktree_id, + agent_id, + status: AgentStatus::Streaming, + } + } +} diff --git a/crates/pu-engine/src/engine/status.rs b/crates/pu-engine/src/engine/status.rs new file mode 100644 index 0000000..1ea70d7 --- /dev/null +++ b/crates/pu-engine/src/engine/status.rs @@ -0,0 +1,303 @@ +use std::collections::HashMap; + +use pu_core::protocol::{AgentStatusReport, Response}; +use pu_core::types::{AgentEntry, AgentStatus, WorktreeEntry, WorktreeStatus}; + +use crate::agent_monitor; +use crate::git; +use crate::pty_manager::AgentHandle; + +use super::Engine; + +impl Engine { + pub(super) async fn handle_status( + &self, + project_root: &str, + agent_id: Option<&str>, + ) -> Response { + // On first status call per project, reap agents whose PIDs are dead. + // Fire-and-forget: first status returns immediately, next refresh corrects. + let should_reap = { + let mut reaped = self.reaped_projects.lock().unwrap(); + reaped.insert(project_root.to_string()) + }; // MutexGuard dropped here — before any .await + if should_reap { + let pr = project_root.to_string(); + tokio::spawn(async move { + tokio::task::spawn_blocking(move || Self::reap_stale_agents(&pr)) + .await + .ok(); + }); + } + + if let Some(id) = agent_id { + let m = match self.read_manifest_async(project_root).await { + Ok(m) => m, + Err(e) => return Self::error_response(&e), + }; + match m.find_agent(id) { + Some(loc) => { + let (agent, wt_id) = match loc { + pu_core::types::AgentLocation::Root(a) => (a, None), + pu_core::types::AgentLocation::Worktree { worktree, agent } => { + (agent, Some(worktree.id.clone())) + } + }; + let sessions = self.sessions.lock().await; + Response::AgentStatus(self.build_agent_status_report(agent, &sessions, wt_id)) + } + None => Self::agent_not_found(id), + } + } else { + match self.compute_full_status(project_root).await { + Ok((worktrees, agents)) => Response::StatusReport { worktrees, agents }, + Err(e) => Self::error_response(&e), + } + } + } + + /// Build a status report for a single agent, using live PTY state when available. + pub(super) fn build_agent_status_report( + &self, + agent: &AgentEntry, + sessions: &HashMap, + worktree_id: Option, + ) -> AgentStatusReport { + let (status, exit_code, idle_seconds) = + self.live_agent_status_sync(&agent.id, agent, sessions); + AgentStatusReport { + id: agent.id.clone(), + name: agent.name.clone(), + agent_type: agent.agent_type.clone(), + status, + pid: agent.pid, + exit_code, + idle_seconds, + worktree_id, + started_at: agent.started_at, + session_id: agent.session_id.clone(), + prompt: agent.prompt.clone(), + suspended: agent.suspended, + trigger_seq_index: agent.trigger_seq_index, + trigger_state: agent.trigger_state, + trigger_total: agent.trigger_total, + } + } + + /// Compute live agent status from PTY state. + /// Returns (status, exit_code, idle_seconds). 
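+    ///
+    /// Illustrative sketch (not a doctest) — assumes the caller already holds the
+    /// `sessions` lock, as `handle_status` and `handle_pulse` do:
+    /// ```ignore
+    /// let sessions = self.sessions.lock().await;
+    /// let (status, exit_code, idle) = self.live_agent_status_sync(&agent.id, agent, &sessions);
+    /// drop(sessions); // release the lock before any further awaits
+    /// ```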
+ pub(super) fn live_agent_status_sync( + &self, + id: &str, + agent: &AgentEntry, + sessions: &HashMap, + ) -> (AgentStatus, Option, Option) { + match sessions.get(id) { + Some(handle) => { + let exit_code = *handle.exit_rx.borrow(); + let status = agent_monitor::effective_status(exit_code, &handle.output_buffer); + let idle_seconds = Some(handle.output_buffer.content_idle_seconds()); + (status, exit_code, idle_seconds) + } + // No live session — use manifest (agent already exited/killed/etc.) + None => (agent.status, agent.exit_code, None), + } + } + + fn agent_pulse_entry( + &self, + agent: &AgentEntry, + sessions: &HashMap, + now: chrono::DateTime, + ) -> pu_core::protocol::AgentPulseEntry { + let (status, exit_code, idle_seconds) = + self.live_agent_status_sync(&agent.id, agent, sessions); + let runtime = (now - agent.started_at).num_seconds(); + let snippet = agent.prompt.as_ref().map(|p| { + let trimmed = p.trim(); + let truncated: String = trimmed.chars().take(77).collect(); + if truncated.len() < trimmed.len() { + format!("{truncated}...") + } else { + truncated + } + }); + pu_core::protocol::AgentPulseEntry { + id: agent.id.clone(), + name: agent.name.clone(), + agent_type: agent.agent_type.clone(), + status, + exit_code, + runtime_seconds: runtime, + idle_seconds, + prompt_snippet: snippet, + } + } + + pub(super) async fn handle_pulse(&self, project_root: &str) -> Response { + let m = match self.read_manifest_async(project_root).await { + Ok(m) => m, + Err(e) => return Self::error_response(&e), + }; + + let sessions = self.sessions.lock().await; + let now = chrono::Utc::now(); + + // Build root-level agents + let root_agents: Vec = m + .agents + .values() + .map(|a| self.agent_pulse_entry(a, &sessions, now)) + .collect(); + + // Build worktree entries — collect all agent data in one lock acquisition + let active_worktrees: Vec<_> = m + .worktrees + .values() + .filter(|wt| wt.status == WorktreeStatus::Active) + .cloned() + .collect(); + + let wt_agents: Vec> = active_worktrees + .iter() + .map(|wt| { + wt.agents + .values() + .map(|a| self.agent_pulse_entry(a, &sessions, now)) + .collect() + }) + .collect(); + + // Drop sessions lock before shelling out to git + drop(sessions); + + let mut worktrees = Vec::new(); + for (wt, agents) in active_worktrees.iter().zip(wt_agents) { + let elapsed = (now - wt.created_at).num_seconds(); + + // Get git diff stats + let wt_path = std::path::PathBuf::from(&wt.path); + let (files_changed, insertions, deletions, diff_error) = if wt_path.exists() { + let base = wt.base_branch.as_deref(); + match git::diff_worktree(&wt_path, base, true).await { + Ok(output) => ( + output.files_changed, + output.insertions, + output.deletions, + None, + ), + Err(e) => (0, 0, 0, Some(format!("{e}"))), + } + } else { + (0, 0, 0, None) + }; + + worktrees.push(pu_core::protocol::WorktreePulseEntry { + worktree_id: wt.id.clone(), + worktree_name: wt.name.clone(), + branch: wt.branch.clone(), + elapsed_seconds: elapsed, + agents, + files_changed, + insertions, + deletions, + diff_error, + }); + } + + Response::PulseReport { + worktrees, + root_agents, + } + } + + pub(super) async fn handle_diff( + &self, + project_root: &str, + worktree_id: Option<&str>, + stat: bool, + ) -> Response { + let m = match self.read_manifest_async(project_root).await { + Ok(m) => m, + Err(e) => return Self::error_response(&e), + }; + + let worktrees: Vec = if let Some(wt_id) = worktree_id { + match m.worktrees.get(wt_id) { + Some(wt) => vec![wt.clone()], + None => { + return Response::Error 
{ + code: "NOT_FOUND".into(), + message: format!("worktree '{wt_id}' not found"), + }; + } + } + } else { + m.worktrees + .into_values() + .filter(|wt| wt.status == WorktreeStatus::Active) + .collect() + }; + + if worktrees.is_empty() { + return Response::DiffResult { diffs: vec![] }; + } + + let is_targeted = worktree_id.is_some(); + let mut diffs = Vec::new(); + for wt in &worktrees { + let wt_path = std::path::PathBuf::from(&wt.path); + if !wt_path.exists() { + if is_targeted { + // Targeted query: report the error so callers can distinguish + // a deleted worktree from a clean one. + diffs.push(pu_core::protocol::WorktreeDiffEntry { + worktree_id: wt.id.clone(), + worktree_name: wt.name.clone(), + branch: wt.branch.clone(), + base_branch: wt.base_branch.clone(), + diff_output: String::new(), + files_changed: 0, + insertions: 0, + deletions: 0, + error: Some(format!("worktree directory not found: {}", wt.path)), + }); + } + // Bulk query: skip missing dirs (best-effort) + continue; + } + let base = wt.base_branch.as_deref(); + match git::diff_worktree(&wt_path, base, stat).await { + Ok(output) => { + diffs.push(pu_core::protocol::WorktreeDiffEntry { + worktree_id: wt.id.clone(), + worktree_name: wt.name.clone(), + branch: wt.branch.clone(), + base_branch: wt.base_branch.clone(), + diff_output: output.diff, + files_changed: output.files_changed, + insertions: output.insertions, + deletions: output.deletions, + error: None, + }); + } + Err(e) => { + tracing::warn!("failed to diff worktree {}: {}", wt.id, e); + diffs.push(pu_core::protocol::WorktreeDiffEntry { + worktree_id: wt.id.clone(), + worktree_name: wt.name.clone(), + branch: wt.branch.clone(), + base_branch: wt.base_branch.clone(), + diff_output: String::new(), + files_changed: 0, + insertions: 0, + deletions: 0, + error: Some(format!("{e}")), + }); + } + } + } + + Response::DiffResult { diffs } + } +} diff --git a/crates/pu-engine/src/engine/subscriptions.rs b/crates/pu-engine/src/engine/subscriptions.rs new file mode 100644 index 0000000..aa8567b --- /dev/null +++ b/crates/pu-engine/src/engine/subscriptions.rs @@ -0,0 +1,128 @@ +use pu_core::error::PuError; +use pu_core::protocol::{AgentStatusReport, GridCommand, Response}; +use pu_core::types::WorktreeEntry; + +use super::Engine; + +impl Engine { + // --- Grid --- + + pub(super) async fn handle_subscribe_grid(&self, project_root: &str) -> Response { + self.ensure_grid_channel(project_root).await; + Response::GridSubscribed + } + + pub async fn handle_grid_command(&self, project_root: &str, command: GridCommand) -> Response { + // For GetLayout, read the grid-layout.json directly + if matches!(command, GridCommand::GetLayout) { + let root = project_root.to_string(); + return match tokio::task::spawn_blocking(move || { + let path = + pu_core::paths::pu_dir(std::path::Path::new(&root)).join("grid-layout.json"); + std::fs::read_to_string(path) + }) + .await + { + Ok(Ok(contents)) => match serde_json::from_str(&contents) { + Ok(layout) => Response::GridLayout { layout }, + Err(e) => Response::Error { + code: "PARSE_ERROR".into(), + message: format!("invalid grid layout JSON: {e}"), + }, + }, + _ => Response::GridLayout { + layout: serde_json::Value::Null, + }, + }; + } + + // Broadcast mutation commands to subscribers + let channels = self.grid_channels.lock().await; + if let Some(tx) = channels.get(project_root) { + let _ = tx.send(command.clone()); + } + Response::Ok + } + + async fn ensure_grid_channel(&self, project_root: &str) { + let mut channels = 
self.grid_channels.lock().await; + channels + .entry(project_root.to_string()) + .or_insert_with(|| tokio::sync::broadcast::channel(64).0); + } + + /// Get a grid broadcast receiver for a project (used by IPC server for streaming). + pub async fn subscribe_grid( + &self, + project_root: &str, + ) -> tokio::sync::broadcast::Receiver { + let mut channels = self.grid_channels.lock().await; + let tx = channels + .entry(project_root.to_string()) + .or_insert_with(|| tokio::sync::broadcast::channel(64).0); + tx.subscribe() + } + + // --- Status Push --- + + pub(super) async fn handle_subscribe_status(&self, project_root: &str) -> Response { + self.ensure_status_channel(project_root).await; + Response::StatusSubscribed + } + + async fn ensure_status_channel(&self, project_root: &str) { + let mut channels = self.status_channels.lock().await; + channels + .entry(project_root.to_string()) + .or_insert_with(|| tokio::sync::broadcast::channel(64).0); + } + + /// Get a status broadcast receiver for a project (used by IPC server for streaming). + pub async fn subscribe_status( + &self, + project_root: &str, + ) -> tokio::sync::broadcast::Receiver<()> { + let mut channels = self.status_channels.lock().await; + let tx = channels + .entry(project_root.to_string()) + .or_insert_with(|| tokio::sync::broadcast::channel(64).0); + tx.subscribe() + } + + /// Notify all status subscribers that state has changed. + pub(super) async fn notify_status_change(&self, project_root: &str) { + let channels = self.status_channels.lock().await; + if let Some(tx) = channels.get(project_root) { + let _ = tx.send(()); + } + } + + /// Compute a full status report for a project (used by status push and handle_status). + pub async fn compute_full_status( + &self, + project_root: &str, + ) -> Result<(Vec, Vec), PuError> { + let m = self.read_manifest_async(project_root).await?; + let sessions = self.sessions.lock().await; + let mut agents: Vec = m + .agents + .values() + .map(|a| self.build_agent_status_report(a, &sessions, None)) + .collect(); + agents.sort_by_key(|a| a.started_at); + let worktrees: Vec = m + .worktrees + .into_values() + .map(|mut wt| { + for agent in wt.agents.values_mut() { + let (status, exit_code, _idle) = + self.live_agent_status_sync(&agent.id, agent, &sessions); + agent.status = status; + agent.exit_code = exit_code; + } + wt + }) + .collect(); + Ok((worktrees, agents)) + } +} diff --git a/crates/pu-engine/src/engine/trigger_executor.rs b/crates/pu-engine/src/engine/trigger_executor.rs new file mode 100644 index 0000000..a982e90 --- /dev/null +++ b/crates/pu-engine/src/engine/trigger_executor.rs @@ -0,0 +1,287 @@ +use std::path::Path; + +use pu_core::manifest; +use pu_core::types::AgentStatus; + +use super::Engine; + +impl Engine { + /// Check all agents with active trigger sequences and advance them when idle. + pub(super) async fn evaluate_idle_triggers(&self, project_root: &str) { + let manifest = match self.read_manifest_async(project_root).await { + Ok(m) => m, + Err(_) => return, + }; + + // Collect candidate agents with their trigger name, seq index, and worktree path. + // Briefly hold the sessions lock to check live status, then release before I/O. 
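+        // Each candidate tuple is (agent_id, trigger_name, seq_index, worktree_path).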
+ let candidates: Vec<(String, String, u32, Option)> = { + let sessions = self.sessions.lock().await; + let mut result = Vec::new(); + for agent in manifest.all_agents() { + if agent.trigger_state != Some(pu_core::types::TriggerState::Active) { + continue; + } + let trigger_name = match &agent.trigger_name { + Some(name) => name.clone(), + None => continue, // No bound trigger, skip + }; + let seq_index = agent.trigger_seq_index.unwrap_or(0); + let (status, _, _) = self.live_agent_status_sync(&agent.id, agent, &sessions); + if status != AgentStatus::Waiting { + continue; + } + let wt_path = match manifest.find_agent(&agent.id) { + Some(pu_core::types::AgentLocation::Worktree { worktree, .. }) => { + Some(std::path::PathBuf::from(&worktree.path)) + } + _ => None, + }; + result.push((agent.id.clone(), trigger_name, seq_index, wt_path)); + } + result + // sessions lock dropped here + }; + + if candidates.is_empty() { + return; + } + + // Load trigger defs once for this project + let pr = project_root.to_string(); + let idle_triggers = match tokio::task::spawn_blocking(move || { + pu_core::trigger_def::triggers_for_event( + Path::new(&pr), + &pu_core::trigger_def::TriggerEvent::AgentIdle, + ) + }) + .await + { + Ok(t) => t, + Err(_) => return, + }; + + // Index triggers by name for O(1) lookup + let trigger_map: std::collections::HashMap<&str, &pu_core::trigger_def::TriggerDef> = + idle_triggers.iter().map(|t| (t.name.as_str(), t)).collect(); + + for (agent_id, trigger_name, seq_index, wt_path) in &candidates { + let trigger = match trigger_map.get(trigger_name.as_str()) { + Some(t) => t, + None => { + // Trigger was removed since spawn — mark failed + self.update_trigger_state( + project_root, + agent_id, + pu_core::types::TriggerState::Failed, + None, + None, + ) + .await; + continue; + } + }; + + let sequence = &trigger.sequence; + let seq_index = *seq_index as usize; + if seq_index >= sequence.len() { + self.update_trigger_state( + project_root, + agent_id, + pu_core::types::TriggerState::Completed, + None, + None, + ) + .await; + continue; + } + + let action = &sequence[seq_index]; + let cwd = wt_path + .as_deref() + .unwrap_or_else(|| Path::new(project_root)); + + // If action has a gate, evaluate it first (no lock held) + if let Some(ref gate) = action.gate { + let resolved_run = + pu_core::trigger_def::substitute_variables(&gate.run, &trigger.variables); + + // Mark as Gating while the command runs + self.update_trigger_state( + project_root, + agent_id, + pu_core::types::TriggerState::Gating, + None, + None, + ) + .await; + + match crate::gate::run_gate_command(&resolved_run, cwd).await { + Ok((exit_code, stdout, stderr)) => { + let expect_exit = gate.expect_exit.unwrap_or(0); + if exit_code != expect_exit { + let max_retries = action + .max_retries + .unwrap_or(crate::gate::DEFAULT_GATE_MAX_RETRIES); + let manifest = self.read_manifest_async(project_root).await; + let attempts = manifest + .ok() + .and_then(|m| { + m.find_agent(agent_id).map(|loc| match loc { + pu_core::types::AgentLocation::Root(a) => a.gate_attempts, + pu_core::types::AgentLocation::Worktree { + agent, .. 
+ } => agent.gate_attempts, + }) + }) + .flatten() + .unwrap_or(0); + + if attempts < max_retries { + let failure_msg = format!( + "\n\nGate '{resolved_run}' failed (exit {exit_code}, expected {expect_exit}):\n{stdout}{stderr}\nPlease fix the issues and try again.\n" + ); + if let Err(e) = self.inject_text(agent_id, &failure_msg).await { + tracing::warn!(agent_id, "failed to inject gate failure: {e}"); + } + self.update_trigger_state( + project_root, + agent_id, + pu_core::types::TriggerState::Active, + None, + Some(attempts + 1), + ) + .await; + } else { + self.update_trigger_state( + project_root, + agent_id, + pu_core::types::TriggerState::Failed, + None, + None, + ) + .await; + } + continue; + } + } + Err(e) => { + tracing::warn!(agent_id, gate = %resolved_run, "gate command error: {e}"); + self.update_trigger_state( + project_root, + agent_id, + pu_core::types::TriggerState::Failed, + None, + None, + ) + .await; + continue; + } + } + } + + // Inject text if present — only advance on success + if let Some(ref inject_text) = action.inject { + let resolved = + pu_core::trigger_def::substitute_variables(inject_text, &trigger.variables); + match self.inject_text(agent_id, &resolved).await { + Ok(true) => {} // success, proceed to advance + Ok(false) => { + tracing::warn!(agent_id, "inject_text: session not found, marking failed"); + self.update_trigger_state( + project_root, + agent_id, + pu_core::types::TriggerState::Failed, + None, + None, + ) + .await; + continue; + } + Err(e) => { + tracing::warn!(agent_id, "inject_text failed: {e}, marking failed"); + self.update_trigger_state( + project_root, + agent_id, + pu_core::types::TriggerState::Failed, + None, + None, + ) + .await; + continue; + } + } + } + + // Advance sequence index + let new_index = seq_index as u32 + 1; + let new_state = if new_index >= sequence.len() as u32 { + pu_core::types::TriggerState::Completed + } else { + pu_core::types::TriggerState::Active + }; + self.update_trigger_state(project_root, agent_id, new_state, Some(new_index), Some(0)) + .await; + } + } + + /// Inject text into an agent's PTY using chunked typing + Enter submission. + /// Returns `Ok(true)` on success, `Ok(false)` if the session was not found. 
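+    ///
+    /// Illustrative caller sketch (not a doctest); the injected text is a placeholder:
+    /// ```ignore
+    /// match self.inject_text(agent_id, "please run the test suite again").await {
+    ///     Ok(true) => tracing::debug!(agent_id, "text injected"),
+    ///     Ok(false) => tracing::warn!(agent_id, "no live session for agent"),
+    ///     Err(e) => tracing::warn!(agent_id, "injection failed: {e}"),
+    /// }
+    /// ```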
+ pub(super) async fn inject_text( + &self, + agent_id: &str, + text: &str, + ) -> Result { + let fd = { + let sessions = self.sessions.lock().await; + sessions.get(agent_id).map(|handle| handle.master_fd()) + }; + match fd { + Some(fd) => { + self.pty_host + .write_chunked_submit(&fd, text.as_bytes()) + .await?; + Ok(true) + } + None => Ok(false), + } + } + + pub(super) async fn update_trigger_state( + &self, + project_root: &str, + agent_id: &str, + state: pu_core::types::TriggerState, + seq_index: Option, + gate_attempts: Option, + ) { + let agent_id = agent_id.to_string(); + let pr = project_root.to_string(); + let result = tokio::task::spawn_blocking(move || { + manifest::update_manifest(Path::new(&pr), move |mut m| { + if let Some(agent) = m.find_agent_mut(&agent_id) { + agent.trigger_state = Some(state); + if let Some(idx) = seq_index { + agent.trigger_seq_index = Some(idx); + } + if let Some(attempts) = gate_attempts { + agent.gate_attempts = Some(attempts); + } + } + m + }) + }) + .await; + match result { + Ok(Ok(_)) => { + self.notify_status_change(project_root).await; + } + Ok(Err(e)) => { + tracing::warn!("failed to update trigger state in manifest: {e}"); + } + Err(e) => { + tracing::warn!("trigger state update task panicked: {e}"); + } + } + } +} diff --git a/crates/pu-engine/src/engine/worktree_ops.rs b/crates/pu-engine/src/engine/worktree_ops.rs new file mode 100644 index 0000000..7132f33 --- /dev/null +++ b/crates/pu-engine/src/engine/worktree_ops.rs @@ -0,0 +1,200 @@ +use std::path::Path; + +use indexmap::IndexMap; +use pu_core::config; +use pu_core::manifest; +use pu_core::paths; +use pu_core::protocol::Response; +use pu_core::types::{WorktreeEntry, WorktreeStatus}; + +use crate::git; + +use super::Engine; + +impl Engine { + pub(super) async fn handle_create_worktree( + &self, + project_root: &str, + name: Option, + base: Option, + ) -> Response { + let root_path = Path::new(project_root); + + // Ensure initialized + if !paths::manifest_path(root_path).exists() { + return Response::Error { + code: "NOT_INITIALIZED".into(), + message: "not initialized — run `pu init` first".into(), + }; + } + + // Load config for env_files + let cfg = match config::load_config_strict(root_path) { + Ok(c) => c, + Err(e) => { + return Response::Error { + code: "CONFIG_ERROR".into(), + message: format!("failed to load config: {e}"), + }; + } + }; + + // Resolve name + let Some(raw) = name else { + return Response::Error { + code: "INVALID_ARGUMENT".into(), + message: "worktree creation requires a name".into(), + }; + }; + let worktree_name = pu_core::id::normalize_worktree_name(&raw); + if worktree_name.is_empty() { + return Response::Error { + code: "INVALID_ARGUMENT".into(), + message: "worktree creation requires a name".into(), + }; + } + + let base_branch = match base { + Some(b) => b, + None => git::resolve_base_ref(root_path, "HEAD") + .await + .unwrap_or_else(|_| "HEAD".into()), + }; + let wt_id = pu_core::id::worktree_id(); + let wt_path = paths::worktree_path(root_path, &wt_id); + let branch = format!("pu/{worktree_name}"); + let rollback_branch = branch.clone(); + + if let Err(e) = git::create_worktree(root_path, &wt_path, &branch, &base_branch).await { + return Response::Error { + code: "CREATE_WORKTREE_FAILED".into(), + message: format!("failed to create worktree: {e}"), + }; + } + + // Install git hooks for trigger gate enforcement + if let Err(e) = git::install_hooks(&wt_path, root_path).await { + tracing::warn!("failed to install git hooks in worktree: {e}"); + } + + // Copy 
env files into new worktree + for env_file in &cfg.env_files { + let src = root_path.join(env_file); + let dst = wt_path.join(env_file); + match tokio::fs::copy(&src, &dst).await { + Ok(_) => {} + Err(e) if e.kind() == std::io::ErrorKind::NotFound && !src.exists() => {} + Err(e) => tracing::warn!("failed to copy {env_file} to worktree: {e}"), + } + } + + // Write manifest entry (worktree only, no agents) + let cwd = wt_path.to_string_lossy().to_string(); + let wt_id_clone = wt_id.clone(); + let manifest_result = manifest::update_manifest(root_path, move |mut m| { + m.worktrees + .entry(wt_id_clone.clone()) + .or_insert_with(|| WorktreeEntry { + id: wt_id_clone, + name: worktree_name.clone(), + path: cwd, + branch, + base_branch: Some(base_branch.clone()), + status: WorktreeStatus::Active, + agents: IndexMap::new(), + created_at: chrono::Utc::now(), + merged_at: None, + }); + m + }); + + if let Err(e) = manifest_result { + // Rollback: remove worktree + branch + self.rollback_worktree(root_path, Some(&wt_id), Some(&rollback_branch)) + .await; + return Response::Error { + code: "CREATE_WORKTREE_FAILED".into(), + message: format!("failed to update manifest: {e}"), + }; + } + + self.notify_status_change(project_root).await; + + Response::CreateWorktreeResult { worktree_id: wt_id } + } + + pub(super) async fn handle_delete_worktree( + &self, + project_root: &str, + worktree_id: &str, + ) -> Response { + let m = match self.read_manifest_async(project_root).await { + Ok(m) => m, + Err(e) => return Self::error_response(&e), + }; + + let wt = match m.worktrees.get(worktree_id) { + Some(wt) => wt.clone(), + None => { + return Response::Error { + code: "WORKTREE_NOT_FOUND".into(), + message: format!("worktree {worktree_id} not found"), + }; + } + }; + + // 1. Kill all agents in the worktree + let agent_ids: Vec = wt.agents.keys().cloned().collect(); + self.kill_agents(&agent_ids).await; + + // 2. Remove git worktree directory + let root_path = Path::new(project_root); + let wt_path = paths::worktree_path(root_path, worktree_id); + git::remove_worktree(root_path, &wt_path).await.ok(); + + // 3. Delete local branch (soft-fail) + let branch = wt.branch.clone(); + let branch_deleted = git::delete_local_branch(root_path, &branch).await.is_ok(); + + // 4. Delete remote branch (soft-fail) + let remote_deleted = git::delete_remote_branch(root_path, &branch).await.is_ok(); + + // 5. Remove worktree from manifest + let wt_id = worktree_id.to_string(); + let killed_agents = agent_ids.clone(); + let pr = project_root.to_string(); + tokio::task::spawn_blocking(move || { + manifest::update_manifest(Path::new(&pr), move |mut m| { + m.worktrees.shift_remove(&wt_id); + m + }) + .ok(); + }) + .await + .ok(); + + self.notify_status_change(project_root).await; + + Response::DeleteWorktreeResult { + worktree_id: worktree_id.to_string(), + killed_agents, + branch_deleted, + remote_deleted, + } + } + + pub(super) async fn rollback_worktree( + &self, + root_path: &Path, + worktree_id: Option<&str>, + branch: Option<&str>, + ) { + if let Some(wt_id) = worktree_id { + let wt_path = paths::worktree_path(root_path, wt_id); + git::remove_worktree(root_path, &wt_path).await.ok(); + } + if let Some(b) = branch { + git::delete_local_branch(root_path, b).await.ok(); + } + } +}
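For orientation, a rough consumer of the new status-push API might look like the sketch below. It is not part of this patch: `stream_status` is a hypothetical IPC-server task, and it relies only on the public `subscribe_status` and `compute_full_status` methods added in `subscriptions.rs`.

// Hypothetical IPC-server task (sketch only — assumes `Engine` and `tracing` are in scope):
// forward a fresh status report to the connected client whenever the engine signals a change.
async fn stream_status(engine: std::sync::Arc<Engine>, project_root: String) {
    let mut rx = engine.subscribe_status(&project_root).await;
    while rx.recv().await.is_ok() {
        match engine.compute_full_status(&project_root).await {
            Ok((worktrees, agents)) => {
                // serialize a StatusReport { worktrees, agents } and write it to the client here
            }
            Err(e) => tracing::warn!("status push failed: {e}"),
        }
    }
}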