diff --git a/Cargo.lock b/Cargo.lock index 361f774e..947f7f01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3718,6 +3718,12 @@ dependencies = [ "tempfile", ] +[[package]] +name = "ndk-context" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b" + [[package]] name = "nibble_vec" version = "0.1.0" @@ -3789,6 +3795,31 @@ dependencies = [ "libc", ] +[[package]] +name = "objc2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c2599ce0ec54857b29ce62166b0ed9b4f6f1a70ccc9a71165b6154caca8c05" +dependencies = [ + "objc2-encode", +] + +[[package]] +name = "objc2-encode" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33" + +[[package]] +name = "objc2-foundation" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3e0adef53c21f888deb4fa59fc59f7eb17404926ee8a6f59f5df0fd7f9f3272" +dependencies = [ + "bitflags", + "objc2", +] + [[package]] name = "once_cell" version = "1.21.3" @@ -5390,6 +5421,7 @@ dependencies = [ "urlencoding", "uuid", "walkdir", + "webbrowser", "yaml-rust2", ] @@ -6292,6 +6324,22 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webbrowser" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00f1243ef785213e3a32fa0396093424a3a6ea566f9948497e5a2309261a4c97" +dependencies = [ + "core-foundation 0.10.1", + "jni", + "log", + "ndk-context", + "objc2", + "objc2-foundation", + "url", + "web-sys", +] + [[package]] name = "webpki-root-certs" version = "1.0.5" diff --git a/Cargo.toml b/Cargo.toml index 604e3c80..e496ebbf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,6 +51,7 @@ colored = "3" crossterm = "0.29" # Terminal raw mode for interactive input inquire = "0.9" # Interactive terminal prompts 
with autocomplete rustyline = "17" # Readline-style input with completions +webbrowser = "1" # Open URLs in default browser prettytable = "0.10" term_size = "0.3" diff --git a/src/agent/mod.rs b/src/agent/mod.rs index e714947d..1404ec69 100644 --- a/src/agent/mod.rs +++ b/src/agent/mod.rs @@ -184,6 +184,14 @@ pub async fn run_interactive( session.print_banner(); + // Display platform context if a project is selected + if session.platform_session.is_project_selected() { + println!( + "{}", + format!("Platform context: {}", session.platform_session.display_context()).dimmed() + ); + } + // NOTE: Terminal layout with ANSI scroll regions is disabled for now. // The scroll region approach conflicts with the existing input/output flow. // TODO: Implement proper scroll region support that integrates with the input handler. @@ -577,7 +585,23 @@ pub async fn run_interactive( .tool(PrometheusConnectTool::new(bg_manager.clone())) // RAG retrieval tools for compressed tool outputs .tool(RetrieveOutputTool::new()) - .tool(ListOutputsTool::new()); + .tool(ListOutputsTool::new()) + // Platform tools for project management + .tool(ListOrganizationsTool::new()) + .tool(ListProjectsTool::new()) + .tool(SelectProjectTool::new()) + .tool(CurrentContextTool::new()) + .tool(OpenProviderSettingsTool::new()) + .tool(CheckProviderConnectionTool::new()) + .tool(ListDeploymentCapabilitiesTool::new()) + // Deployment tools for service management + .tool(CreateDeploymentConfigTool::new()) + .tool(DeployServiceTool::new(project_path_buf.clone())) + .tool(ListDeploymentConfigsTool::new()) + .tool(TriggerDeploymentTool::new()) + .tool(GetDeploymentStatusTool::new()) + .tool(ListDeploymentsTool::new()) + .tool(GetServiceLogsTool::new()); // Add tools based on mode if is_planning { @@ -678,7 +702,23 @@ pub async fn run_interactive( .tool(PrometheusConnectTool::new(bg_manager.clone())) // RAG retrieval tools for compressed tool outputs .tool(RetrieveOutputTool::new()) - 
.tool(ListOutputsTool::new()); + .tool(ListOutputsTool::new()) + // Platform tools for project management + .tool(ListOrganizationsTool::new()) + .tool(ListProjectsTool::new()) + .tool(SelectProjectTool::new()) + .tool(CurrentContextTool::new()) + .tool(OpenProviderSettingsTool::new()) + .tool(CheckProviderConnectionTool::new()) + .tool(ListDeploymentCapabilitiesTool::new()) + // Deployment tools for service management + .tool(CreateDeploymentConfigTool::new()) + .tool(DeployServiceTool::new(project_path_buf.clone())) + .tool(ListDeploymentConfigsTool::new()) + .tool(TriggerDeploymentTool::new()) + .tool(GetDeploymentStatusTool::new()) + .tool(ListDeploymentsTool::new()) + .tool(GetServiceLogsTool::new()); // Add tools based on mode if is_planning { @@ -770,7 +810,23 @@ pub async fn run_interactive( .tool(PrometheusConnectTool::new(bg_manager.clone())) // RAG retrieval tools for compressed tool outputs .tool(RetrieveOutputTool::new()) - .tool(ListOutputsTool::new()); + .tool(ListOutputsTool::new()) + // Platform tools for project management + .tool(ListOrganizationsTool::new()) + .tool(ListProjectsTool::new()) + .tool(SelectProjectTool::new()) + .tool(CurrentContextTool::new()) + .tool(OpenProviderSettingsTool::new()) + .tool(CheckProviderConnectionTool::new()) + .tool(ListDeploymentCapabilitiesTool::new()) + // Deployment tools for service management + .tool(CreateDeploymentConfigTool::new()) + .tool(DeployServiceTool::new(project_path_buf.clone())) + .tool(ListDeploymentConfigsTool::new()) + .tool(TriggerDeploymentTool::new()) + .tool(GetDeploymentStatusTool::new()) + .tool(ListDeploymentsTool::new()) + .tool(GetServiceLogsTool::new()); // Add tools based on mode if is_planning { @@ -2210,7 +2266,23 @@ pub async fn run_query( .tool(PrometheusConnectTool::new(bg_manager.clone())) // RAG retrieval tools for compressed tool outputs .tool(RetrieveOutputTool::new()) - .tool(ListOutputsTool::new()); + .tool(ListOutputsTool::new()) + // Platform tools for project 
management + .tool(ListOrganizationsTool::new()) + .tool(ListProjectsTool::new()) + .tool(SelectProjectTool::new()) + .tool(CurrentContextTool::new()) + .tool(OpenProviderSettingsTool::new()) + .tool(CheckProviderConnectionTool::new()) + .tool(ListDeploymentCapabilitiesTool::new()) + // Deployment tools for service management + .tool(CreateDeploymentConfigTool::new()) + .tool(DeployServiceTool::new(project_path_buf.clone())) + .tool(ListDeploymentConfigsTool::new()) + .tool(TriggerDeploymentTool::new()) + .tool(GetDeploymentStatusTool::new()) + .tool(ListDeploymentsTool::new()) + .tool(GetServiceLogsTool::new()); // Add generation tools if this is a generation query if is_generation { @@ -2279,7 +2351,23 @@ pub async fn run_query( .tool(PrometheusConnectTool::new(bg_manager.clone())) // RAG retrieval tools for compressed tool outputs .tool(RetrieveOutputTool::new()) - .tool(ListOutputsTool::new()); + .tool(ListOutputsTool::new()) + // Platform tools for project management + .tool(ListOrganizationsTool::new()) + .tool(ListProjectsTool::new()) + .tool(SelectProjectTool::new()) + .tool(CurrentContextTool::new()) + .tool(OpenProviderSettingsTool::new()) + .tool(CheckProviderConnectionTool::new()) + .tool(ListDeploymentCapabilitiesTool::new()) + // Deployment tools for service management + .tool(CreateDeploymentConfigTool::new()) + .tool(DeployServiceTool::new(project_path_buf.clone())) + .tool(ListDeploymentConfigsTool::new()) + .tool(TriggerDeploymentTool::new()) + .tool(GetDeploymentStatusTool::new()) + .tool(ListDeploymentsTool::new()) + .tool(GetServiceLogsTool::new()); // Add generation tools if this is a generation query if is_generation { @@ -2337,7 +2425,23 @@ pub async fn run_query( .tool(PrometheusConnectTool::new(bg_manager.clone())) // RAG retrieval tools for compressed tool outputs .tool(RetrieveOutputTool::new()) - .tool(ListOutputsTool::new()); + .tool(ListOutputsTool::new()) + // Platform tools for project management + 
.tool(ListOrganizationsTool::new()) + .tool(ListProjectsTool::new()) + .tool(SelectProjectTool::new()) + .tool(CurrentContextTool::new()) + .tool(OpenProviderSettingsTool::new()) + .tool(CheckProviderConnectionTool::new()) + .tool(ListDeploymentCapabilitiesTool::new()) + // Deployment tools for service management + .tool(CreateDeploymentConfigTool::new()) + .tool(DeployServiceTool::new(project_path_buf.clone())) + .tool(ListDeploymentConfigsTool::new()) + .tool(TriggerDeploymentTool::new()) + .tool(GetDeploymentStatusTool::new()) + .tool(ListDeploymentsTool::new()) + .tool(GetServiceLogsTool::new()); // Add generation tools if this is a generation query if is_generation { diff --git a/src/agent/prompts/mod.rs b/src/agent/prompts/mod.rs index f6143394..e25e72e6 100644 --- a/src/agent/prompts/mod.rs +++ b/src/agent/prompts/mod.rs @@ -169,7 +169,9 @@ Just identify โ†’ explain โ†’ fix โ†’ proceed. /// Thinking guidelines - prevent "oops" and self-doubt patterns const THINKING_GUIDELINES: &str = r#" -- Plan briefly (2-3 sentences), then execute +- Do NOT narrate what you're about to do (e.g., "I'll call X tool" or "The user wants Y so I'll Z") +- Just take action directly without announcing it +- Plan internally, execute externally - users see results, not reasoning - Do NOT second-guess yourself with phrases like "oops", "I should have", or "I made a mistake" - If you made an error, fix it without self-deprecation - just fix it - Show confidence in your actions diff --git a/src/agent/session/mod.rs b/src/agent/session/mod.rs index e2ab4efe..d8e8c7db 100644 --- a/src/agent/session/mod.rs +++ b/src/agent/session/mod.rs @@ -20,6 +20,7 @@ pub use providers::{get_available_models, get_configured_providers, prompt_api_k use crate::agent::commands::TokenUsage; use crate::agent::{AgentResult, ProviderType}; +use crate::platform::PlatformSession; use colored::Colorize; use std::io; use std::path::Path; @@ -35,6 +36,8 @@ pub struct ChatSession { pub plan_mode: PlanMode, /// 
Session loaded via /resume command, to be processed by main loop pub pending_resume: Option, + /// Platform session state (selected project/org context) + pub platform_session: PlatformSession, } impl ChatSession { @@ -45,6 +48,9 @@ impl ChatSession { ProviderType::Bedrock => "global.anthropic.claude-sonnet-4-20250514-v1:0".to_string(), }; + // Load platform session from disk (returns default if not exists) + let platform_session = PlatformSession::load().unwrap_or_default(); + Self { provider, model: model.unwrap_or(default_model), @@ -53,6 +59,18 @@ impl ChatSession { token_usage: TokenUsage::new(), plan_mode: PlanMode::default(), pending_resume: None, + platform_session, + } + } + + /// Update the platform session and save to disk + pub fn update_platform_session(&mut self, session: PlatformSession) { + self.platform_session = session; + if let Err(e) = self.platform_session.save() { + eprintln!( + "{}", + format!("Warning: Failed to save platform session: {}", e).yellow() + ); } } @@ -242,8 +260,18 @@ impl ChatSession { pub fn read_input(&self) -> io::Result { use crate::agent::ui::input::read_input_with_file_picker; + // Build prompt with platform context if project is selected + let prompt = if self.platform_session.is_project_selected() { + format!( + "{} >", + self.platform_session.display_context() + ) + } else { + ">".to_string() + }; + Ok(read_input_with_file_picker( - ">", + &prompt, &self.project_path, self.plan_mode.is_planning(), )) diff --git a/src/agent/session/ui.rs b/src/agent/session/ui.rs index 08380107..0aa852b7 100644 --- a/src/agent/session/ui.rs +++ b/src/agent/session/ui.rs @@ -203,6 +203,39 @@ pub fn print_banner(session: &ChatSession) { ); println!(" {}", "Your AI-powered code analysis assistant".dimmed()); + // Show platform context (selected project/organization) + if session.platform_session.is_project_selected() { + println!( + " {} {}: {}/{}", + "๐Ÿ“ฆ", + "Project".white(), + session + .platform_session + .org_name + .as_deref() + 
.unwrap_or("?") + .cyan(), + session + .platform_session + .project_name + .as_deref() + .unwrap_or("?") + .cyan() + ); + } else { + println!( + " {} {} {}", + "๐Ÿ“ฆ", + "Project:".white(), + "(none selected)".dimmed() + ); + println!( + " {} {}", + "โ†’".cyan(), + "sync-ctl org list".dimmed() + ); + } + // Check for incomplete plans and show a hint let incomplete_plans = find_incomplete_plans(&session.project_path); if !incomplete_plans.is_empty() { diff --git a/src/agent/tools/mod.rs b/src/agent/tools/mod.rs index 1c1ea344..4ff99f9a 100644 --- a/src/agent/tools/mod.rs +++ b/src/agent/tools/mod.rs @@ -60,6 +60,19 @@ //! ### Web //! - `WebFetchTool` - Fetch content from URLs (converts HTML to markdown) //! +//! ### Platform (Syncable Platform API) +//! - `ListOrganizationsTool` - List organizations the user belongs to +//! - `ListProjectsTool` - List projects within an organization +//! - `SelectProjectTool` - Select a project as current context +//! - `CurrentContextTool` - Get the currently selected project context +//! - `OpenProviderSettingsTool` - Open cloud provider settings in browser +//! - `CheckProviderConnectionTool` - Check if a cloud provider is connected +//! - `ListDeploymentConfigsTool` - List deployment configurations for a project +//! - `TriggerDeploymentTool` - Trigger a deployment using a config +//! - `GetDeploymentStatusTool` - Get deployment task status and progress +//! - `ListDeploymentsTool` - List recent deployments with URLs +//! - `GetServiceLogsTool` - Get container logs for a deployed service +//! //! ## Error Handling Pattern //! //! 
Tools use the shared error utilities in `error.rs`: @@ -115,6 +128,7 @@ mod k8s_optimize; mod kubelint; pub mod output_store; mod plan; +pub mod platform; mod prometheus_connect; mod prometheus_discover; pub mod response; @@ -156,6 +170,13 @@ pub use k8s_drift::K8sDriftTool; pub use k8s_optimize::K8sOptimizeTool; pub use kubelint::KubelintTool; pub use plan::{PlanCreateTool, PlanListTool, PlanNextTool, PlanUpdateTool}; +pub use platform::{ + CheckProviderConnectionTool, CreateDeploymentConfigTool, CurrentContextTool, + DeployServiceTool, GetDeploymentStatusTool, GetServiceLogsTool, + ListDeploymentCapabilitiesTool, ListDeploymentConfigsTool, ListDeploymentsTool, + ListOrganizationsTool, ListProjectsTool, OpenProviderSettingsTool, SelectProjectTool, + TriggerDeploymentTool, +}; pub use prometheus_connect::PrometheusConnectTool; pub use prometheus_discover::PrometheusDiscoverTool; pub use security::{SecurityScanTool, VulnerabilitiesTool}; diff --git a/src/agent/tools/platform/analyze_codebase.rs b/src/agent/tools/platform/analyze_codebase.rs new file mode 100644 index 00000000..4cdbdc06 --- /dev/null +++ b/src/agent/tools/platform/analyze_codebase.rs @@ -0,0 +1,496 @@ +//! Analyze codebase tool for the agent +//! +//! Wraps the full `analyze_project()` analyzer function to provide comprehensive +//! project analysis including languages, frameworks, entry points, ports, +//! environment variables, and build scripts. 
+ +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use std::path::Path; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::analyzer::{ + AnalysisConfig, ProjectAnalysis, ProjectType, TechnologyCategory, + analyze_project_with_config, +}; + +/// Arguments for the analyze codebase tool +#[derive(Debug, Deserialize)] +pub struct AnalyzeCodebaseArgs { + /// Path to the project directory to analyze (defaults to current directory) + #[serde(default = "default_project_path")] + pub project_path: String, + /// Whether to include dev dependencies in analysis (defaults to false) + #[serde(default)] + pub include_dev_dependencies: bool, +} + +fn default_project_path() -> String { + ".".to_string() +} + +/// Error type for analyze codebase operations +#[derive(Debug, thiserror::Error)] +#[error("Analyze codebase error: {0}")] +pub struct AnalyzeCodebaseError(String); + +/// Tool to perform comprehensive codebase analysis +/// +/// Provides detailed information about a project's technology stack, +/// build requirements, and deployment configuration recommendations. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct AnalyzeCodebaseTool; + +impl AnalyzeCodebaseTool { + /// Create a new AnalyzeCodebaseTool + pub fn new() -> Self { + Self + } +} + +impl Tool for AnalyzeCodebaseTool { + const NAME: &'static str = "analyze_codebase"; + + type Error = AnalyzeCodebaseError; + type Args = AnalyzeCodebaseArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"Perform comprehensive analysis of a codebase to understand its technology stack and deployment requirements. + +**Use this tool to understand HOW to configure a deployment.** For quick Dockerfile discovery, use `analyze_project` instead. 
+ +**What it detects:** +- Programming languages with versions and confidence scores +- Frameworks and libraries (React, Next.js, Express, Django, etc.) +- Entry points and exposed ports +- Environment variables the application needs +- Build scripts (npm run build, etc.) +- Docker configuration if present + +**Parameters:** +- project_path: Path to the project directory (defaults to ".") +- include_dev_dependencies: Include dev dependencies in analysis (default: false) + +**Use Cases:** +- Understanding a project's technology stack before configuring deployment +- Discovering required environment variables for secrets setup +- Finding available build scripts for CI/CD configuration +- Recommending appropriate Dockerfile base images + +**Returns:** +- languages: Detected languages with versions +- technologies: Frameworks, libraries, and tools +- ports: Exposed ports from various sources +- environment_variables: Environment variables the app needs +- build_scripts: Available build commands +- deployment_hints: Derived recommendations for deployment +- next_steps: Guidance on what to do next + +**Comparison with analyze_project:** +- `analyze_project`: Fast, focused on Dockerfiles only - "what can I deploy?" +- `analyze_codebase`: Comprehensive analysis - "how should I configure deployment?""# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "project_path": { + "type": "string", + "description": "Path to the project directory to analyze (defaults to current directory)", + "default": "." 
+ }, + "include_dev_dependencies": { + "type": "boolean", + "description": "Include dev dependencies in analysis (default: false)", + "default": false + } + }, + "required": [] + }), + } + } + + async fn call(&self, args: Self::Args) -> Result { + let project_path = Path::new(&args.project_path); + + // Validate path exists + if !project_path.exists() { + return Ok(format_error_for_llm( + "analyze_codebase", + ErrorCategory::FileNotFound, + &format!("Project path does not exist: {}", args.project_path), + Some(vec![ + "Check that the path is correct", + "Use an absolute path or path relative to current directory", + ]), + )); + } + + if !project_path.is_dir() { + return Ok(format_error_for_llm( + "analyze_codebase", + ErrorCategory::ValidationFailed, + &format!("Path is not a directory: {}", args.project_path), + Some(vec!["Provide a directory path, not a file path"]), + )); + } + + // Configure analysis + let config = AnalysisConfig { + include_dev_dependencies: args.include_dev_dependencies, + deep_analysis: true, + ..Default::default() + }; + + // Perform analysis + match analyze_project_with_config(project_path, &config) { + Ok(analysis) => { + let result = format_analysis_for_llm(&args.project_path, &analysis); + serde_json::to_string_pretty(&result) + .map_err(|e| AnalyzeCodebaseError(format!("Failed to serialize: {}", e))) + } + Err(e) => Ok(format_error_for_llm( + "analyze_codebase", + ErrorCategory::InternalError, + &format!("Failed to analyze codebase: {}", e), + Some(vec![ + "Check that you have read permissions for the project directory", + "Ensure the path is accessible", + "Try running from the project root directory", + ]), + )), + } + } +} + +/// Format ProjectAnalysis into LLM-friendly JSON +fn format_analysis_for_llm(project_path: &str, analysis: &ProjectAnalysis) -> serde_json::Value { + // Format languages + let languages: Vec = analysis + .languages + .iter() + .map(|lang| { + json!({ + "name": lang.name, + "version": lang.version, + 
"confidence": lang.confidence, + "package_manager": lang.package_manager, + }) + }) + .collect(); + + // Format technologies (frameworks, libraries) + let technologies: Vec = analysis + .technologies + .iter() + .map(|tech| { + json!({ + "name": tech.name, + "version": tech.version, + "category": format_category(&tech.category), + "is_primary": tech.is_primary, + "confidence": tech.confidence, + }) + }) + .collect(); + + // Format ports + let ports: Vec = analysis + .ports + .iter() + .map(|port| { + json!({ + "number": port.number, + "protocol": format!("{:?}", port.protocol), + "description": port.description, + }) + }) + .collect(); + + // Format environment variables + let env_vars: Vec = analysis + .environment_variables + .iter() + .map(|env| { + json!({ + "name": env.name, + "required": env.required, + "default_value": env.default_value, + "description": env.description, + }) + }) + .collect(); + + // Format build scripts + let build_scripts: Vec = analysis + .build_scripts + .iter() + .map(|script| { + json!({ + "name": script.name, + "command": script.command, + "description": script.description, + "is_default": script.is_default, + }) + }) + .collect(); + + // Derive deployment hints + let deployment_hints = derive_deployment_hints(analysis); + + // Determine next steps + let next_steps = determine_next_steps(analysis); + + json!({ + "success": true, + "project_path": project_path, + "languages": languages, + "technologies": technologies, + "ports": ports, + "environment_variables": env_vars, + "build_scripts": build_scripts, + "project_type": format!("{:?}", analysis.project_type), + "architecture_type": format!("{:?}", analysis.architecture_type), + "analysis_metadata": { + "confidence_score": analysis.analysis_metadata.confidence_score, + "files_analyzed": analysis.analysis_metadata.files_analyzed, + "duration_ms": analysis.analysis_metadata.analysis_duration_ms, + }, + "deployment_hints": deployment_hints, + "summary": format_summary(analysis), + 
"next_steps": next_steps, + }) +} + +/// Format technology category for output +fn format_category(category: &TechnologyCategory) -> String { + match category { + TechnologyCategory::MetaFramework => "MetaFramework".to_string(), + TechnologyCategory::FrontendFramework => "FrontendFramework".to_string(), + TechnologyCategory::BackendFramework => "BackendFramework".to_string(), + TechnologyCategory::Library(lib_type) => format!("Library:{:?}", lib_type), + TechnologyCategory::BuildTool => "BuildTool".to_string(), + TechnologyCategory::Database => "Database".to_string(), + TechnologyCategory::Testing => "Testing".to_string(), + TechnologyCategory::Runtime => "Runtime".to_string(), + TechnologyCategory::PackageManager => "PackageManager".to_string(), + } +} + +/// Derive deployment hints from analysis +fn derive_deployment_hints(analysis: &ProjectAnalysis) -> serde_json::Value { + // Suggested port: first detected port or framework default + let suggested_port = analysis + .ports + .first() + .map(|p| p.number) + .or_else(|| infer_default_port(analysis)); + + // Check if build step is needed + let needs_build_step = !analysis.build_scripts.is_empty() + || analysis.technologies.iter().any(|t| { + matches!( + t.category, + TechnologyCategory::MetaFramework | TechnologyCategory::FrontendFramework + ) + }); + + // Recommend Dockerfile base image + let recommended_dockerfile_base = infer_dockerfile_base(analysis); + + // Check for Docker presence + let has_dockerfile = analysis + .docker_analysis + .as_ref() + .map(|d| !d.dockerfiles.is_empty()) + .unwrap_or(false); + + json!({ + "suggested_port": suggested_port, + "needs_build_step": needs_build_step, + "recommended_dockerfile_base": recommended_dockerfile_base, + "has_existing_dockerfile": has_dockerfile, + "required_env_vars": analysis.environment_variables.iter() + .filter(|e| e.required) + .map(|e| e.name.clone()) + .collect::>(), + }) +} + +/// Infer default port based on detected frameworks +fn 
infer_default_port(analysis: &ProjectAnalysis) -> Option { + for tech in &analysis.technologies { + let name_lower = tech.name.to_lowercase(); + if name_lower.contains("next") || name_lower.contains("nuxt") { + return Some(3000); + } + if name_lower.contains("vite") || name_lower.contains("vue") { + return Some(5173); + } + if name_lower.contains("angular") { + return Some(4200); + } + if name_lower.contains("django") { + return Some(8000); + } + if name_lower.contains("flask") { + return Some(5000); + } + if name_lower.contains("express") || name_lower.contains("fastify") { + return Some(3000); + } + if name_lower.contains("spring") { + return Some(8080); + } + if name_lower.contains("actix") || name_lower.contains("axum") { + return Some(8080); + } + } + + // Default based on language + for lang in &analysis.languages { + match lang.name.to_lowercase().as_str() { + "python" => return Some(8000), + "go" => return Some(8080), + "rust" => return Some(8080), + "java" | "kotlin" => return Some(8080), + "javascript" | "typescript" => return Some(3000), + _ => {} + } + } + + None +} + +/// Infer recommended Dockerfile base image +fn infer_dockerfile_base(analysis: &ProjectAnalysis) -> Option { + // Check primary language + for lang in &analysis.languages { + match lang.name.to_lowercase().as_str() { + "javascript" | "typescript" => { + // Check for Bun + if analysis.technologies.iter().any(|t| t.name.to_lowercase() == "bun") { + return Some("oven/bun:1-alpine".to_string()); + } + return Some("node:20-alpine".to_string()); + } + "python" => return Some("python:3.12-slim".to_string()), + "go" => return Some("golang:1.22-alpine".to_string()), + "rust" => return Some("rust:1.75-alpine".to_string()), + "java" => return Some("eclipse-temurin:21-jre-alpine".to_string()), + "kotlin" => return Some("eclipse-temurin:21-jre-alpine".to_string()), + _ => {} + } + } + + None +} + +/// Determine next steps based on analysis +fn determine_next_steps(analysis: &ProjectAnalysis) -> Vec { 
+ let mut steps = Vec::new(); + + let has_dockerfile = analysis + .docker_analysis + .as_ref() + .map(|d| !d.dockerfiles.is_empty()) + .unwrap_or(false); + + if has_dockerfile { + steps.push("Use analyze_project to get specific Dockerfile details".to_string()); + steps.push("Use list_deployment_capabilities to see available deployment targets".to_string()); + steps.push("Use create_deployment_config to create a deployment configuration".to_string()); + } else { + steps.push("Create a Dockerfile for your application (recommended base image in deployment_hints)".to_string()); + steps.push("After creating Dockerfile, use analyze_project to verify it's detected".to_string()); + } + + if !analysis.environment_variables.is_empty() { + let required_count = analysis.environment_variables.iter().filter(|e| e.required).count(); + if required_count > 0 { + steps.push(format!( + "Configure {} required environment variable{} before deployment", + required_count, + if required_count == 1 { "" } else { "s" } + )); + } + } + + steps +} + +/// Format a human-readable summary +fn format_summary(analysis: &ProjectAnalysis) -> String { + let lang_names: Vec<&str> = analysis.languages.iter().map(|l| l.name.as_str()).collect(); + + let primary_tech: Vec<&str> = analysis + .technologies + .iter() + .filter(|t| t.is_primary) + .map(|t| t.name.as_str()) + .collect(); + + let project_type = match analysis.project_type { + ProjectType::WebApplication => "web application", + ProjectType::ApiService => "API service", + ProjectType::CliTool => "CLI tool", + ProjectType::Library => "library", + ProjectType::MobileApp => "mobile app", + ProjectType::DesktopApp => "desktop app", + ProjectType::Microservice => "microservice", + ProjectType::StaticSite => "static site", + ProjectType::Hybrid => "hybrid project", + ProjectType::Unknown => "project", + }; + + let lang_str = if lang_names.is_empty() { + "Unknown language".to_string() + } else { + lang_names.join(", ") + }; + + let tech_str = if 
primary_tech.is_empty() { + String::new() + } else { + format!(" using {}", primary_tech.join(", ")) + }; + + format!("{} {}{}", lang_str, project_type, tech_str) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(AnalyzeCodebaseTool::NAME, "analyze_codebase"); + } + + #[test] + fn test_tool_creation() { + let tool = AnalyzeCodebaseTool::new(); + assert!(format!("{:?}", tool).contains("AnalyzeCodebaseTool")); + } + + #[test] + fn test_default_project_path() { + assert_eq!(default_project_path(), "."); + } + + #[test] + fn test_format_category() { + assert_eq!( + format_category(&TechnologyCategory::MetaFramework), + "MetaFramework" + ); + assert_eq!( + format_category(&TechnologyCategory::BackendFramework), + "BackendFramework" + ); + } +} diff --git a/src/agent/tools/platform/analyze_project.rs b/src/agent/tools/platform/analyze_project.rs new file mode 100644 index 00000000..71287092 --- /dev/null +++ b/src/agent/tools/platform/analyze_project.rs @@ -0,0 +1,205 @@ +//! Analyze project tool for the agent +//! +//! Wraps the existing `discover_dockerfiles_for_deployment` analyzer function +//! to allow the agent to analyze projects for deployment. 
+ +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use std::path::Path; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::analyzer::discover_dockerfiles_for_deployment; + +/// Arguments for the analyze project tool +#[derive(Debug, Deserialize)] +pub struct AnalyzeProjectArgs { + /// Path to the project directory to analyze (defaults to current directory) + #[serde(default = "default_project_path")] + pub project_path: String, +} + +fn default_project_path() -> String { + ".".to_string() +} + +/// Error type for analyze project operations +#[derive(Debug, thiserror::Error)] +#[error("Analyze project error: {0}")] +pub struct AnalyzeProjectError(String); + +/// Tool to analyze a project directory for deployment +/// +/// Discovers Dockerfiles and their build configurations to help +/// prepare for deployment. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct AnalyzeProjectTool; + +impl AnalyzeProjectTool { + /// Create a new AnalyzeProjectTool + pub fn new() -> Self { + Self + } +} + +impl Tool for AnalyzeProjectTool { + const NAME: &'static str = "analyze_project"; + + type Error = AnalyzeProjectError; + type Args = AnalyzeProjectArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"Analyze a project directory to discover Dockerfiles and build configurations for deployment. + +Before deploying, use this tool to understand what can be deployed from a project. + +**What it detects:** +- Dockerfiles and their variants (Dockerfile.dev, Dockerfile.prod, etc.) 
+- Build context paths for each Dockerfile +- Exposed ports from EXPOSE instructions or inferred from base images +- Multi-stage build configurations +- Suggested service names based on directory structure + +**Parameters:** +- project_path: Path to the project directory (defaults to ".") + +**Use Cases:** +- Before creating a deployment config, analyze the project structure +- Understand what services can be deployed from a monorepo +- Find the correct Dockerfile and build context for deployment + +**Returns:** +- dockerfiles: Array of discovered Dockerfiles with deployment metadata +- summary: Human-readable summary of what was found"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "project_path": { + "type": "string", + "description": "Path to the project directory to analyze (defaults to current directory)", + "default": "." + } + }, + "required": [] + }), + } + } + + async fn call(&self, args: Self::Args) -> Result { + let project_path = Path::new(&args.project_path); + + // Validate path exists + if !project_path.exists() { + return Ok(format_error_for_llm( + "analyze_project", + ErrorCategory::FileNotFound, + &format!("Project path does not exist: {}", args.project_path), + Some(vec![ + "Check that the path is correct", + "Use an absolute path or path relative to current directory", + ]), + )); + } + + if !project_path.is_dir() { + return Ok(format_error_for_llm( + "analyze_project", + ErrorCategory::ValidationFailed, + &format!("Path is not a directory: {}", args.project_path), + Some(vec!["Provide a directory path, not a file path"]), + )); + } + + // Call the existing analyzer function + match discover_dockerfiles_for_deployment(project_path) { + Ok(dockerfiles) => { + let dockerfile_count = dockerfiles.len(); + + // Build response with discovered Dockerfiles + let dockerfile_data: Vec = dockerfiles + .into_iter() + .map(|df| { + json!({ + "path": df.path.display().to_string(), + "build_context": df.build_context, + 
"suggested_service_name": df.suggested_service_name, + "suggested_port": df.suggested_port, + "base_image": df.base_image, + "is_multistage": df.is_multistage, + "environment": df.environment, + }) + }) + .collect(); + + let summary = if dockerfile_count == 0 { + "No Dockerfiles found in this project. You may need to create a Dockerfile before deploying.".to_string() + } else { + format!( + "Found {} Dockerfile{} suitable for deployment", + dockerfile_count, + if dockerfile_count == 1 { "" } else { "s" } + ) + }; + + let result = json!({ + "success": true, + "project_path": args.project_path, + "dockerfiles": dockerfile_data, + "dockerfile_count": dockerfile_count, + "summary": summary, + "next_steps": if dockerfile_count > 0 { + vec![ + "Use analyze_codebase for deeper analysis of build requirements and environment variables", + "Use list_deployment_capabilities to see available deployment targets", + "Use create_deployment_config to create a deployment configuration" + ] + } else { + vec![ + "Use analyze_codebase to understand the project's technology stack and recommended Dockerfile base image", + "Create a Dockerfile for your application", + "Consider using a multi-stage build for smaller images" + ] + } + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| AnalyzeProjectError(format!("Failed to serialize: {}", e))) + } + Err(e) => Ok(format_error_for_llm( + "analyze_project", + ErrorCategory::InternalError, + &format!("Failed to analyze project: {}", e), + Some(vec![ + "Check that you have read permissions for the project directory", + "Ensure the path is accessible", + ]), + )), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(AnalyzeProjectTool::NAME, "analyze_project"); + } + + #[test] + fn test_tool_creation() { + let tool = AnalyzeProjectTool::new(); + assert!(format!("{:?}", tool).contains("AnalyzeProjectTool")); + } + + #[test] + fn test_default_project_path() { + 
assert_eq!(default_project_path(), "."); + } +} diff --git a/src/agent/tools/platform/check_provider_connection.rs b/src/agent/tools/platform/check_provider_connection.rs new file mode 100644 index 00000000..3f3eee92 --- /dev/null +++ b/src/agent/tools/platform/check_provider_connection.rs @@ -0,0 +1,271 @@ +//! Check provider connection tool for the agent +//! +//! Checks if a cloud provider is connected to a project. + +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::platform::api::{CloudProvider, PlatformApiClient, PlatformApiError}; + +/// Arguments for the check provider connection tool +#[derive(Debug, Deserialize)] +pub struct CheckProviderConnectionArgs { + /// The project ID to check + pub project_id: String, + /// The cloud provider to check (gcp, aws, azure, hetzner) + pub provider: String, +} + +/// Error type for check provider connection operations +#[derive(Debug, thiserror::Error)] +#[error("Check provider connection error: {0}")] +pub struct CheckProviderConnectionError(String); + +/// Tool to check if a cloud provider is connected to a project +/// +/// SECURITY NOTE: This tool only returns connection STATUS (connected/not connected). +/// It NEVER returns actual credentials, tokens, or API keys. The agent should never +/// have access to sensitive authentication material. 
+#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct CheckProviderConnectionTool; + +impl CheckProviderConnectionTool { + /// Create a new CheckProviderConnectionTool + pub fn new() -> Self { + Self + } +} + +impl Tool for CheckProviderConnectionTool { + const NAME: &'static str = "check_provider_connection"; + + type Error = CheckProviderConnectionError; + type Args = CheckProviderConnectionArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"Check if a cloud provider is connected to a project. + +Returns connection status (connected or not connected) for the specified provider. +This tool NEVER returns actual credentials - only connection status. + +**Supported Providers:** +- gcp (Google Cloud Platform) +- aws (Amazon Web Services) +- azure (Microsoft Azure) +- hetzner (Hetzner Cloud) + +**Use Cases:** +- Verify a provider was connected after user completes setup in browser +- Check prerequisites before deployment operations +- Determine which providers are available for a project + +**Prerequisites:** +- User must be authenticated via `sync-ctl auth login` +- A project must be selected (use select_project first)"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "The UUID of the project to check" + }, + "provider": { + "type": "string", + "enum": ["gcp", "aws", "azure", "hetzner"], + "description": "The cloud provider to check: gcp, aws, azure, or hetzner" + } + }, + "required": ["project_id", "provider"] + }), + } + } + + async fn call(&self, args: Self::Args) -> Result { + // Validate project_id + if args.project_id.trim().is_empty() { + return Ok(format_error_for_llm( + "check_provider_connection", + ErrorCategory::ValidationFailed, + "project_id cannot be empty", + Some(vec![ + "Use list_projects to find valid project IDs", + "Use select_project to 
set the current project context", + ]), + )); + } + + // Parse and validate provider + let provider: CloudProvider = match args.provider.parse() { + Ok(p) => p, + Err(_) => { + return Ok(format_error_for_llm( + "check_provider_connection", + ErrorCategory::ValidationFailed, + &format!("Invalid provider: '{}'. Must be one of: gcp, aws, azure, hetzner", args.provider), + Some(vec![ + "Use 'gcp' for Google Cloud Platform", + "Use 'aws' for Amazon Web Services", + "Use 'azure' for Microsoft Azure", + "Use 'hetzner' for Hetzner Cloud", + ]), + )); + } + }; + + // Create the API client + let client = match PlatformApiClient::new() { + Ok(c) => c, + Err(e) => { + return Ok(format_api_error("check_provider_connection", e)); + } + }; + + // Check the connection status + match client.check_provider_connection(&provider, &args.project_id).await { + Ok(Some(status)) => { + // Provider is connected + let result = json!({ + "connected": true, + "provider": provider.as_str(), + "provider_name": provider.display_name(), + "project_id": args.project_id, + "credential_id": status.id, + "message": format!("{} is connected to this project", provider.display_name()) + // NOTE: We intentionally do NOT include any credential values here + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| CheckProviderConnectionError(format!("Failed to serialize: {}", e))) + } + Ok(None) => { + // Provider is NOT connected + let result = json!({ + "connected": false, + "provider": provider.as_str(), + "provider_name": provider.display_name(), + "project_id": args.project_id, + "message": format!("{} is NOT connected to this project", provider.display_name()), + "next_steps": [ + "Use open_provider_settings to open the settings page", + "Have the user connect their account in the browser", + "Call check_provider_connection again to verify" + ] + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| CheckProviderConnectionError(format!("Failed to serialize: {}", e))) + } + Err(e) => 
Ok(format_api_error("check_provider_connection", e)), + } + } +} + +/// Format a PlatformApiError for LLM consumption +fn format_api_error(tool_name: &str, error: PlatformApiError) -> String { + match error { + PlatformApiError::Unauthorized => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + "Not authenticated - please run `sync-ctl auth login` first", + Some(vec![ + "The user needs to authenticate with the Syncable platform", + "Run: sync-ctl auth login", + ]), + ), + PlatformApiError::NotFound(msg) => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + &format!("Resource not found: {}", msg), + Some(vec![ + "The project ID may be incorrect", + "Use list_projects to find valid project IDs", + ]), + ), + PlatformApiError::PermissionDenied(msg) => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + &format!("Permission denied: {}", msg), + Some(vec![ + "The user does not have access to this project", + "Contact the project admin for access", + ]), + ), + PlatformApiError::RateLimited => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + "Rate limit exceeded - please try again later", + Some(vec!["Wait a moment before retrying"]), + ), + PlatformApiError::HttpError(e) => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + &format!("Network error: {}", e), + Some(vec![ + "Check network connectivity", + "The Syncable API may be temporarily unavailable", + ]), + ), + PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + Some(vec!["This may be a temporary API issue"]), + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec!["Check the error message for details"]), + ), + PlatformApiError::ServerError { status, message } => 
format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec![ + "The Syncable API is experiencing issues", + "Try again later", + ]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec![ + "Check your internet connection", + "The Syncable API may be temporarily unavailable", + ]), + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(CheckProviderConnectionTool::NAME, "check_provider_connection"); + } + + #[test] + fn test_tool_creation() { + let tool = CheckProviderConnectionTool::new(); + assert!(format!("{:?}", tool).contains("CheckProviderConnectionTool")); + } + + #[test] + fn test_provider_parsing() { + assert!("gcp".parse::().is_ok()); + assert!("aws".parse::().is_ok()); + assert!("azure".parse::().is_ok()); + assert!("hetzner".parse::().is_ok()); + assert!("invalid".parse::().is_err()); + } +} diff --git a/src/agent/tools/platform/create_deployment_config.rs b/src/agent/tools/platform/create_deployment_config.rs new file mode 100644 index 00000000..eefcfe68 --- /dev/null +++ b/src/agent/tools/platform/create_deployment_config.rs @@ -0,0 +1,428 @@ +//! Create deployment config tool for the agent +//! +//! Allows the agent to create a new deployment configuration for a service. 
+ +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::platform::api::types::CreateDeploymentConfigRequest; +use crate::platform::api::{PlatformApiClient, PlatformApiError}; + +/// Arguments for the create deployment config tool +#[derive(Debug, Deserialize)] +pub struct CreateDeploymentConfigArgs { + /// The project UUID + pub project_id: String, + /// Service name for the deployment + pub service_name: String, + /// Repository ID from GitHub integration + pub repository_id: i64, + /// Full repository name (e.g., "owner/repo") + pub repository_full_name: String, + /// Port the service listens on + pub port: i32, + /// Git branch to deploy from + pub branch: String, + /// Target type: "kubernetes" or "cloud_runner" + pub target_type: String, + /// Cloud provider: "gcp" or "hetzner" + pub provider: String, + /// Environment ID for deployment + pub environment_id: String, + /// Path to Dockerfile relative to repo root + pub dockerfile_path: Option, + /// Build context path relative to repo root + pub build_context: Option, + /// Cluster ID (required for kubernetes target) + pub cluster_id: Option, + /// Registry ID (optional - will provision new if not provided) + pub registry_id: Option, + /// Enable auto-deploy on push (defaults to true) + #[serde(default = "default_auto_deploy")] + pub auto_deploy_enabled: bool, +} + +fn default_auto_deploy() -> bool { + true +} + +/// Error type for create deployment config operations +#[derive(Debug, thiserror::Error)] +#[error("Create deployment config error: {0}")] +pub struct CreateDeploymentConfigError(String); + +/// Tool to create a new deployment configuration +/// +/// Creates a deployment config that defines how to build and deploy a service. 
+#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct CreateDeploymentConfigTool; + +impl CreateDeploymentConfigTool { + /// Create a new CreateDeploymentConfigTool + pub fn new() -> Self { + Self + } +} + +impl Tool for CreateDeploymentConfigTool { + const NAME: &'static str = "create_deployment_config"; + + type Error = CreateDeploymentConfigError; + type Args = CreateDeploymentConfigArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"Create a new deployment configuration for a service. + +A deployment config defines how to build and deploy a service, including: +- Source repository and branch +- Dockerfile location and build context +- Target (Cloud Runner or Kubernetes) +- Port configuration +- Auto-deploy settings + +**Required Parameters:** +- project_id: The project UUID +- service_name: Name for the service (lowercase, hyphens allowed) +- repository_id: GitHub repository ID (from platform GitHub integration) +- repository_full_name: Full repo name like "owner/repo" +- port: Port the service listens on +- branch: Git branch to deploy from (e.g., "main") +- target_type: "kubernetes" or "cloud_runner" +- provider: "gcp" or "hetzner" +- environment_id: Environment to deploy to + +**Optional Parameters:** +- dockerfile_path: Path to Dockerfile (default: "Dockerfile") +- build_context: Build context path (default: ".") +- cluster_id: Required for kubernetes target +- registry_id: Container registry ID (provisions new if not provided) +- auto_deploy_enabled: Enable auto-deploy on push (default: true) + +**Prerequisites:** +- User must be authenticated +- GitHub repository must be connected to the project +- Provider must be connected (check with check_provider_connection) +- For kubernetes: cluster must exist (check with list_deployment_capabilities) + +**Returns:** +- config_id: The created deployment config ID +- service_name, 
branch, target_type, provider +- next_steps: How to trigger a deployment"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "The UUID of the project" + }, + "service_name": { + "type": "string", + "description": "Name for the service (lowercase, hyphens allowed)" + }, + "repository_id": { + "type": "integer", + "description": "GitHub repository ID from platform integration" + }, + "repository_full_name": { + "type": "string", + "description": "Full repository name (e.g., 'owner/repo')" + }, + "port": { + "type": "integer", + "description": "Port the service listens on" + }, + "branch": { + "type": "string", + "description": "Git branch to deploy from" + }, + "target_type": { + "type": "string", + "enum": ["kubernetes", "cloud_runner"], + "description": "Deployment target type" + }, + "provider": { + "type": "string", + "enum": ["gcp", "hetzner"], + "description": "Cloud provider" + }, + "environment_id": { + "type": "string", + "description": "Environment ID for deployment" + }, + "dockerfile_path": { + "type": "string", + "description": "Path to Dockerfile relative to repo root" + }, + "build_context": { + "type": "string", + "description": "Build context path relative to repo root" + }, + "cluster_id": { + "type": "string", + "description": "Cluster ID (required for kubernetes target)" + }, + "registry_id": { + "type": "string", + "description": "Registry ID (optional - provisions new if not provided)" + }, + "auto_deploy_enabled": { + "type": "boolean", + "description": "Enable auto-deploy on push (default: true)" + } + }, + "required": [ + "project_id", "service_name", "repository_id", "repository_full_name", + "port", "branch", "target_type", "provider", "environment_id" + ] + }), + } + } + + async fn call(&self, args: Self::Args) -> Result { + // Validate required fields + if args.project_id.trim().is_empty() { + return Ok(format_error_for_llm( + "create_deployment_config", + 
ErrorCategory::ValidationFailed, + "project_id cannot be empty", + Some(vec![ + "Use list_projects to find valid project IDs", + "Use current_context to get the selected project", + ]), + )); + } + + if args.service_name.trim().is_empty() { + return Ok(format_error_for_llm( + "create_deployment_config", + ErrorCategory::ValidationFailed, + "service_name cannot be empty", + Some(vec![ + "Use analyze_project to discover suggested service names", + "Service name should be lowercase with hyphens", + ]), + )); + } + + // Validate target_type + let valid_targets = ["kubernetes", "cloud_runner"]; + if !valid_targets.contains(&args.target_type.as_str()) { + return Ok(format_error_for_llm( + "create_deployment_config", + ErrorCategory::ValidationFailed, + &format!( + "Invalid target_type '{}'. Must be 'kubernetes' or 'cloud_runner'", + args.target_type + ), + Some(vec![ + "Use 'cloud_runner' for GCP Cloud Run or Hetzner containers", + "Use 'kubernetes' for deploying to a K8s cluster", + ]), + )); + } + + // Validate provider + let valid_providers = ["gcp", "hetzner"]; + if !valid_providers.contains(&args.provider.as_str()) { + return Ok(format_error_for_llm( + "create_deployment_config", + ErrorCategory::ValidationFailed, + &format!( + "Invalid provider '{}'. 
Must be 'gcp' or 'hetzner'", + args.provider + ), + Some(vec![ + "Use list_deployment_capabilities to see connected providers", + "Connect a provider in platform settings first", + ]), + )); + } + + // Kubernetes target requires cluster_id + if args.target_type == "kubernetes" && args.cluster_id.is_none() { + return Ok(format_error_for_llm( + "create_deployment_config", + ErrorCategory::ValidationFailed, + "cluster_id is required for kubernetes target", + Some(vec![ + "Use list_deployment_capabilities to find available clusters", + "Or use 'cloud_runner' target which doesn't require a cluster", + ]), + )); + } + + // Create the API client + let client = match PlatformApiClient::new() { + Ok(c) => c, + Err(e) => { + return Ok(format_api_error("create_deployment_config", e)); + } + }; + + // Build the request + // Note: Send both field name variants (dockerfile/dockerfilePath, context/buildContext) + // for backend compatibility - different endpoints may expect different field names + let request = CreateDeploymentConfigRequest { + project_id: args.project_id.clone(), + service_name: args.service_name.clone(), + repository_id: args.repository_id, + repository_full_name: args.repository_full_name.clone(), + dockerfile_path: args.dockerfile_path.clone(), + dockerfile: args.dockerfile_path.clone(), // Alias for backend compatibility + build_context: args.build_context.clone(), + context: args.build_context.clone(), // Alias for backend compatibility + port: args.port, + branch: args.branch.clone(), + target_type: args.target_type.clone(), + cloud_provider: args.provider.clone(), + environment_id: args.environment_id.clone(), + cluster_id: args.cluster_id.clone(), + registry_id: args.registry_id.clone(), + auto_deploy_enabled: args.auto_deploy_enabled, + is_public: None, + cloud_runner_config: None, + }; + + // Create the deployment config + match client.create_deployment_config(&request).await { + Ok(config) => { + let result = json!({ + "success": true, + "config_id": 
config.id, + "service_name": config.service_name, + "branch": config.branch, + "target_type": args.target_type, + "provider": args.provider, + "auto_deploy_enabled": args.auto_deploy_enabled, + "message": format!( + "Deployment config created for service '{}' on {} ({})", + config.service_name, args.target_type, args.provider + ), + "next_steps": [ + format!("Use trigger_deployment with config_id '{}' to deploy", config.id), + "Use get_deployment_status to monitor deployment progress", + if args.auto_deploy_enabled { + "Auto-deploy is enabled - pushing to the branch will trigger deployments" + } else { + "Auto-deploy is disabled - deployments must be triggered manually" + } + ] + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| CreateDeploymentConfigError(format!("Failed to serialize: {}", e))) + } + Err(e) => Ok(format_api_error("create_deployment_config", e)), + } + } +} + +/// Format a PlatformApiError for LLM consumption +fn format_api_error(tool_name: &str, error: PlatformApiError) -> String { + match error { + PlatformApiError::Unauthorized => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + "Not authenticated - please run `sync-ctl auth login` first", + Some(vec![ + "The user needs to authenticate with the Syncable platform", + "Run: sync-ctl auth login", + ]), + ), + PlatformApiError::NotFound(msg) => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + &format!("Resource not found: {}", msg), + Some(vec![ + "The project ID may be incorrect", + "The repository may not be connected to the project", + "Use list_projects to find valid project IDs", + ]), + ), + PlatformApiError::PermissionDenied(msg) => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + &format!("Permission denied: {}", msg), + Some(vec![ + "The user does not have permission to create deployment configs", + "Contact the project admin for access", + ]), + ), + PlatformApiError::RateLimited => format_error_for_llm( + 
tool_name, + ErrorCategory::ResourceUnavailable, + "Rate limit exceeded - please try again later", + Some(vec!["Wait a moment before retrying"]), + ), + PlatformApiError::HttpError(e) => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + &format!("Network error: {}", e), + Some(vec![ + "Check network connectivity", + "The Syncable API may be temporarily unavailable", + ]), + ), + PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + Some(vec!["This may be a temporary API issue"]), + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec![ + "Check the error message for details", + "The repository may not be properly connected", + ]), + ), + PlatformApiError::ServerError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec![ + "The Syncable API is experiencing issues", + "Try again later", + ]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec![ + "Check your internet connection", + "The Syncable API may be temporarily unavailable", + ]), + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(CreateDeploymentConfigTool::NAME, "create_deployment_config"); + } + + #[test] + fn test_tool_creation() { + let tool = CreateDeploymentConfigTool::new(); + assert!(format!("{:?}", tool).contains("CreateDeploymentConfigTool")); + } + + #[test] + fn test_default_auto_deploy() { + assert!(default_auto_deploy()); + } +} diff --git a/src/agent/tools/platform/current_context.rs b/src/agent/tools/platform/current_context.rs new file mode 100644 index 
00000000..5edae122 --- /dev/null +++ b/src/agent/tools/platform/current_context.rs @@ -0,0 +1,131 @@ +//! Current context tool for the agent +//! +//! Allows the agent to query the currently selected project context. + +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::platform::PlatformSession; + +/// Arguments for the current context tool (none required) +#[derive(Debug, Deserialize)] +pub struct CurrentContextArgs {} + +/// Error type for current context operations +#[derive(Debug, thiserror::Error)] +#[error("Current context error: {0}")] +pub struct CurrentContextError(String); + +/// Tool to get the currently selected project context +/// +/// This tool reads the platform session from `~/.syncable/platform-session.json` +/// and returns information about the selected project and organization. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct CurrentContextTool; + +impl CurrentContextTool { + /// Create a new CurrentContextTool + pub fn new() -> Self { + Self + } +} + +impl Tool for CurrentContextTool { + const NAME: &'static str = "current_context"; + + type Error = CurrentContextError; + type Args = CurrentContextArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"Get the currently selected project context. + +Returns information about the currently selected project and organization, +or indicates if no project is selected. 
+ +**Use Cases:** +- Checking which project is currently active before operations +- Verifying context after selection +- Determining if context setup is needed + +**No Prerequisites:** +- This tool can be called at any time +- Returns helpful message if no project is selected"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": {}, + "required": [] + }), + } + } + + async fn call(&self, _args: Self::Args) -> Result { + // Load the platform session + let session = match PlatformSession::load() { + Ok(s) => s, + Err(e) => { + return Ok(format_error_for_llm( + "current_context", + ErrorCategory::InternalError, + &format!("Failed to load platform session: {}", e), + Some(vec![ + "The session file may be corrupted", + "Try selecting a project with select_project", + ]), + )); + } + }; + + // Check if a project is selected + if !session.is_project_selected() { + let result = json!({ + "success": true, + "has_context": false, + "message": "No project currently selected", + "suggestion": "Use list_organizations and list_projects to find a project, then select_project to set context" + }); + + return serde_json::to_string_pretty(&result) + .map_err(|e| CurrentContextError(format!("Failed to serialize: {}", e))); + } + + // Return the current context + let result = json!({ + "success": true, + "has_context": true, + "context": { + "project_id": session.project_id, + "project_name": session.project_name, + "organization_id": session.org_id, + "organization_name": session.org_name, + "display": session.display_context(), + "last_updated": session.last_updated.map(|dt| dt.to_rfc3339()) + } + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| CurrentContextError(format!("Failed to serialize: {}", e))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(CurrentContextTool::NAME, "current_context"); + } + + #[test] + fn test_tool_creation() { + let tool = CurrentContextTool::new(); + 
assert!(format!("{:?}", tool).contains("CurrentContextTool")); + } +} diff --git a/src/agent/tools/platform/deploy_service.rs b/src/agent/tools/platform/deploy_service.rs new file mode 100644 index 00000000..9ee0b192 --- /dev/null +++ b/src/agent/tools/platform/deploy_service.rs @@ -0,0 +1,959 @@ +//! Deploy service tool for the agent +//! +//! A compound tool that enables conversational deployment with intelligent recommendations. +//! Analyzes the project, provides recommendations with reasoning, and executes deployment. + +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use std::path::PathBuf; +use std::str::FromStr; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::analyzer::{AnalysisConfig, TechnologyCategory, analyze_project_with_config}; +use crate::platform::api::types::{ + CloudProvider, CreateDeploymentConfigRequest, ProjectRepository, build_cloud_runner_config, +}; +use crate::platform::api::{PlatformApiClient, PlatformApiError, TriggerDeploymentRequest}; +use crate::platform::PlatformSession; +use crate::wizard::{ + RecommendationInput, recommend_deployment, get_provider_deployment_statuses, +}; +use std::process::Command; + +/// Arguments for the deploy service tool +#[derive(Debug, Deserialize)] +pub struct DeployServiceArgs { + /// Optional: specific subdirectory/service to deploy (for monorepos) + pub path: Option, + /// Optional: override recommended provider (gcp, hetzner) + pub provider: Option, + /// Optional: override machine type selection + pub machine_type: Option, + /// Optional: override region selection + pub region: Option, + /// Optional: override detected port + pub port: Option, + /// Whether to make the service publicly accessible (default: false for safety) + /// Internal services can only be accessed within the cluster/network + #[serde(default)] + pub is_public: bool, + /// If true (default), show recommendation but don't 
deploy yet + /// If false with settings, deploy immediately + #[serde(default = "default_preview")] + pub preview_only: bool, +} + +fn default_preview() -> bool { + true +} + +/// Error type for deploy service operations +#[derive(Debug, thiserror::Error)] +#[error("Deploy service error: {0}")] +pub struct DeployServiceError(String); + +/// Tool to analyze a project and deploy it with intelligent recommendations +/// +/// Provides an end-to-end deployment experience: +/// 1. Analyzes the project (language, framework, ports, health endpoints) +/// 2. Checks available deployment capabilities +/// 3. Generates smart recommendations with reasoning +/// 4. Shows a preview for user confirmation +/// 5. Creates deployment config and triggers deployment +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DeployServiceTool { + project_path: PathBuf, +} + +impl DeployServiceTool { + /// Create a new DeployServiceTool + pub fn new(project_path: PathBuf) -> Self { + Self { project_path } + } +} + +impl Tool for DeployServiceTool { + const NAME: &'static str = "deploy_service"; + + type Error = DeployServiceError; + type Args = DeployServiceArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"Analyze a project and deploy it with intelligent recommendations. + +This tool provides an end-to-end deployment experience: +1. Analyzes the project to detect language, framework, ports, and health endpoints +2. Checks available deployment capabilities (providers, clusters, registries) +3. Generates smart recommendations with reasoning +4. Shows a preview for user confirmation +5. Creates deployment config and triggers deployment + +**Default behavior (preview_only=true):** +Returns analysis and recommendations. User should confirm before actual deployment. 
+ +**Direct deployment (preview_only=false):** +Uses provided overrides or recommendation defaults to deploy immediately. + +**Parameters:** +- path: Optional subdirectory for monorepo services +- provider: Override recommendation (gcp, hetzner) +- machine_type: Override machine selection (e.g., cx22, e2-small) +- region: Override region selection (e.g., nbg1, us-central1) +- port: Override detected port +- is_public: Whether service should be publicly accessible (default: false) +- preview_only: If true (default), show recommendation only + +**IMPORTANT - Public vs Internal:** +- is_public=false (default): Service is internal-only, not accessible from internet +- is_public=true: Service gets a public URL, accessible from anywhere +- ALWAYS show this in the preview and ask user before deploying public services + +**What it analyzes:** +- Programming language and framework +- Port configuration from source code, package.json, Dockerfiles +- Health check endpoints (/health, /healthz, etc.) +- Existing infrastructure (K8s manifests, Helm charts) + +**Recommendation reasoning includes:** +- Why a specific provider was chosen +- Why a machine type fits the workload (based on memory requirements) +- Where the port was detected from +- Confidence level in the recommendation + +**Example flow:** +User: "deploy this service" +1. Call with preview_only=true → Shows recommendation +2. User: "yes, deploy it" → Call with preview_only=false to deploy +3. User: "make it public" → Call with preview_only=true AND is_public=true to show NEW preview +4. User: "yes" → NOW call with preview_only=false to deploy + +**CRITICAL - Human in the loop:** +- NEVER deploy (preview_only=false) immediately after user requests a CHANGE +- If user says "make it public", "use GCP", "change region", etc. 
โ†’ show NEW preview first +- Only deploy after user explicitly confirms the final settings with "yes", "deploy", "confirm" +- A change request is NOT a deployment confirmation + +**Prerequisites:** +- User must be authenticated (sync-ctl auth login) +- A project must be selected (use select_project first) +- Provider must be connected (check with list_deployment_capabilities)"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "Subdirectory to deploy (for monorepos)" + }, + "provider": { + "type": "string", + "enum": ["gcp", "hetzner"], + "description": "Override: cloud provider" + }, + "machine_type": { + "type": "string", + "description": "Override: machine type (e.g., cx22, e2-small)" + }, + "region": { + "type": "string", + "description": "Override: deployment region" + }, + "port": { + "type": "integer", + "description": "Override: port to expose" + }, + "is_public": { + "type": "boolean", + "description": "Whether service should be publicly accessible. Default: false (internal only). Set to true for public URL." + }, + "preview_only": { + "type": "boolean", + "description": "If true (default), show recommendation only. If false, deploy." + } + } + }), + } + } + + async fn call(&self, args: Self::Args) -> Result { + // 1. Determine analysis path + let analysis_path = if let Some(ref subpath) = args.path { + self.project_path.join(subpath) + } else { + self.project_path.clone() + }; + + // Validate path exists + if !analysis_path.exists() { + return Ok(format_error_for_llm( + "deploy_service", + ErrorCategory::FileNotFound, + &format!("Path not found: {}", analysis_path.display()), + Some(vec!["Check if the path exists", "Use list_directory to explore"]), + )); + } + + // 2. 
        // Run project analysis (deep mode) to detect language/framework/ports.
        let config = AnalysisConfig {
            deep_analysis: true,
            ..Default::default()
        };

        let analysis = match analyze_project_with_config(&analysis_path, &config) {
            Ok(a) => a,
            Err(e) => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::InternalError,
                    &format!("Analysis failed: {}", e),
                    Some(vec!["Check if the directory contains a valid project"]),
                ));
            }
        };

        // 3. Get API client and context
        let client = match PlatformApiClient::new() {
            Ok(c) => c,
            Err(_) => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::PermissionDenied,
                    "Not authenticated",
                    Some(vec!["Run: sync-ctl auth login"]),
                ));
            }
        };

        // Load platform session for context (selected org/project/environment)
        let session = match PlatformSession::load() {
            Ok(s) => s,
            Err(_) => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::InternalError,
                    "Failed to load platform session",
                    Some(vec!["Try selecting a project with select_project"]),
                ));
            }
        };

        if !session.is_project_selected() {
            return Ok(format_error_for_llm(
                "deploy_service",
                ErrorCategory::ValidationFailed,
                "No project selected",
                Some(vec!["Use select_project to choose a project first"]),
            ));
        }

        let project_id = session.project_id.clone().unwrap_or_default();
        let environment_id = session.environment_id.clone();

        // 4. Check for existing deployment configs (duplicate detection)
        let existing_configs = match client.list_deployment_configs(&project_id).await {
            Ok(configs) => configs,
            Err(e) => {
                // Non-fatal - continue without duplicate detection
                tracing::warn!("Failed to fetch existing configs: {}", e);
                Vec::new()
            }
        };

        // Get service name early to check for duplicates
        let service_name = get_service_name(&analysis_path);

        // Find existing config with same service name (case-insensitive)
        let existing_config = existing_configs
            .iter()
            .find(|c| c.service_name.eq_ignore_ascii_case(&service_name));

        // Get environment info for display
        let environments = match client.list_environments(&project_id).await {
            Ok(envs) => envs,
            Err(_) => Vec::new(),
        };

        // Resolve environment name for display. Precedence: session env id,
        // then the existing config's env, then the project's first env.
        let (resolved_env_id, resolved_env_name, is_production) = if let Some(ref env_id) = environment_id {
            let env = environments.iter().find(|e| e.id == *env_id);
            let name = env.map(|e| e.name.clone()).unwrap_or_else(|| "Unknown".to_string());
            // Heuristic: a name containing "prod" is treated as production.
            let is_prod = name.to_lowercase().contains("prod");
            (env_id.clone(), name, is_prod)
        } else if let Some(existing) = &existing_config {
            // Use the environment from existing config
            let env = environments.iter().find(|e| e.id == existing.environment_id);
            let name = env.map(|e| e.name.clone()).unwrap_or_else(|| "Unknown".to_string());
            let is_prod = name.to_lowercase().contains("prod");
            (existing.environment_id.clone(), name, is_prod)
        } else if let Some(first_env) = environments.first() {
            let is_prod = first_env.name.to_lowercase().contains("prod");
            (first_env.id.clone(), first_env.name.clone(), is_prod)
        } else {
            ("".to_string(), "No environment".to_string(), false)
        };

        // 6. Get available providers
        let capabilities = match get_provider_deployment_statuses(&client, &project_id).await {
            Ok(c) => c,
            Err(e) => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::NetworkError,
                    &format!("Failed to get deployment capabilities: {}", e),
                    None,
                ));
            }
        };

        // Check if any provider is available (supported AND connected)
        let available_providers: Vec<_> = capabilities
            .iter()
            .filter(|s| s.provider.is_available() && s.is_connected)
            .map(|s| s.provider.clone())
            .collect();

        if available_providers.is_empty() {
            return Ok(format_error_for_llm(
                "deploy_service",
                ErrorCategory::ResourceUnavailable,
                "No cloud providers connected",
                Some(vec![
                    "Connect GCP or Hetzner in platform settings",
                    "Use open_provider_settings to configure a provider",
                ]),
            ));
        }

        // 5.
        // Check for existing K8s clusters (influences target recommendation)
        let has_existing_k8s = capabilities.iter().any(|s| !s.clusters.is_empty());

        // 6. Generate recommendation
        let recommendation_input = RecommendationInput {
            analysis: analysis.clone(),
            available_providers: available_providers.clone(),
            has_existing_k8s,
            user_region_hint: args.region.clone(),
        };

        let recommendation = recommend_deployment(recommendation_input);

        // 7. Extract analysis summary for the preview payload
        let primary_language = analysis.languages.first()
            .map(|l| l.name.clone())
            .unwrap_or_else(|| "Unknown".to_string());

        let primary_framework = analysis.technologies.iter()
            .find(|t| matches!(t.category, TechnologyCategory::BackendFramework | TechnologyCategory::MetaFramework))
            .map(|t| t.name.clone())
            .unwrap_or_else(|| "None detected".to_string());

        let has_dockerfile = analysis.docker_analysis
            .as_ref()
            .map(|d| !d.dockerfiles.is_empty())
            .unwrap_or(false);

        let has_k8s = analysis.infrastructure
            .as_ref()
            .map(|i| i.has_kubernetes)
            .unwrap_or(false);

        // 10. If preview_only, return recommendation (no side effects performed)
        if args.preview_only {
            // Build the deployment mode info
            let (deployment_mode, mode_explanation, next_steps) = if let Some(existing) = &existing_config {
                (
                    "REDEPLOY",
                    format!(
                        "Service '{}' already has a deployment config (ID: {}). Deploying will trigger a REDEPLOY of the existing service.",
                        existing.service_name, existing.id
                    ),
                    vec![
                        "To redeploy with current config: call deploy_service with preview_only=false".to_string(),
                        "This will trigger a new deployment of the existing service".to_string(),
                        "The existing configuration will be used".to_string(),
                    ]
                )
            } else {
                (
                    "NEW_DEPLOYMENT",
                    format!(
                        "No existing deployment config found for '{}'. This will create a NEW deployment configuration.",
                        service_name
                    ),
                    vec![
                        "To deploy with these settings: call deploy_service with preview_only=false".to_string(),
                        "To customize: specify provider, machine_type, region, or port parameters".to_string(),
                        "To see more options: check the alternatives section above".to_string(),
                    ]
                )
            };

            // Production warning
            let production_warning = if is_production {
                Some("⚠️ WARNING: This will deploy to PRODUCTION environment. Please confirm you intend to deploy to production.")
            } else {
                None
            };

            let response = json!({
                "status": "recommendation",
                "deployment_mode": deployment_mode,
                "mode_explanation": mode_explanation,
                "environment": {
                    "id": resolved_env_id,
                    "name": resolved_env_name,
                    "is_production": is_production,
                },
                "production_warning": production_warning,
                "existing_config": existing_config.map(|c| json!({
                    "id": c.id,
                    "service_name": c.service_name,
                    "environment_id": c.environment_id,
                    "branch": c.branch,
                    "port": c.port,
                    "auto_deploy_enabled": c.auto_deploy_enabled,
                    "created_at": c.created_at.to_rfc3339(),
                })),
                "analysis": {
                    "path": analysis_path.display().to_string(),
                    "language": primary_language,
                    "framework": primary_framework,
                    "detected_port": recommendation.port,
                    "port_source": recommendation.port_source,
                    "health_endpoint": recommendation.health_check_path,
                    "has_dockerfile": has_dockerfile,
                    "has_kubernetes": has_k8s,
                },
                "recommendation": {
                    "provider": recommendation.provider.as_str(),
                    "provider_reasoning": recommendation.provider_reasoning,
                    "target": recommendation.target.as_str(),
                    "target_reasoning": recommendation.target_reasoning,
                    "machine_type": recommendation.machine_type,
                    "machine_reasoning": recommendation.machine_reasoning,
                    "region": recommendation.region,
                    "region_reasoning": recommendation.region_reasoning,
                    "port": recommendation.port,
                    "health_check_path": recommendation.health_check_path,
                    "is_public": args.is_public,
                    "is_public_note": if args.is_public {
                        "Service will be PUBLICLY accessible from the internet"
                    } else {
                        "Service will be INTERNAL only (not accessible from internet)"
                    },
                    "confidence": recommendation.confidence,
                },
                "alternatives": {
                    "providers": recommendation.alternatives.providers.iter().map(|p| json!({
                        "provider": p.provider.as_str(),
                        "available": p.available,
                        "reason_if_unavailable": p.reason_if_unavailable,
                    })).collect::<Vec<_>>(),
                    "machine_types": recommendation.alternatives.machine_types.iter().map(|m| json!({
                        "machine_type": m.machine_type,
                        "vcpu": m.vcpu,
                        "memory_gb": m.memory_gb,
                        "description": m.description,
                    })).collect::<Vec<_>>(),
                    "regions": recommendation.alternatives.regions.iter().map(|r| json!({
                        "region": r.region,
                        "display_name": r.display_name,
                    })).collect::<Vec<_>>(),
                },
                "service_name": service_name,
                "next_steps": next_steps,
                "confirmation_prompt": if existing_config.is_some() {
                    format!(
                        "REDEPLOY '{}' to {} environment?{}",
                        service_name,
                        resolved_env_name,
                        if is_production { " ⚠️ (PRODUCTION)" } else { "" }
                    )
                } else {
                    format!(
                        "Deploy NEW service '{}' to {} ({}) with {} in {} on {} environment?{}",
                        service_name,
                        recommendation.provider.display_name(),
                        recommendation.target.display_name(),
                        recommendation.machine_type,
                        recommendation.region,
                        resolved_env_name,
                        if is_production { " ⚠️ (PRODUCTION)" } else { "" }
                    )
                },
            });

            return serde_json::to_string_pretty(&response)
                .map_err(|e| DeployServiceError(format!("Failed to serialize: {}", e)));
        }

        // 11.
        // Execute deployment - EITHER redeploy existing OR create new

        // If existing config found, trigger redeploy instead of creating new config
        if let Some(existing) = &existing_config {
            let trigger_request = TriggerDeploymentRequest {
                project_id: project_id.clone(),
                config_id: existing.id.clone(),
                commit_sha: None, // deploy the branch head, not a pinned commit
            };

            return match client.trigger_deployment(&trigger_request).await {
                Ok(response) => {
                    let result = json!({
                        "status": "redeployed",
                        "deployment_mode": "REDEPLOY",
                        "config_id": existing.id,
                        "task_id": response.backstage_task_id,
                        "service_name": service_name,
                        "environment": {
                            "id": resolved_env_id,
                            "name": resolved_env_name,
                            "is_production": is_production,
                        },
                        "message": format!(
                            "Redeploy triggered for existing service '{}' on {} environment. Task ID: {}",
                            service_name, resolved_env_name, response.backstage_task_id
                        ),
                        "next_steps": [
                            format!("Monitor progress: use get_deployment_status with task_id '{}'", response.backstage_task_id),
                            "View logs after deployment: use get_service_logs",
                        ],
                    });

                    serde_json::to_string_pretty(&result)
                        .map_err(|e| DeployServiceError(format!("Failed to serialize: {}", e)))
                }
                Err(e) => Ok(format_api_error("deploy_service", e)),
            };
        }

        // NEW DEPLOYMENT PATH - no existing config found.
        // Explicit args override the recommendation's defaults.
        let final_provider = args.provider
            .as_ref()
            .and_then(|p| CloudProvider::from_str(p).ok())
            .unwrap_or(recommendation.provider.clone());

        let final_machine = args.machine_type
            .clone()
            .unwrap_or(recommendation.machine_type.clone());

        let final_region = args.region
            .clone()
            .unwrap_or(recommendation.region.clone());

        let final_port = args.port
            .unwrap_or(recommendation.port);

        // Get repository info
        let repositories = match client.list_project_repositories(&project_id).await {
            Ok(repos) => repos,
            Err(e) => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::NetworkError,
                    &format!("Failed to get repositories: {}", e),
                    Some(vec!["Ensure a repository is connected to the project"]),
                ));
            }
        };

        // Smart repository selection: match local git remote or find non-gitops repo
        let repo = match find_matching_repository(&repositories.repositories, &self.project_path) {
            Some(r) => r,
            None => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::ResourceUnavailable,
                    "No repository connected to project",
                    Some(vec![
                        "Connect a GitHub repository to the project first",
                        "Use the platform UI to connect a repository",
                    ]),
                ));
            }
        };

        tracing::info!(
            "Deploy service: Using repository {} (id: {}), default_branch: {:?}",
            repo.repository_full_name,
            repo.repository_id,
            repo.default_branch
        );

        // Use resolved environment ID from earlier
        if resolved_env_id.is_empty() {
            return Ok(format_error_for_llm(
                "deploy_service",
                ErrorCategory::ResourceUnavailable,
                "No environment found for project",
                Some(vec!["Create an environment in the platform first"]),
            ));
        }

        // Build deployment config request
        // Derive dockerfile path and build context from DockerfileInfo
        //
        // IMPORTANT: Paths must be relative to the REPO ROOT for Cloud Runner.
        // Cloud Runner clones the GitHub repo and builds from there.
        //
        // Example: User analyzes path="services/contact-intelligence" which has a Dockerfile.
        // The GitHub repo structure is:
        //   repo-root/
        //     services/
        //       contact-intelligence/
        //         Dockerfile
        //
        // Cloud Runner needs:
        //   dockerfile: "services/contact-intelligence/Dockerfile"
        //   context: "services/contact-intelligence"
        //
        // NOT:
        //   dockerfile: "Dockerfile", context: "." (would look at repo root)
        let (dockerfile_path, build_context) = analysis.docker_analysis
            .as_ref()
            .and_then(|d| d.dockerfiles.first())
            .map(|df| {
                // Get dockerfile filename (e.g., "Dockerfile" or "Dockerfile.prod")
                let dockerfile_name = df.path.file_name()
                    .map(|n| n.to_string_lossy().to_string())
                    .unwrap_or_else(|| "Dockerfile".to_string());

                // Derive dockerfile's directory relative to analysis_path
                let analysis_relative_dir = df.path.parent()
                    .and_then(|p| p.strip_prefix(&analysis_path).ok())
                    .map(|p| p.to_string_lossy().to_string())
                    .unwrap_or_default();

                // Build paths relative to REPO ROOT by prepending args.path (the subdirectory)
                // This ensures Cloud Runner finds the Dockerfile in the cloned repo
                let subpath = args.path.as_deref().unwrap_or("");

                if subpath.is_empty() {
                    // Analyzing repo root - use paths as-is
                    if analysis_relative_dir.is_empty() {
                        (dockerfile_name, ".".to_string())
                    } else {
                        (format!("{}/{}", analysis_relative_dir, dockerfile_name), analysis_relative_dir)
                    }
                } else {
                    // Analyzing a subdirectory - prepend subpath to make repo-root-relative
                    if analysis_relative_dir.is_empty() {
                        // Dockerfile at root of analyzed subdir
                        // e.g., subpath="services/contact-intelligence" -> dockerfile="services/contact-intelligence/Dockerfile"
                        (format!("{}/{}", subpath, dockerfile_name), subpath.to_string())
                    } else {
                        // Dockerfile in nested dir within analyzed subdir
                        // e.g., subpath="services", analysis_relative_dir="contact-intelligence"
                        let full_context = format!("{}/{}", subpath, analysis_relative_dir);
                        (format!("{}/{}", full_context, dockerfile_name), full_context)
                    }
                }
            })
            .unwrap_or_else(|| {
                // No dockerfile found - use subpath as context if provided, else root
                let subpath = args.path.as_deref().unwrap_or("");
                if subpath.is_empty() {
                    ("Dockerfile".to_string(), ".".to_string())
                } else {
                    (format!("{}/Dockerfile", subpath), subpath.to_string())
                }
            });

        tracing::debug!(
            "Deploy service docker config: dockerfile_path={}, build_context={}, subpath={:?}",
            dockerfile_path,
            build_context,
            args.path
        );

        let cloud_runner_config = build_cloud_runner_config(
            &final_provider,
            &final_region,
            &final_machine,
            args.is_public,
            recommendation.health_check_path.as_deref(),
        );

        // Both legacy (dockerfile_path/build_context) and new (dockerfile/context)
        // field pairs are populated — presumably for API compatibility; TODO confirm.
        let config_request = CreateDeploymentConfigRequest {
            project_id: project_id.clone(),
            service_name: service_name.clone(),
            repository_id: repo.repository_id,
            repository_full_name: repo.repository_full_name.clone(),
            dockerfile_path: Some(dockerfile_path.clone()),
            dockerfile: Some(dockerfile_path.clone()),
            build_context: Some(build_context.clone()),
            context: Some(build_context.clone()),
            port: final_port as i32,
            branch: repo.default_branch.clone().unwrap_or_else(|| "main".to_string()),
            target_type: recommendation.target.as_str().to_string(),
            cloud_provider: final_provider.as_str().to_string(),
            environment_id: resolved_env_id.clone(),
            cluster_id: None, // Cloud Runner doesn't need cluster
            registry_id: None, // Auto-provision
            auto_deploy_enabled: true,
            is_public: Some(args.is_public),
            cloud_runner_config: Some(cloud_runner_config),
        };

        // Create config
        let config = match client.create_deployment_config(&config_request).await {
            Ok(c) => c,
            Err(e) => {
                return Ok(format_api_error("deploy_service", e));
            }
        };

        // Trigger deployment
        let trigger_request = TriggerDeploymentRequest {
            project_id: project_id.clone(),
            config_id: config.id.clone(),
            commit_sha: None,
        };

        match client.trigger_deployment(&trigger_request).await {
            Ok(response) => {
                let result = json!({
                    "status": "deployed",
                    "deployment_mode": "NEW_DEPLOYMENT",
                    "config_id": config.id,
                    "task_id": response.backstage_task_id,
                    "service_name": service_name,
                    "environment": {
                        "id": resolved_env_id,
                        "name": resolved_env_name,
                        "is_production": is_production,
                    },
                    "provider": final_provider.as_str(),
"machine_type": final_machine, + "region": final_region, + "port": final_port, + "docker_config": { + "dockerfile_path": dockerfile_path, + "build_context": build_context, + }, + "message": format!( + "NEW deployment started for '{}' on {} environment. Task ID: {}", + service_name, resolved_env_name, response.backstage_task_id + ), + "next_steps": [ + format!("Monitor progress: use get_deployment_status with task_id '{}'", response.backstage_task_id), + "View logs after deployment: use get_service_logs", + ], + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| DeployServiceError(format!("Failed to serialize: {}", e))) + } + Err(e) => Ok(format_api_error("deploy_service", e)), + } + } +} + +/// Extract service name from path +fn get_service_name(path: &PathBuf) -> String { + path.file_name() + .and_then(|n| n.to_str()) + .map(|n| n.to_lowercase().replace(['_', ' '], "-")) + .unwrap_or_else(|| "service".to_string()) +} + +/// Detect the git remote URL from a directory +fn detect_git_remote(project_path: &PathBuf) -> Option { + let output = Command::new("git") + .args(["remote", "get-url", "origin"]) + .current_dir(project_path) + .output() + .ok()?; + + if output.status.success() { + let url = String::from_utf8(output.stdout).ok()?; + Some(url.trim().to_string()) + } else { + None + } +} + +/// Parse repository full name from git remote URL +/// Handles both SSH (git@github.com:owner/repo.git) and HTTPS (https://github.com/owner/repo.git) +fn parse_repo_from_url(url: &str) -> Option { + let url = url.trim(); + + // SSH format: git@github.com:owner/repo.git + if url.starts_with("git@") { + let parts: Vec<&str> = url.split(':').collect(); + if parts.len() == 2 { + let path = parts[1].trim_end_matches(".git"); + return Some(path.to_string()); + } + } + + // HTTPS format: https://github.com/owner/repo.git + if url.starts_with("https://") || url.starts_with("http://") { + if let Some(path) = url.split('/').skip(3).collect::>().join("/").strip_suffix(".git") { + 
return Some(path.to_string()); + } + // Without .git suffix + let path: String = url.split('/').skip(3).collect::>().join("/"); + if !path.is_empty() { + return Some(path); + } + } + + None +} + +/// Find repository matching local git remote, or fall back to non-gitops repo +fn find_matching_repository<'a>( + repositories: &'a [ProjectRepository], + project_path: &PathBuf, +) -> Option<&'a ProjectRepository> { + // First, try to detect from local git remote + if let Some(detected_name) = detect_git_remote(project_path).and_then(|url| parse_repo_from_url(&url)) { + tracing::debug!("Detected local git remote: {}", detected_name); + + if let Some(repo) = repositories.iter().find(|r| { + r.repository_full_name.eq_ignore_ascii_case(&detected_name) + }) { + tracing::debug!("Matched detected repo: {}", repo.repository_full_name); + return Some(repo); + } + } + + // Fall back: find first non-GitOps repository + // GitOps repos are typically infrastructure/config repos, not application repos + if let Some(repo) = repositories.iter().find(|r| { + r.is_primary_git_ops != Some(true) && + !r.repository_full_name.to_lowercase().contains("infrastructure") && + !r.repository_full_name.to_lowercase().contains("gitops") + }) { + tracing::debug!("Using non-gitops repo: {}", repo.repository_full_name); + return Some(repo); + } + + // Last resort: first repo + repositories.first() +} + +/// Format a PlatformApiError for LLM consumption +fn format_api_error(tool_name: &str, error: PlatformApiError) -> String { + match error { + PlatformApiError::Unauthorized => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + "Not authenticated - please run `sync-ctl auth login` first", + Some(vec![ + "The user needs to authenticate with the Syncable platform", + "Run: sync-ctl auth login", + ]), + ), + PlatformApiError::NotFound(msg) => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + &format!("Resource not found: {}", msg), + Some(vec![ + "The project 
ID may be incorrect", + "Use list_projects to find valid project IDs", + ]), + ), + PlatformApiError::PermissionDenied(msg) => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + &format!("Permission denied: {}", msg), + Some(vec!["Contact the project admin for access"]), + ), + PlatformApiError::RateLimited => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + "Rate limit exceeded - please try again later", + Some(vec!["Wait a moment before retrying"]), + ), + PlatformApiError::HttpError(e) => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + &format!("Network error: {}", e), + Some(vec!["Check network connectivity"]), + ), + PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + None, + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec!["Check the error message for details"]), + ), + PlatformApiError::ServerError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec!["Try again later"]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec!["Check your internet connection"]), + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(DeployServiceTool::NAME, "deploy_service"); + } + + #[test] + fn test_default_preview_only() { + assert!(default_preview()); + } + + #[test] + fn test_get_service_name() { + assert_eq!( + get_service_name(&PathBuf::from("/path/to/my_service")), + "my-service" + ); + assert_eq!( + get_service_name(&PathBuf::from("/path/to/MyApp")), + "myapp" + ); + assert_eq!( + 
get_service_name(&PathBuf::from("/path/to/api-service")), + "api-service" + ); + } + + #[test] + fn test_tool_creation() { + let tool = DeployServiceTool::new(PathBuf::from("/test")); + assert!(format!("{:?}", tool).contains("DeployServiceTool")); + } + + #[tokio::test] + async fn test_nonexistent_path_returns_error() { + let tool = DeployServiceTool::new(PathBuf::from("/nonexistent/path/that/does/not/exist")); + let args = DeployServiceArgs { + path: Some("nope".to_string()), + provider: None, + machine_type: None, + region: None, + port: None, + preview_only: true, + }; + + let result = tool.call(args).await.unwrap(); + assert!(result.contains("error") || result.contains("not found") || result.contains("Path not found")); + } +} diff --git a/src/agent/tools/platform/get_deployment_status.rs b/src/agent/tools/platform/get_deployment_status.rs new file mode 100644 index 00000000..834674f9 --- /dev/null +++ b/src/agent/tools/platform/get_deployment_status.rs @@ -0,0 +1,328 @@ +//! Get deployment status tool for the agent +//! +//! Allows the agent to check the status of a deployment task. 
use rig::completion::ToolDefinition;
use rig::tool::Tool;
use serde::{Deserialize, Serialize};
use serde_json::json;

use crate::agent::tools::error::{ErrorCategory, format_error_for_llm};
use crate::platform::api::{PlatformApiClient, PlatformApiError};

/// Arguments for the get deployment status tool
#[derive(Debug, Deserialize)]
pub struct GetDeploymentStatusArgs {
    /// The task ID to check status for
    pub task_id: String,
    /// Optional project ID to check actual deployment status (for public_url)
    pub project_id: Option<String>,
    /// Optional service name to find the specific deployment
    pub service_name: Option<String>,
}

/// Error type for get deployment status operations
#[derive(Debug, thiserror::Error)]
#[error("Get deployment status error: {0}")]
pub struct GetDeploymentStatusError(String);

/// Tool to get deployment task status
///
/// Returns the current status of a deployment including progress percentage,
/// current step, and overall status.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct GetDeploymentStatusTool;

impl GetDeploymentStatusTool {
    /// Create a new GetDeploymentStatusTool (stateless unit struct)
    pub fn new() -> Self {
        Self
    }
}

impl Tool for GetDeploymentStatusTool {
    const NAME: &'static str = "get_deployment_status";

    type Error = GetDeploymentStatusError;
    type Args = GetDeploymentStatusArgs;
    type Output = String;

    async fn definition(&self, _prompt: String) -> ToolDefinition {
        ToolDefinition {
            name: Self::NAME.to_string(),
            // NOTE: this description is prompt material read by the LLM at runtime —
            // it is behavior-bearing text, not documentation for maintainers.
            description: r#"Get the status of a deployment task and optionally check the actual service status.

Returns the current status of a deployment, including progress percentage,
current step, overall status, and optionally the public URL if the service is ready.

**CRITICAL - DO NOT POLL IN A LOOP:**
After checking status, you MUST inform the user and WAIT for them to ask again.
DO NOT call this tool repeatedly in succession. Deployments take 1-3 minutes.

The response includes an "action" field - follow it:
- "STOP_POLLING": Deployment is done (success or failure). Tell the user.
- "INFORM_USER_AND_WAIT": Tell user the current status and wait for them to ask for updates.

**IMPORTANT for Cloud Runner:**
The task may show "completed" when infrastructure is provisioned, but the actual
service build and deployment takes longer. Pass project_id and service_name to
also check if the service has a public URL (meaning it's actually ready).

**Status Values:**
- Task status: "processing", "completed", "failed"
- Overall status: "generating", "building", "deploying", "healthy", "failed"
- Service ready: Only when public_url is available

**Prerequisites:**
- User must be authenticated via `sync-ctl auth login`
- A deployment must have been triggered (use trigger_deployment first)

**Use Cases:**
- Check deployment status ONCE after triggering, then inform user
- Let user ask for updates when they want them
- Get error details if deployment failed"#
                .to_string(),
            parameters: json!({
                "type": "object",
                "properties": {
                    "task_id": {
                        "type": "string",
                        "description": "The deployment task ID (from trigger_deployment response)"
                    },
                    "project_id": {
                        "type": "string",
                        "description": "Optional: Project ID to check actual service status and public URL"
                    },
                    "service_name": {
                        "type": "string",
                        "description": "Optional: Service name to find the specific deployment"
                    }
                },
                "required": ["task_id"]
            }),
        }
    }

    async fn call(&self, args: Self::Args) -> Result<Self::Output, Self::Error> {
        // Validate task_id before making any network call
        if args.task_id.trim().is_empty() {
            return Ok(format_error_for_llm(
                "get_deployment_status",
                ErrorCategory::ValidationFailed,
                "task_id cannot be empty",
                Some(vec![
                    "Use trigger_deployment to start a deployment and get a task_id",
                    "Use list_deployments to find previous deployment task IDs",
                ]),
            ));
        }

        // Create the API client
        let client = match PlatformApiClient::new() {
            Ok(c) =>
                c,
            Err(e) => {
                return Ok(format_api_error("get_deployment_status", e));
            }
        };

        // Get the deployment status (Backstage task)
        match client.get_deployment_status(&args.task_id).await {
            Ok(status) => {
                let task_complete = status.status == "completed";
                let is_failed = status.status == "failed" || status.overall_status == "failed";
                let is_healthy = status.overall_status == "healthy";

                // Also check actual deployment if project_id and service_name provided
                // This is crucial for Cloud Runner where task completes but service takes longer
                let (service_status, public_url, service_ready) = if let (Some(project_id), Some(service_name)) = (&args.project_id, &args.service_name) {
                    match client.list_deployments(project_id, Some(10)).await {
                        Ok(paginated) => {
                            // Find the deployment for this service
                            let deployment = paginated.data.iter()
                                .find(|d| d.service_name.eq_ignore_ascii_case(service_name));

                            match deployment {
                                Some(d) => (
                                    Some(d.status.clone()),
                                    d.public_url.clone(),
                                    // "ready" only when a URL exists AND the service is running
                                    d.public_url.is_some() && d.status == "running"
                                ),
                                None => (None, None, false)
                            }
                        }
                        // Listing failure is non-fatal; fall back to task-only status
                        Err(_) => (None, None, false)
                    }
                } else {
                    (None, None, false)
                };

                // True completion = task done AND (service has URL or no service check requested)
                let truly_ready = if args.project_id.is_some() {
                    service_ready
                } else {
                    is_healthy
                };

                let mut result = json!({
                    "success": true,
                    "task_id": args.task_id,
                    "task_status": status.status,
                    "task_progress": status.progress,
                    "current_step": status.current_step,
                    "overall_status": status.overall_status,
                    "overall_message": status.overall_message,
                    "task_complete": task_complete,
                    "is_failed": is_failed,
                    "service_ready": truly_ready
                });

                // Add service-specific info if we checked
                if let Some(svc_status) = service_status {
                    result["service_status"] = json!(svc_status);
                }
                if let Some(url) = &public_url {
                    result["public_url"] = json!(url);
                }

                // Add error details if failed
                if let Some(error) = &status.error {
                    result["error"] = json!(error);
                }

                // Add next steps based on actual status
                // IMPORTANT: Guide agent to STOP polling and inform user
                if is_failed {
                    result["next_steps"] = json!([
                        "STOP - Deployment failed. Inform the user of the error.",
                        "Review the error message for details",
                        "Check the deployment configuration",
                        "Verify the code builds successfully locally"
                    ]);
                    result["action"] = json!("STOP_POLLING");
                } else if truly_ready && public_url.is_some() {
                    result["next_steps"] = json!([
                        format!("STOP - Service is live at: {}", public_url.as_ref().unwrap()),
                        "Deployment completed successfully!",
                        "Inform the user their service is ready"
                    ]);
                    result["action"] = json!("STOP_POLLING");
                } else if task_complete && !truly_ready {
                    result["next_steps"] = json!([
                        "STOP POLLING - Inform the user that deployment is in progress",
                        "Infrastructure is ready, Cloud Runner is building the container",
                        "Tell the user to wait 1-2 minutes, then they can ask you to check status again",
                        "DO NOT call get_deployment_status again automatically - wait for user to ask"
                    ]);
                    result["action"] = json!("INFORM_USER_AND_WAIT");
                    result["estimated_wait"] = json!("1-2 minutes");
                    result["note"] = json!("Task shows 100% but container is still being built/deployed. This is normal. DO NOT poll repeatedly - inform the user and wait for them to ask for status.");
                } else if !task_complete {
                    result["next_steps"] = json!([
                        format!("STOP POLLING - Deployment is {} ({}% complete)", status.overall_status, status.progress),
                        "Inform the user of current progress",
                        "Tell them to wait and ask again in 30 seconds if they want an update",
                        "DO NOT call get_deployment_status again automatically"
                    ]);
                    result["action"] = json!("INFORM_USER_AND_WAIT");
                }

                serde_json::to_string_pretty(&result)
                    .map_err(|e| GetDeploymentStatusError(format!("Failed to serialize: {}", e)))
            }
            Err(e) => Ok(format_api_error("get_deployment_status", e)),
        }
    }
}

/// Format a PlatformApiError for LLM consumption
fn format_api_error(tool_name: &str, error: PlatformApiError) -> String {
    match error {
        PlatformApiError::Unauthorized => format_error_for_llm(
            tool_name,
            ErrorCategory::PermissionDenied,
            "Not authenticated - please run `sync-ctl auth login` first",
            Some(vec![
                "The user needs to authenticate with the Syncable platform",
                "Run: sync-ctl auth login",
            ]),
        ),
        PlatformApiError::NotFound(msg) => format_error_for_llm(
            tool_name,
            ErrorCategory::ResourceUnavailable,
            &format!("Deployment task not found: {}", msg),
            Some(vec![
                "The task_id may be incorrect or expired",
                "Use trigger_deployment to start a new deployment",
            ]),
        ),
        PlatformApiError::PermissionDenied(msg) => format_error_for_llm(
            tool_name,
            ErrorCategory::PermissionDenied,
            &format!("Permission denied: {}", msg),
            Some(vec![
                "The user does not have access to this deployment",
                "Contact the project admin for access",
            ]),
        ),
        PlatformApiError::RateLimited => format_error_for_llm(
            tool_name,
            ErrorCategory::ResourceUnavailable,
            "Rate limit exceeded - please try again later",
            Some(vec!["Wait a moment before retrying"]),
        ),
        PlatformApiError::HttpError(e) => format_error_for_llm(
            tool_name,
            ErrorCategory::NetworkError,
&format!("Network error: {}", e), + Some(vec![ + "Check network connectivity", + "The Syncable API may be temporarily unavailable", + ]), + ), + PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + Some(vec!["This may be a temporary API issue"]), + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec!["Check the error message for details"]), + ), + PlatformApiError::ServerError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec![ + "The Syncable API is experiencing issues", + "Try again later", + ]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec![ + "Check your internet connection", + "The Syncable API may be temporarily unavailable", + ]), + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(GetDeploymentStatusTool::NAME, "get_deployment_status"); + } + + #[test] + fn test_tool_creation() { + let tool = GetDeploymentStatusTool::new(); + assert!(format!("{:?}", tool).contains("GetDeploymentStatusTool")); + } +} diff --git a/src/agent/tools/platform/get_service_logs.rs b/src/agent/tools/platform/get_service_logs.rs new file mode 100644 index 00000000..66a24764 --- /dev/null +++ b/src/agent/tools/platform/get_service_logs.rs @@ -0,0 +1,270 @@ +//! Get service logs tool for the agent +//! +//! Allows the agent to fetch container logs for deployed services. 
+
+use rig::completion::ToolDefinition;
+use rig::tool::Tool;
+use serde::{Deserialize, Serialize};
+use serde_json::json;
+
+use crate::agent::tools::error::{ErrorCategory, format_error_for_llm};
+use crate::platform::api::{PlatformApiClient, PlatformApiError};
+
+/// Arguments for the get service logs tool
+#[derive(Debug, Deserialize)]
+pub struct GetServiceLogsArgs {
+    /// Service ID (from list_deployments output)
+    pub service_id: String,
+    /// Start time filter (ISO timestamp, optional)
+    pub start: Option<String>,
+    /// End time filter (ISO timestamp, optional)
+    pub end: Option<String>,
+    /// Maximum number of log lines to return (default: 100)
+    pub limit: Option<u32>,
+}
+
+/// Error type for get service logs operations
+#[derive(Debug, thiserror::Error)]
+#[error("Get service logs error: {0}")]
+pub struct GetServiceLogsError(String);
+
+/// Tool to get container logs for a deployed service
+///
+/// Returns recent log entries with timestamps and container metadata.
+/// Supports time filtering and line limits for efficient log retrieval.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct GetServiceLogsTool;
+
+impl GetServiceLogsTool {
+    /// Create a new GetServiceLogsTool
+    pub fn new() -> Self {
+        Self
+    }
+}
+
+impl Tool for GetServiceLogsTool {
+    const NAME: &'static str = "get_service_logs";
+
+    type Error = GetServiceLogsError;
+    type Args = GetServiceLogsArgs;
+    type Output = String;
+
+    async fn definition(&self, _prompt: String) -> ToolDefinition {
+        ToolDefinition {
+            name: Self::NAME.to_string(),
+            description: r#"Get container logs for a deployed service.
+
+Returns recent log entries from the service's containers with timestamps
+and metadata. Useful for debugging and monitoring deployed services.
+
+**Parameters:**
+- service_id: The deployment/service ID (from list_deployments output)
+- start: Optional ISO timestamp to filter logs from (e.g., "2024-01-01T00:00:00Z")
+- end: Optional ISO timestamp to filter logs until
+- limit: Optional max number of log lines (default: 100)
+
+**Prerequisites:**
+- User must be authenticated via `sync-ctl auth login`
+- Service must be deployed (use list_deployments to find service IDs)
+
+**Use Cases:**
+- Debug application errors by viewing recent logs
+- Monitor service behavior after deployment
+- Investigate issues by filtering logs to a specific time range
+- View startup logs to verify configuration"#
+                .to_string(),
+            parameters: json!({
+                "type": "object",
+                "properties": {
+                    "service_id": {
+                        "type": "string",
+                        "description": "The deployment/service ID (from list_deployments output)"
+                    },
+                    "start": {
+                        "type": "string",
+                        "description": "Optional: ISO timestamp to filter logs from (e.g., \"2024-01-01T00:00:00Z\")"
+                    },
+                    "end": {
+                        "type": "string",
+                        "description": "Optional: ISO timestamp to filter logs until"
+                    },
+                    "limit": {
+                        "type": "integer",
+                        "description": "Optional: max number of log lines to return (default 100)"
+                    }
+                },
+                "required": ["service_id"]
+            }),
+        }
+    }
+
+    async fn call(&self, args: Self::Args) -> Result<Self::Output, Self::Error> {
+        // Validate service_id
+        if args.service_id.trim().is_empty() {
+            return Ok(format_error_for_llm(
+                "get_service_logs",
+                ErrorCategory::ValidationFailed,
+                "service_id cannot be empty",
+                Some(vec![
+                    "Use list_deployments to find valid service IDs",
+                    "The service_id is the 'id' field from deployment entries",
+                ]),
+            ));
+        }
+
+        // Create the API client
+        let client = match PlatformApiClient::new() {
+            Ok(c) => c,
+            Err(e) => {
+                return Ok(format_api_error("get_service_logs", e));
+            }
+        };
+
+        // Fetch logs
+        let start_ref = args.start.as_deref();
+        let end_ref = args.end.as_deref();
+
+        match client
+            .get_service_logs(&args.service_id, start_ref, end_ref, args.limit)
+            .await
+ { + Ok(response) => { + if response.data.is_empty() { + return Ok(json!({ + "success": true, + "logs": [], + "count": 0, + "stats": { + "entries_returned": 0, + "query_time_ms": response.stats.query_time_ms + }, + "message": "No logs found for this service. The service may not have produced any logs yet, or the time filter may be too restrictive." + }) + .to_string()); + } + + // Format log entries for readability + let log_entries: Vec = response + .data + .iter() + .map(|entry| { + json!({ + "timestamp": entry.timestamp, + "message": entry.message, + "labels": entry.labels + }) + }) + .collect(); + + let result = json!({ + "success": true, + "logs": log_entries, + "count": response.data.len(), + "stats": { + "entries_returned": response.stats.entries_returned, + "query_time_ms": response.stats.query_time_ms + }, + "message": format!("Retrieved {} log entries", response.data.len()) + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| GetServiceLogsError(format!("Failed to serialize: {}", e))) + } + Err(e) => Ok(format_api_error("get_service_logs", e)), + } + } +} + +/// Format a PlatformApiError for LLM consumption +fn format_api_error(tool_name: &str, error: PlatformApiError) -> String { + match error { + PlatformApiError::Unauthorized => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + "Not authenticated - please run `sync-ctl auth login` first", + Some(vec![ + "The user needs to authenticate with the Syncable platform", + "Run: sync-ctl auth login", + ]), + ), + PlatformApiError::NotFound(msg) => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + &format!("Service not found: {}", msg), + Some(vec![ + "The service_id may be incorrect or the service no longer exists", + "Use list_deployments to find valid service IDs", + ]), + ), + PlatformApiError::PermissionDenied(msg) => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + &format!("Permission denied: {}", msg), + Some(vec![ + "The 
user does not have access to view logs for this service", + "Contact the project admin for access", + ]), + ), + PlatformApiError::RateLimited => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + "Rate limit exceeded - please try again later", + Some(vec!["Wait a moment before retrying"]), + ), + PlatformApiError::HttpError(e) => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + &format!("Network error: {}", e), + Some(vec![ + "Check network connectivity", + "The Syncable API may be temporarily unavailable", + ]), + ), + PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + Some(vec!["This may be a temporary API issue"]), + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec!["Check the error message for details"]), + ), + PlatformApiError::ServerError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec![ + "The Syncable API is experiencing issues", + "Try again later", + ]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec![ + "Check your internet connection", + "The Syncable API may be temporarily unavailable", + ]), + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(GetServiceLogsTool::NAME, "get_service_logs"); + } + + #[test] + fn test_tool_creation() { + let tool = GetServiceLogsTool::new(); + assert!(format!("{:?}", tool).contains("GetServiceLogsTool")); + } +} diff --git a/src/agent/tools/platform/list_deployment_capabilities.rs b/src/agent/tools/platform/list_deployment_capabilities.rs new file mode 
100644 index 00000000..c6d129bc --- /dev/null +++ b/src/agent/tools/platform/list_deployment_capabilities.rs @@ -0,0 +1,313 @@ +//! List deployment capabilities tool for the agent +//! +//! Wraps the existing `get_provider_deployment_statuses` function to allow +//! the agent to discover available deployment options for a project. + +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::platform::api::{PlatformApiClient, PlatformApiError}; +use crate::wizard::get_provider_deployment_statuses; + +/// Arguments for the list deployment capabilities tool +#[derive(Debug, Deserialize)] +pub struct ListDeploymentCapabilitiesArgs { + /// The project UUID to check capabilities for + pub project_id: String, +} + +/// Error type for list deployment capabilities operations +#[derive(Debug, thiserror::Error)] +#[error("List deployment capabilities error: {0}")] +pub struct ListDeploymentCapabilitiesError(String); + +/// Tool to list available deployment capabilities for a project +/// +/// Returns information about connected providers, available clusters, +/// registries, and Cloud Run availability. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ListDeploymentCapabilitiesTool; + +impl ListDeploymentCapabilitiesTool { + /// Create a new ListDeploymentCapabilitiesTool + pub fn new() -> Self { + Self + } +} + +impl Tool for ListDeploymentCapabilitiesTool { + const NAME: &'static str = "list_deployment_capabilities"; + + type Error = ListDeploymentCapabilitiesError; + type Args = ListDeploymentCapabilitiesArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"List available deployment capabilities for a project. 
+ +Returns information about which cloud providers are connected and what deployment +targets are available (clusters, registries, Cloud Run). + +**Parameters:** +- project_id: The UUID of the project to check + +**Prerequisites:** +- User must be authenticated via `sync-ctl auth login` +- User must have access to the project + +**What it returns:** +- providers: Array of provider status objects with: + - provider: Provider name (Gcp, Hetzner, Aws, Azure, Scaleway, Cyso) + - is_available: Whether the provider is currently supported (false = coming soon) + - is_connected: Whether the provider has cloud credentials + - cloud_runner_available: Whether Cloud Run/serverless is available + - clusters: Array of available Kubernetes clusters + - registries: Array of available container registries + - summary: Human-readable status + +**Provider Availability:** +- Available now: GCP, Hetzner +- Coming soon: AWS, Azure, Scaleway, Cyso Cloud + +**Use Cases:** +- Before creating a deployment, check what options are available +- Verify a provider is connected before attempting deployment +- Find cluster and registry IDs for deployment configuration"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "The UUID of the project" + } + }, + "required": ["project_id"] + }), + } + } + + async fn call(&self, args: Self::Args) -> Result { + // Validate project_id + if args.project_id.trim().is_empty() { + return Ok(format_error_for_llm( + "list_deployment_capabilities", + ErrorCategory::ValidationFailed, + "project_id cannot be empty", + Some(vec![ + "Use list_projects to find valid project IDs", + "Use current_context to get the currently selected project", + ]), + )); + } + + // Create the API client + let client = match PlatformApiClient::new() { + Ok(c) => c, + Err(e) => { + return Ok(format_api_error("list_deployment_capabilities", e)); + } + }; + + // Get provider deployment statuses + match 
get_provider_deployment_statuses(&client, &args.project_id).await { + Ok(statuses) => { + // Count available and connected providers (only available providers can deploy) + let available_connected_count = statuses + .iter() + .filter(|s| s.provider.is_available() && s.is_connected) + .count(); + let total_clusters: usize = statuses.iter().map(|s| s.clusters.len()).sum(); + let total_registries: usize = statuses.iter().map(|s| s.registries.len()).sum(); + + // Build provider data + let provider_data: Vec = statuses + .iter() + .map(|s| { + let clusters: Vec = s + .clusters + .iter() + .map(|c| { + json!({ + "id": c.id, + "name": c.name, + "region": c.region, + "is_healthy": c.is_healthy, + }) + }) + .collect(); + + let registries: Vec = s + .registries + .iter() + .map(|r| { + json!({ + "id": r.id, + "name": r.name, + "region": r.region, + "is_ready": r.is_ready, + }) + }) + .collect(); + + json!({ + "provider": format!("{:?}", s.provider), + "is_available": s.provider.is_available(), + "is_connected": s.is_connected, + "cloud_runner_available": s.cloud_runner_available, + "clusters": clusters, + "registries": registries, + "summary": if s.provider.is_available() { + s.summary.clone() + } else { + "Coming soon".to_string() + }, + }) + }) + .collect(); + + // Build summary + let summary = if available_connected_count == 0 { + "No available providers connected. 
Connect GCP or Hetzner in platform settings.".to_string() + } else { + let mut parts = vec![format!("{} provider{} ready", available_connected_count, if available_connected_count == 1 { "" } else { "s" })]; + if total_clusters > 0 { + parts.push(format!("{} cluster{}", total_clusters, if total_clusters == 1 { "" } else { "s" })); + } + if total_registries > 0 { + parts.push(format!("{} registr{}", total_registries, if total_registries == 1 { "y" } else { "ies" })); + } + parts.join(", ") + }; + + let result = json!({ + "success": true, + "project_id": args.project_id, + "providers": provider_data, + "summary": summary, + "available_connected_count": available_connected_count, + "total_clusters": total_clusters, + "total_registries": total_registries, + "coming_soon_providers": ["AWS", "Azure", "Scaleway", "Cyso Cloud"], + "next_steps": if available_connected_count > 0 { + vec![ + "Use analyze_project to discover Dockerfiles in the project", + "Use create_deployment_config to create a deployment configuration", + "For Cloud Run deployments, no cluster is needed", + "Note: AWS, Azure, Scaleway, and Cyso Cloud are coming soon" + ] + } else { + vec![ + "Use open_provider_settings to connect GCP or Hetzner", + "After connecting, run this tool again to see available options", + "Note: AWS, Azure, Scaleway, and Cyso Cloud are coming soon" + ] + } + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| ListDeploymentCapabilitiesError(format!("Failed to serialize: {}", e))) + } + Err(e) => Ok(format_api_error("list_deployment_capabilities", e)), + } + } +} + +/// Format a PlatformApiError for LLM consumption +fn format_api_error(tool_name: &str, error: PlatformApiError) -> String { + match error { + PlatformApiError::Unauthorized => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + "Not authenticated - please run `sync-ctl auth login` first", + Some(vec![ + "The user needs to authenticate with the Syncable platform", + "Run: sync-ctl auth 
login", + ]), + ), + PlatformApiError::NotFound(msg) => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + &format!("Resource not found: {}", msg), + Some(vec![ + "The project ID may be incorrect", + "Use list_projects to find valid project IDs", + ]), + ), + PlatformApiError::PermissionDenied(msg) => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + &format!("Permission denied: {}", msg), + Some(vec![ + "The user does not have access to this project", + "Contact the project admin for access", + ]), + ), + PlatformApiError::RateLimited => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + "Rate limit exceeded - please try again later", + Some(vec!["Wait a moment before retrying"]), + ), + PlatformApiError::HttpError(e) => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + &format!("Network error: {}", e), + Some(vec![ + "Check network connectivity", + "The Syncable API may be temporarily unavailable", + ]), + ), + PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + Some(vec!["This may be a temporary API issue"]), + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec!["Check the error message for details"]), + ), + PlatformApiError::ServerError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec![ + "The Syncable API is experiencing issues", + "Try again later", + ]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec![ + "Check your internet connection", + "The Syncable API may be temporarily unavailable", + ]), + ), + } 
+} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(ListDeploymentCapabilitiesTool::NAME, "list_deployment_capabilities"); + } + + #[test] + fn test_tool_creation() { + let tool = ListDeploymentCapabilitiesTool::new(); + assert!(format!("{:?}", tool).contains("ListDeploymentCapabilitiesTool")); + } +} diff --git a/src/agent/tools/platform/list_deployment_configs.rs b/src/agent/tools/platform/list_deployment_configs.rs new file mode 100644 index 00000000..d9330212 --- /dev/null +++ b/src/agent/tools/platform/list_deployment_configs.rs @@ -0,0 +1,239 @@ +//! List deployment configs tool for the agent +//! +//! Allows the agent to list deployment configurations for a project. + +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::platform::api::{PlatformApiClient, PlatformApiError}; + +/// Arguments for the list deployment configs tool +#[derive(Debug, Deserialize)] +pub struct ListDeploymentConfigsArgs { + /// The project ID to list deployment configs for + pub project_id: String, +} + +/// Error type for list deployment configs operations +#[derive(Debug, thiserror::Error)] +#[error("List deployment configs error: {0}")] +pub struct ListDeploymentConfigsError(String); + +/// Tool to list deployment configurations for a project +/// +/// Returns all deployment configs with service names, branches, target types, +/// and auto-deploy settings. 
+#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ListDeploymentConfigsTool; + +impl ListDeploymentConfigsTool { + /// Create a new ListDeploymentConfigsTool + pub fn new() -> Self { + Self + } +} + +impl Tool for ListDeploymentConfigsTool { + const NAME: &'static str = "list_deployment_configs"; + + type Error = ListDeploymentConfigsError; + type Args = ListDeploymentConfigsArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"List deployment configurations for a project. + +Returns all deployment configs associated with the project, including: +- Service name and branch +- Target type (kubernetes or cloud_runner) +- Auto-deploy status +- Port configuration + +**Prerequisites:** +- User must be authenticated via `sync-ctl auth login` +- A project must be selected (use select_project first) + +**Use Cases:** +- View available deployment configurations before triggering a deployment +- Check auto-deploy settings for services +- Find the config_id needed to trigger a deployment"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "The UUID of the project to list deployment configs for" + } + }, + "required": ["project_id"] + }), + } + } + + async fn call(&self, args: Self::Args) -> Result { + // Validate project_id + if args.project_id.trim().is_empty() { + return Ok(format_error_for_llm( + "list_deployment_configs", + ErrorCategory::ValidationFailed, + "project_id cannot be empty", + Some(vec![ + "Use list_projects to find valid project IDs", + "Use select_project to set the current project context", + ]), + )); + } + + // Create the API client + let client = match PlatformApiClient::new() { + Ok(c) => c, + Err(e) => { + return Ok(format_api_error("list_deployment_configs", e)); + } + }; + + // Fetch deployment configs + match 
client.list_deployment_configs(&args.project_id).await { + Ok(configs) => { + if configs.is_empty() { + return Ok(json!({ + "success": true, + "configs": [], + "count": 0, + "message": "No deployment configs found for this project. You may need to create a deployment configuration first." + }) + .to_string()); + } + + let config_list: Vec = configs + .iter() + .map(|config| { + json!({ + "id": config.id, + "service_name": config.service_name, + "repository": config.repository_full_name, + "branch": config.branch, + "target_type": config.target_type, + "port": config.port, + "auto_deploy_enabled": config.auto_deploy_enabled, + "deployment_strategy": config.deployment_strategy, + "environment_id": config.environment_id, + "created_at": config.created_at.to_rfc3339() + }) + }) + .collect(); + + let result = json!({ + "success": true, + "configs": config_list, + "count": configs.len(), + "message": format!("Found {} deployment configuration(s)", configs.len()) + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| ListDeploymentConfigsError(format!("Failed to serialize: {}", e))) + } + Err(e) => Ok(format_api_error("list_deployment_configs", e)), + } + } +} + +/// Format a PlatformApiError for LLM consumption +fn format_api_error(tool_name: &str, error: PlatformApiError) -> String { + match error { + PlatformApiError::Unauthorized => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + "Not authenticated - please run `sync-ctl auth login` first", + Some(vec![ + "The user needs to authenticate with the Syncable platform", + "Run: sync-ctl auth login", + ]), + ), + PlatformApiError::NotFound(msg) => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + &format!("Resource not found: {}", msg), + Some(vec![ + "The project ID may be incorrect", + "Use list_projects to find valid project IDs", + ]), + ), + PlatformApiError::PermissionDenied(msg) => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + 
&format!("Permission denied: {}", msg), + Some(vec![ + "The user does not have access to this project", + "Contact the project admin for access", + ]), + ), + PlatformApiError::RateLimited => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + "Rate limit exceeded - please try again later", + Some(vec!["Wait a moment before retrying"]), + ), + PlatformApiError::HttpError(e) => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + &format!("Network error: {}", e), + Some(vec![ + "Check network connectivity", + "The Syncable API may be temporarily unavailable", + ]), + ), + PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + Some(vec!["This may be a temporary API issue"]), + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec!["Check the error message for details"]), + ), + PlatformApiError::ServerError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec![ + "The Syncable API is experiencing issues", + "Try again later", + ]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec![ + "Check your internet connection", + "The Syncable API may be temporarily unavailable", + ]), + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(ListDeploymentConfigsTool::NAME, "list_deployment_configs"); + } + + #[test] + fn test_tool_creation() { + let tool = ListDeploymentConfigsTool::new(); + assert!(format!("{:?}", tool).contains("ListDeploymentConfigsTool")); + } +} diff --git a/src/agent/tools/platform/list_deployments.rs 
b/src/agent/tools/platform/list_deployments.rs new file mode 100644 index 00000000..a24aac5c --- /dev/null +++ b/src/agent/tools/platform/list_deployments.rs @@ -0,0 +1,247 @@ +//! List deployments tool for the agent +//! +//! Allows the agent to list recent deployments for a project. + +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::platform::api::{PlatformApiClient, PlatformApiError}; + +/// Arguments for the list deployments tool +#[derive(Debug, Deserialize)] +pub struct ListDeploymentsArgs { + /// The project ID to list deployments for + pub project_id: String, + /// Optional limit on number of deployments to return (default 10) + pub limit: Option<u32>, +} + +/// Error type for list deployments operations +#[derive(Debug, thiserror::Error)] +#[error("List deployments error: {0}")] +pub struct ListDeploymentsError(String); + +/// Tool to list recent deployments for a project +/// +/// Returns a paginated list of deployments with status, commit info, and public URLs. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ListDeploymentsTool; + +impl ListDeploymentsTool { + /// Create a new ListDeploymentsTool + pub fn new() -> Self { + Self + } +} + +impl Tool for ListDeploymentsTool { + const NAME: &'static str = "list_deployments"; + + type Error = ListDeploymentsError; + type Args = ListDeploymentsArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"List recent deployments for a project. + +Returns a list of deployments with their status, commit SHA, public URLs, +and creation timestamps.
+ +**Parameters:** +- project_id: The project UUID +- limit: Optional number of deployments to return (default 10) + +**Prerequisites:** +- User must be authenticated via `sync-ctl auth login` + +**Use Cases:** +- View deployment history for a project +- Find the public URL of a deployed service +- Check the status of recent deployments +- Get task IDs for checking deployment status"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "The UUID of the project to list deployments for" + }, + "limit": { + "type": "integer", + "description": "Optional: number of deployments to return (default 10)" + } + }, + "required": ["project_id"] + }), + } + } + + async fn call(&self, args: Self::Args) -> Result<Self::Output, Self::Error> { + // Validate project_id + if args.project_id.trim().is_empty() { + return Ok(format_error_for_llm( + "list_deployments", + ErrorCategory::ValidationFailed, + "project_id cannot be empty", + Some(vec![ + "Use list_projects to find valid project IDs", + "Use select_project to set the current project context", + ]), + )); + } + + // Create the API client + let client = match PlatformApiClient::new() { + Ok(c) => c, + Err(e) => { + return Ok(format_api_error("list_deployments", e)); + } + }; + + // Fetch deployments + match client.list_deployments(&args.project_id, args.limit).await { + Ok(paginated) => { + if paginated.data.is_empty() { + return Ok(json!({ + "success": true, + "deployments": [], + "count": 0, + "has_more": false, + "message": "No deployments found for this project. Use trigger_deployment to start a deployment."
+ }) + .to_string()); + } + + let deployment_list: Vec<serde_json::Value> = paginated + .data + .iter() + .map(|deployment| { + json!({ + "id": deployment.id, + "service_name": deployment.service_name, + "repository": deployment.repository_full_name, + "status": deployment.status, + "task_id": deployment.backstage_task_id, + "commit_sha": deployment.commit_sha, + "public_url": deployment.public_url, + "created_at": deployment.created_at.to_rfc3339() + }) + }) + .collect(); + + let result = json!({ + "success": true, + "deployments": deployment_list, + "count": paginated.data.len(), + "has_more": paginated.pagination.has_more, + "next_cursor": paginated.pagination.next_cursor, + "message": format!("Found {} deployment(s)", paginated.data.len()) + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| ListDeploymentsError(format!("Failed to serialize: {}", e))) + } + Err(e) => Ok(format_api_error("list_deployments", e)), + } + } +} + +/// Format a PlatformApiError for LLM consumption +fn format_api_error(tool_name: &str, error: PlatformApiError) -> String { + match error { + PlatformApiError::Unauthorized => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + "Not authenticated - please run `sync-ctl auth login` first", + Some(vec![ + "The user needs to authenticate with the Syncable platform", + "Run: sync-ctl auth login", + ]), + ), + PlatformApiError::NotFound(msg) => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + &format!("Resource not found: {}", msg), + Some(vec![ + "The project ID may be incorrect", + "Use list_projects to find valid project IDs", + ]), + ), + PlatformApiError::PermissionDenied(msg) => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + &format!("Permission denied: {}", msg), + Some(vec![ + "The user does not have access to this project", + "Contact the project admin for access", + ]), + ), + PlatformApiError::RateLimited => format_error_for_llm( + tool_name, +
ErrorCategory::ResourceUnavailable, + "Rate limit exceeded - please try again later", + Some(vec!["Wait a moment before retrying"]), + ), + PlatformApiError::HttpError(e) => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + &format!("Network error: {}", e), + Some(vec![ + "Check network connectivity", + "The Syncable API may be temporarily unavailable", + ]), + ), + PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + Some(vec!["This may be a temporary API issue"]), + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec!["Check the error message for details"]), + ), + PlatformApiError::ServerError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec![ + "The Syncable API is experiencing issues", + "Try again later", + ]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec![ + "Check your internet connection", + "The Syncable API may be temporarily unavailable", + ]), + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(ListDeploymentsTool::NAME, "list_deployments"); + } + + #[test] + fn test_tool_creation() { + let tool = ListDeploymentsTool::new(); + assert!(format!("{:?}", tool).contains("ListDeploymentsTool")); + } +} diff --git a/src/agent/tools/platform/list_organizations.rs b/src/agent/tools/platform/list_organizations.rs new file mode 100644 index 00000000..9e169d4d --- /dev/null +++ b/src/agent/tools/platform/list_organizations.rs @@ -0,0 +1,201 @@ +//! List organizations tool for the agent +//! +//! 
Allows the agent to list all organizations the authenticated user belongs to. + +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::platform::api::{PlatformApiClient, PlatformApiError}; + +/// Arguments for the list organizations tool (none required) +#[derive(Debug, Deserialize)] +pub struct ListOrganizationsArgs {} + +/// Error type for list organizations operations +#[derive(Debug, thiserror::Error)] +#[error("List organizations error: {0}")] +pub struct ListOrganizationsError(String); + +/// Tool to list all organizations the authenticated user belongs to +/// +/// This tool queries the Syncable Platform API to retrieve all organizations +/// that the currently authenticated user is a member of. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ListOrganizationsTool; + +impl ListOrganizationsTool { + /// Create a new ListOrganizationsTool + pub fn new() -> Self { + Self + } +} + +impl Tool for ListOrganizationsTool { + const NAME: &'static str = "list_organizations"; + + type Error = ListOrganizationsError; + type Args = ListOrganizationsArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"List all organizations the authenticated user belongs to. + +Returns a list of organizations with their IDs, names, and slugs. +Use this to discover available organizations before listing projects. 
+ +**Prerequisites:** +- User must be authenticated via `sync-ctl auth login` + +**Use Cases:** +- Finding the organization ID to list projects +- Discovering which organizations the user has access to +- Getting organization details for project selection"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": {}, + "required": [] + }), + } + } + + async fn call(&self, _args: Self::Args) -> Result<Self::Output, Self::Error> { + // Create the API client + let client = match PlatformApiClient::new() { + Ok(c) => c, + Err(e) => { + return Ok(format_api_error("list_organizations", e)); + } + }; + + // Fetch organizations + match client.list_organizations().await { + Ok(orgs) => { + if orgs.is_empty() { + return Ok(json!({ + "success": true, + "organizations": [], + "count": 0, + "message": "No organizations found. You may need to create or join an organization." + }) + .to_string()); + } + + let org_list: Vec<serde_json::Value> = orgs + .iter() + .map(|org| { + json!({ + "id": org.id, + "name": org.name, + "slug": org.slug, + "created_at": org.created_at.to_rfc3339() + }) + }) + .collect(); + + let result = json!({ + "success": true, + "organizations": org_list, + "count": orgs.len() + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| ListOrganizationsError(format!("Failed to serialize: {}", e))) + } + Err(e) => Ok(format_api_error("list_organizations", e)), + } + } +} + +/// Format a PlatformApiError for LLM consumption +fn format_api_error(tool_name: &str, error: PlatformApiError) -> String { + match error { + PlatformApiError::Unauthorized => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + "Not authenticated - please run `sync-ctl auth login` first", + Some(vec![ + "The user needs to authenticate with the Syncable platform", + "Run: sync-ctl auth login", + ]), + ), + PlatformApiError::NotFound(msg) => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + &format!("Resource not found: {}", msg), + Some(vec!["The requested resource does
not exist"]), + ), + PlatformApiError::PermissionDenied(msg) => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + &format!("Permission denied: {}", msg), + Some(vec!["The user does not have access to this resource"]), + ), + PlatformApiError::RateLimited => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + "Rate limit exceeded - please try again later", + Some(vec!["Wait a moment before retrying"]), + ), + PlatformApiError::HttpError(e) => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + &format!("Network error: {}", e), + Some(vec![ + "Check network connectivity", + "The Syncable API may be temporarily unavailable", + ]), + ), + PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + Some(vec!["This may be a temporary API issue"]), + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec!["Check the error message for details"]), + ), + PlatformApiError::ServerError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec![ + "The Syncable API is experiencing issues", + "Try again later", + ]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec![ + "Check your internet connection", + "The Syncable API may be temporarily unavailable", + ]), + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(ListOrganizationsTool::NAME, "list_organizations"); + } + + #[test] + fn test_tool_creation() { + let tool = ListOrganizationsTool::new(); + assert!(format!("{:?}", tool).contains("ListOrganizationsTool")); + } +} 
diff --git a/src/agent/tools/platform/list_projects.rs b/src/agent/tools/platform/list_projects.rs new file mode 100644 index 00000000..d7618442 --- /dev/null +++ b/src/agent/tools/platform/list_projects.rs @@ -0,0 +1,232 @@ +//! List projects tool for the agent +//! +//! Allows the agent to list all projects within an organization. + +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::platform::api::{PlatformApiClient, PlatformApiError}; + +/// Arguments for the list projects tool +#[derive(Debug, Deserialize)] +pub struct ListProjectsArgs { + /// The organization ID to list projects for + pub organization_id: String, +} + +/// Error type for list projects operations +#[derive(Debug, thiserror::Error)] +#[error("List projects error: {0}")] +pub struct ListProjectsError(String); + +/// Tool to list all projects within an organization +/// +/// This tool queries the Syncable Platform API to retrieve all projects +/// in the specified organization that the user has access to. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ListProjectsTool; + +impl ListProjectsTool { + /// Create a new ListProjectsTool + pub fn new() -> Self { + Self + } +} + +impl Tool for ListProjectsTool { + const NAME: &'static str = "list_projects"; + + type Error = ListProjectsError; + type Args = ListProjectsArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"List all projects within an organization. + +Returns a list of projects with their IDs, names, and descriptions. +Use this after getting organization IDs from list_organizations. 
+ +**Prerequisites:** +- User must be authenticated via `sync-ctl auth login` +- User must have access to the specified organization + +**Use Cases:** +- Finding project IDs to select a project context +- Discovering available projects in an organization +- Getting project details before selection"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "organization_id": { + "type": "string", + "description": "The UUID of the organization to list projects for" + } + }, + "required": ["organization_id"] + }), + } + } + + async fn call(&self, args: Self::Args) -> Result<Self::Output, Self::Error> { + // Validate organization_id is not empty + if args.organization_id.trim().is_empty() { + return Ok(format_error_for_llm( + "list_projects", + ErrorCategory::ValidationFailed, + "organization_id cannot be empty", + Some(vec![ + "Use list_organizations to find valid organization IDs", + "Pass the organization ID as a UUID string", + ]), + )); + } + + // Create the API client + let client = match PlatformApiClient::new() { + Ok(c) => c, + Err(e) => { + return Ok(format_api_error("list_projects", e)); + } + }; + + // Fetch projects for the organization + match client.list_projects(&args.organization_id).await { + Ok(projects) => { + if projects.is_empty() { + return Ok(json!({ + "success": true, + "organization_id": args.organization_id, + "projects": [], + "count": 0, + "message": "No projects found in this organization. You may need to create a project."
+ }) + .to_string()); + } + + let project_list: Vec<serde_json::Value> = projects + .iter() + .map(|proj| { + json!({ + "id": proj.id, + "name": proj.name, + "description": proj.description, + "organization_id": proj.organization_id, + "created_at": proj.created_at.to_rfc3339() + }) + }) + .collect(); + + let result = json!({ + "success": true, + "organization_id": args.organization_id, + "projects": project_list, + "count": projects.len() + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| ListProjectsError(format!("Failed to serialize: {}", e))) + } + Err(e) => Ok(format_api_error("list_projects", e)), + } + } +} + +/// Format a PlatformApiError for LLM consumption +fn format_api_error(tool_name: &str, error: PlatformApiError) -> String { + match error { + PlatformApiError::Unauthorized => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + "Not authenticated - please run `sync-ctl auth login` first", + Some(vec![ + "The user needs to authenticate with the Syncable platform", + "Run: sync-ctl auth login", + ]), + ), + PlatformApiError::NotFound(msg) => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + &format!("Organization not found: {}", msg), + Some(vec![ + "The organization ID may be incorrect", + "Use list_organizations to find valid organization IDs", + ]), + ), + PlatformApiError::PermissionDenied(msg) => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + &format!("Permission denied: {}", msg), + Some(vec![ + "The user does not have access to this organization", + "Contact the organization admin for access", + ]), + ), + PlatformApiError::RateLimited => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + "Rate limit exceeded - please try again later", + Some(vec!["Wait a moment before retrying"]), + ), + PlatformApiError::HttpError(e) => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + &format!("Network error: {}", e), + Some(vec![ + "Check network
connectivity", + "The Syncable API may be temporarily unavailable", + ]), + ), + PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + Some(vec!["This may be a temporary API issue"]), + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec!["Check the error message for details"]), + ), + PlatformApiError::ServerError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec![ + "The Syncable API is experiencing issues", + "Try again later", + ]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec![ + "Check your internet connection", + "The Syncable API may be temporarily unavailable", + ]), + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(ListProjectsTool::NAME, "list_projects"); + } + + #[test] + fn test_tool_creation() { + let tool = ListProjectsTool::new(); + assert!(format!("{:?}", tool).contains("ListProjectsTool")); + } +} diff --git a/src/agent/tools/platform/mod.rs b/src/agent/tools/platform/mod.rs new file mode 100644 index 00000000..eada3331 --- /dev/null +++ b/src/agent/tools/platform/mod.rs @@ -0,0 +1,97 @@ +//! Platform tools for managing Syncable platform resources +//! +//! This module provides agent tools for interacting with the Syncable Platform API: +//! - Listing organizations and projects +//! - Selecting and managing project context +//! - Querying current context state +//! - Cloud provider connection management +//! - Service deployment management +//! - Service log retrieval +//! - Project analysis for deployment +//! +//! 
## Tools +//! +//! - `ListOrganizationsTool` - List organizations the user belongs to +//! - `ListProjectsTool` - List projects within an organization +//! - `SelectProjectTool` - Select a project as the current context +//! - `CurrentContextTool` - Get the currently selected project context +//! - `OpenProviderSettingsTool` - Open cloud provider settings in browser +//! - `CheckProviderConnectionTool` - Check if a cloud provider is connected +//! - `ListDeploymentConfigsTool` - List deployment configurations for a project +//! - `TriggerDeploymentTool` - Trigger a deployment using a config +//! - `GetDeploymentStatusTool` - Get deployment task status +//! - `ListDeploymentsTool` - List recent deployments for a project +//! - `GetServiceLogsTool` - Get container logs for a deployed service +//! - `AnalyzeProjectTool` - Analyze project for Dockerfiles and deployment options +//! - `AnalyzeCodebaseTool` - Comprehensive codebase analysis (languages, frameworks, ports, env vars) +//! - `ListDeploymentCapabilitiesTool` - List available deployment targets and providers +//! - `CreateDeploymentConfigTool` - Create a new deployment configuration +//! - `ProvisionRegistryTool` - Provision a new container registry +//! +//! ## Prerequisites +//! +//! All tools require the user to be authenticated via `sync-ctl auth login`. +//! +//! ## Example Flow +//! +//! 1. User asks: "What projects do I have access to?" +//! 2. Agent calls `list_organizations` to get available organizations +//! 3. Agent calls `list_projects` for each organization +//! 4. User asks: "Select the 'my-project' project" +//! 5. Agent calls `select_project` with the project and organization IDs +//! 6. Agent can then use `current_context` to verify the selection +//! +//! ## Cloud Provider Connection Flow +//! +//! 1. Agent calls `check_provider_connection` to see if GCP/AWS/etc is connected +//! 2. If not connected, agent calls `open_provider_settings` to open browser +//! 3. 
User completes OAuth flow in browser +//! 4. Agent calls `check_provider_connection` again to verify +//! +//! ## Deployment Flow +//! +//! 1. Agent calls `list_deployment_configs` to see available deployment configs +//! 2. Agent calls `trigger_deployment` with project_id and config_id +//! 3. Agent calls `get_deployment_status` with task_id to monitor progress +//! 4. Agent calls `list_deployments` to see deployment history and public URLs +//! 5. Agent calls `get_service_logs` to view container logs for debugging +//! +//! **SECURITY NOTE:** The agent NEVER handles actual credentials (OAuth tokens, +//! API keys). It only checks connection STATUS. All credential handling happens +//! securely in the browser through the platform's OAuth flow. + +mod analyze_codebase; +mod analyze_project; +mod check_provider_connection; +mod create_deployment_config; +mod current_context; +mod deploy_service; +mod get_deployment_status; +mod get_service_logs; +mod list_deployment_capabilities; +mod list_deployment_configs; +mod list_deployments; +mod list_organizations; +mod list_projects; +mod open_provider_settings; +mod provision_registry; +mod select_project; +mod trigger_deployment; + +pub use analyze_codebase::AnalyzeCodebaseTool; +pub use analyze_project::AnalyzeProjectTool; +pub use check_provider_connection::CheckProviderConnectionTool; +pub use create_deployment_config::CreateDeploymentConfigTool; +pub use current_context::CurrentContextTool; +pub use deploy_service::DeployServiceTool; +pub use get_deployment_status::GetDeploymentStatusTool; +pub use get_service_logs::GetServiceLogsTool; +pub use list_deployment_capabilities::ListDeploymentCapabilitiesTool; +pub use list_deployment_configs::ListDeploymentConfigsTool; +pub use list_deployments::ListDeploymentsTool; +pub use list_organizations::ListOrganizationsTool; +pub use list_projects::ListProjectsTool; +pub use open_provider_settings::OpenProviderSettingsTool; +pub use provision_registry::ProvisionRegistryTool; +pub 
use select_project::SelectProjectTool; +pub use trigger_deployment::TriggerDeploymentTool; diff --git a/src/agent/tools/platform/open_provider_settings.rs b/src/agent/tools/platform/open_provider_settings.rs new file mode 100644 index 00000000..f3e3d940 --- /dev/null +++ b/src/agent/tools/platform/open_provider_settings.rs @@ -0,0 +1,159 @@ +//! Open provider settings tool for the agent +//! +//! Opens the cloud providers settings page in the user's browser. + +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; + +/// Arguments for the open provider settings tool +#[derive(Debug, Deserialize)] +pub struct OpenProviderSettingsArgs { + /// The project ID to open settings for + pub project_id: String, +} + +/// Error type for open provider settings operations +#[derive(Debug, thiserror::Error)] +#[error("Open provider settings error: {0}")] +pub struct OpenProviderSettingsError(String); + +/// Tool to open the cloud providers settings page in the browser +/// +/// This tool opens the Syncable platform's cloud providers settings page +/// where users can connect their GCP, AWS, Azure, or Hetzner accounts. +/// +/// SECURITY NOTE: The actual credential connection happens entirely in the +/// browser through the platform's secure OAuth flow. The CLI agent NEVER +/// handles or sees the actual credentials. 
+#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct OpenProviderSettingsTool; + +impl OpenProviderSettingsTool { + /// Create a new OpenProviderSettingsTool + pub fn new() -> Self { + Self + } +} + +impl Tool for OpenProviderSettingsTool { + const NAME: &'static str = "open_provider_settings"; + + type Error = OpenProviderSettingsError; + type Args = OpenProviderSettingsArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"Open the cloud providers settings page in the user's browser. + +This opens the Syncable platform's settings page where users can connect their +cloud provider accounts (GCP, AWS, Azure, Hetzner). + +**Important:** +- The actual credential connection happens in the browser, NOT through the CLI +- After calling this tool, ask the user to confirm when they've completed the setup +- Use check_provider_connection to verify the connection was successful + +**Workflow:** +1. Call open_provider_settings with the project_id +2. Ask user: "Please connect your [provider] account in the browser. Let me know when done." +3. 
Call check_provider_connection to verify the connection + +**Prerequisites:** +- User must be authenticated via `sync-ctl auth login` +- User must have a valid project_id (from select_project or list_projects)"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "The UUID of the project to configure cloud providers for" + } + }, + "required": ["project_id"] + }), + } + } + + async fn call(&self, args: Self::Args) -> Result<Self::Output, Self::Error> { + // Validate input + if args.project_id.trim().is_empty() { + return Ok(format_error_for_llm( + "open_provider_settings", + ErrorCategory::ValidationFailed, + "project_id cannot be empty", + Some(vec![ + "Use list_projects to find valid project IDs", + "Use select_project to set the current project context", + ]), + )); + } + + // Build the settings URL + let url = format!( + "https://syncable.dev/projects/{}/settings?tab=cloud-providers", + args.project_id + ); + + // Open the URL in the default browser + match webbrowser::open(&url) { + Ok(()) => { + let result = json!({ + "success": true, + "message": "Opened cloud providers settings in your browser", + "url": url, + "next_steps": [ + "Connect your cloud provider account in the browser", + "Once done, tell me which provider you connected", + "I'll verify the connection with check_provider_connection" + ] + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| OpenProviderSettingsError(format!("Failed to serialize: {}", e))) + } + Err(e) => Ok(format_error_for_llm( + "open_provider_settings", + ErrorCategory::ExternalCommandFailed, + &format!("Failed to open browser: {}", e), + Some(vec![ + &format!("You can manually open: {}", url), + "Check if a default browser is configured", + ]), + )), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(OpenProviderSettingsTool::NAME, "open_provider_settings"); + } + + #[test] + fn test_tool_creation() { + let tool =
OpenProviderSettingsTool::new(); + assert!(format!("{:?}", tool).contains("OpenProviderSettingsTool")); + } + + #[test] + fn test_settings_url_format() { + let project_id = "proj-12345-uuid"; + let expected_url = format!( + "https://syncable.dev/projects/{}/settings?tab=cloud-providers", + project_id + ); + assert!(expected_url.contains(project_id)); + assert!(expected_url.contains("cloud-providers")); + } +} diff --git a/src/agent/tools/platform/provision_registry.rs b/src/agent/tools/platform/provision_registry.rs new file mode 100644 index 00000000..65c0dccf --- /dev/null +++ b/src/agent/tools/platform/provision_registry.rs @@ -0,0 +1,418 @@ +//! Provision registry tool for the agent +//! +//! Allows the agent to provision a new container registry for storing images. + +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use std::time::Duration; +use tokio::time::sleep; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::platform::api::types::{CreateRegistryRequest, RegistryTaskState}; +use crate::platform::api::{PlatformApiClient, PlatformApiError}; + +/// Maximum time to wait for registry provisioning (5 minutes) +const PROVISIONING_TIMEOUT_SECS: u64 = 300; +/// Polling interval between status checks +const POLL_INTERVAL_SECS: u64 = 3; + +/// Arguments for the provision registry tool +#[derive(Debug, Deserialize)] +pub struct ProvisionRegistryArgs { + /// The project UUID + pub project_id: String, + /// Cluster ID to associate registry with + pub cluster_id: String, + /// Cluster name for display + pub cluster_name: String, + /// Cloud provider: "gcp" or "hetzner" + pub provider: String, + /// Region for the registry + pub region: String, + /// Name for the registry (auto-generated if not provided) + pub registry_name: Option<String>, + /// GCP project ID (required for GCP provider) + pub gcp_project_id: Option<String>, +} + +/// Error type for provision registry operations
+#[derive(Debug, thiserror::Error)] +#[error("Provision registry error: {0}")] +pub struct ProvisionRegistryError(String); + +/// Tool to provision a new container registry +/// +/// Creates a container registry for storing Docker images used in deployments. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ProvisionRegistryTool; + +impl ProvisionRegistryTool { + /// Create a new ProvisionRegistryTool + pub fn new() -> Self { + Self + } +} + +impl Tool for ProvisionRegistryTool { + const NAME: &'static str = "provision_registry"; + + type Error = ProvisionRegistryError; + type Args = ProvisionRegistryArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"Provision a new container registry for storing Docker images. + +A container registry is required for deployments. This tool starts provisioning +and polls until completion (may take 1-3 minutes). + +**Parameters:** +- project_id: The project UUID +- cluster_id: Cluster ID to associate the registry with +- cluster_name: Cluster name for display purposes +- provider: "gcp" or "hetzner" +- region: Region for the registry (e.g., "us-central1", "nbg1") +- registry_name: Name for the registry (optional - defaults to "main") +- gcp_project_id: Required for GCP provider + +**Prerequisites:** +- User must be authenticated +- Provider must be connected +- Cluster must exist (use list_deployment_capabilities to find clusters) + +**Async Behavior:** +- Provisioning takes 1-3 minutes +- This tool polls until complete or failed +- Returns registry details on success + +**Returns:** +- registry_id: The created registry ID +- registry_name, region, provider +- registry_url: URL for pushing images +- status: "completed" or error details"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "The UUID of the project" + }, + 
"cluster_id": { + "type": "string", + "description": "Cluster ID to associate registry with" + }, + "cluster_name": { + "type": "string", + "description": "Cluster name for display" + }, + "provider": { + "type": "string", + "enum": ["gcp", "hetzner"], + "description": "Cloud provider" + }, + "region": { + "type": "string", + "description": "Region for the registry" + }, + "registry_name": { + "type": "string", + "description": "Name for the registry (defaults to 'main')" + }, + "gcp_project_id": { + "type": "string", + "description": "GCP project ID (required for GCP)" + } + }, + "required": ["project_id", "cluster_id", "cluster_name", "provider", "region"] + }), + } + } + + async fn call(&self, args: Self::Args) -> Result { + // Validate required fields + if args.project_id.trim().is_empty() { + return Ok(format_error_for_llm( + "provision_registry", + ErrorCategory::ValidationFailed, + "project_id cannot be empty", + Some(vec!["Use list_projects to find valid project IDs"]), + )); + } + + if args.cluster_id.trim().is_empty() { + return Ok(format_error_for_llm( + "provision_registry", + ErrorCategory::ValidationFailed, + "cluster_id cannot be empty", + Some(vec!["Use list_deployment_capabilities to find available clusters"]), + )); + } + + // Validate provider + let valid_providers = ["gcp", "hetzner"]; + if !valid_providers.contains(&args.provider.as_str()) { + return Ok(format_error_for_llm( + "provision_registry", + ErrorCategory::ValidationFailed, + &format!( + "Invalid provider '{}'. 
Must be 'gcp' or 'hetzner'", + args.provider + ), + Some(vec![ + "Use list_deployment_capabilities to see connected providers", + ]), + )); + } + + // GCP requires gcp_project_id + if args.provider == "gcp" && args.gcp_project_id.is_none() { + return Ok(format_error_for_llm( + "provision_registry", + ErrorCategory::ValidationFailed, + "gcp_project_id is required for GCP provider", + Some(vec![ + "The GCP project ID can be found in the GCP Console", + "This is different from the Syncable project_id", + ]), + )); + } + + // Create the API client + let client = match PlatformApiClient::new() { + Ok(c) => c, + Err(e) => { + return Ok(format_api_error("provision_registry", e)); + } + }; + + // Generate registry name if not provided + let registry_name = args + .registry_name + .as_deref() + .map(sanitize_registry_name) + .unwrap_or_else(|| "main".to_string()); + + // Build the request + let request = CreateRegistryRequest { + project_id: args.project_id.clone(), + cluster_id: args.cluster_id.clone(), + cluster_name: args.cluster_name.clone(), + registry_name: registry_name.clone(), + cloud_provider: args.provider.clone(), + region: args.region.clone(), + gcp_project_id: args.gcp_project_id.clone(), + }; + + // Start provisioning + let response = match client.create_registry(&args.project_id, &request).await { + Ok(r) => r, + Err(e) => { + return Ok(format_api_error("provision_registry", e)); + } + }; + + let task_id = response.task_id; + + // Poll for completion with timeout + let start = std::time::Instant::now(); + loop { + if start.elapsed().as_secs() > PROVISIONING_TIMEOUT_SECS { + return Ok(format_error_for_llm( + "provision_registry", + ErrorCategory::Timeout, + &format!( + "Registry provisioning timed out after {} seconds. 
Task ID: {}", + PROVISIONING_TIMEOUT_SECS, task_id + ), + Some(vec![ + "The provisioning may still complete in the background", + "Use the platform UI to check the registry status", + &format!("Task ID for reference: {}", task_id), + ]), + )); + } + + sleep(Duration::from_secs(POLL_INTERVAL_SECS)).await; + + let status = match client.get_registry_task_status(&task_id).await { + Ok(s) => s, + Err(e) => { + return Ok(format_error_for_llm( + "provision_registry", + ErrorCategory::NetworkError, + &format!("Failed to get task status: {}", e), + Some(vec![ + "The provisioning may still be running", + &format!("Task ID: {}", task_id), + ]), + )); + } + }; + + match status.status { + RegistryTaskState::Completed => { + let registry_url = status.output.registry_url.clone(); + let final_registry_name = status + .output + .registry_name + .clone() + .unwrap_or_else(|| registry_name.clone()); + + // The task_id serves as the registry identifier for now + // The actual registry ID may be returned in the output after provisioning completes + let result = json!({ + "success": true, + "task_id": task_id, + "registry_name": final_registry_name, + "region": args.region, + "provider": args.provider, + "registry_url": registry_url, + "status": "completed", + "message": format!( + "Registry '{}' provisioned successfully", + final_registry_name + ), + "next_steps": [ + "The registry is now ready for use", + "Use list_deployment_capabilities to get the full registry details", + "Docker images will be pushed to this registry during deployments" + ] + }); + + return serde_json::to_string_pretty(&result) + .map_err(|e| ProvisionRegistryError(format!("Failed to serialize: {}", e))); + } + RegistryTaskState::Failed => { + let error_msg = status + .error + .map(|e| e.message) + .unwrap_or_else(|| "Unknown error".to_string()); + + return Ok(format_error_for_llm( + "provision_registry", + ErrorCategory::ExternalCommandFailed, + &format!("Registry provisioning failed: {}", error_msg), + 
Some(vec![ + "Check provider connectivity", + "Verify cluster and region are valid", + "The provider may have resource limits", + ]), + )); + } + RegistryTaskState::Cancelled => { + return Ok(format_error_for_llm( + "provision_registry", + ErrorCategory::UserCancelled, + "Registry provisioning was cancelled", + Some(vec!["The task was cancelled externally"]), + )); + } + RegistryTaskState::Processing | RegistryTaskState::Unknown => { + // Continue polling + } + } + } + } +} + +/// Sanitize registry name (lowercase, alphanumeric, hyphens) +fn sanitize_registry_name(name: &str) -> String { + name.to_lowercase() + .chars() + .map(|c| if c.is_alphanumeric() || c == '-' { c } else { '-' }) + .collect::() + .trim_matches('-') + .to_string() +} + +/// Format a PlatformApiError for LLM consumption +fn format_api_error(tool_name: &str, error: PlatformApiError) -> String { + match error { + PlatformApiError::Unauthorized => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + "Not authenticated - please run `sync-ctl auth login` first", + Some(vec!["Run: sync-ctl auth login"]), + ), + PlatformApiError::NotFound(msg) => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + &format!("Resource not found: {}", msg), + Some(vec![ + "The project or cluster ID may be incorrect", + "Use list_deployment_capabilities to find valid IDs", + ]), + ), + PlatformApiError::PermissionDenied(msg) => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + &format!("Permission denied: {}", msg), + Some(vec!["Contact the project admin for access"]), + ), + PlatformApiError::RateLimited => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + "Rate limit exceeded", + Some(vec!["Wait a moment before retrying"]), + ), + PlatformApiError::HttpError(e) => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + &format!("Network error: {}", e), + Some(vec!["Check network connectivity"]), + ), + 
PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + None, + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec!["Check the error message for details"]), + ), + PlatformApiError::ServerError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec!["Try again later"]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec!["Check your internet connection"]), + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(ProvisionRegistryTool::NAME, "provision_registry"); + } + + #[test] + fn test_tool_creation() { + let tool = ProvisionRegistryTool::new(); + assert!(format!("{:?}", tool).contains("ProvisionRegistryTool")); + } + + #[test] + fn test_sanitize_registry_name() { + assert_eq!(sanitize_registry_name("My Registry"), "my-registry"); + assert_eq!(sanitize_registry_name("test_name"), "test-name"); + assert_eq!(sanitize_registry_name("--test--"), "test"); + assert_eq!(sanitize_registry_name("MAIN"), "main"); + } +} diff --git a/src/agent/tools/platform/select_project.rs b/src/agent/tools/platform/select_project.rs new file mode 100644 index 00000000..ccd8374c --- /dev/null +++ b/src/agent/tools/platform/select_project.rs @@ -0,0 +1,284 @@ +//! Select project tool for the agent +//! +//! Allows the agent to select a project as the current context for platform operations. 
+ +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::platform::api::{PlatformApiClient, PlatformApiError}; +use crate::platform::PlatformSession; + +/// Arguments for the select project tool +#[derive(Debug, Deserialize)] +pub struct SelectProjectArgs { + /// The project ID to select + pub project_id: String, + /// The organization ID the project belongs to + pub organization_id: String, +} + +/// Error type for select project operations +#[derive(Debug, thiserror::Error)] +#[error("Select project error: {0}")] +pub struct SelectProjectError(String); + +/// Tool to select a project as the current context +/// +/// This tool sets the current project context for platform operations. +/// The selection is persisted to `~/.syncable/platform-session.json`. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct SelectProjectTool; + +impl SelectProjectTool { + /// Create a new SelectProjectTool + pub fn new() -> Self { + Self + } +} + +impl Tool for SelectProjectTool { + const NAME: &'static str = "select_project"; + + type Error = SelectProjectError; + type Args = SelectProjectArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"Select a project as the current context for platform operations. + +This persists the selection so future operations will use this project context. +The selection is stored in ~/.syncable/platform-session.json. + +**Prerequisites:** +- User must be authenticated via `sync-ctl auth login` +- The project_id and organization_id must be valid + +**Use Cases:** +- Setting up context before creating tasks or deployments +- Switching between projects +- Establishing project context for platform-aware operations + +**Workflow:** +1. 
Use list_organizations to find the organization +2. Use list_projects to find the project within the organization +3. Call select_project with both IDs"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "The UUID of the project to select" + }, + "organization_id": { + "type": "string", + "description": "The UUID of the organization the project belongs to" + } + }, + "required": ["project_id", "organization_id"] + }), + } + } + + async fn call(&self, args: Self::Args) -> Result { + // Validate inputs + if args.project_id.trim().is_empty() { + return Ok(format_error_for_llm( + "select_project", + ErrorCategory::ValidationFailed, + "project_id cannot be empty", + Some(vec![ + "Use list_projects to find valid project IDs", + "Pass the project ID as a UUID string", + ]), + )); + } + + if args.organization_id.trim().is_empty() { + return Ok(format_error_for_llm( + "select_project", + ErrorCategory::ValidationFailed, + "organization_id cannot be empty", + Some(vec![ + "Use list_organizations to find valid organization IDs", + "Pass the organization ID as a UUID string", + ]), + )); + } + + // Create the API client + let client = match PlatformApiClient::new() { + Ok(c) => c, + Err(e) => { + return Ok(format_api_error("select_project", e)); + } + }; + + // Verify project exists and user has access + let project = match client.get_project(&args.project_id).await { + Ok(p) => p, + Err(e) => { + return Ok(format_api_error("select_project", e)); + } + }; + + // Verify organization exists and user has access + let organization = match client.get_organization(&args.organization_id).await { + Ok(o) => o, + Err(e) => { + return Ok(format_api_error("select_project", e)); + } + }; + + // Verify the project belongs to the specified organization + if project.organization_id != args.organization_id { + return Ok(format_error_for_llm( + "select_project", + ErrorCategory::ValidationFailed, + "Project does 
not belong to the specified organization", + Some(vec![ + &format!( + "Project '{}' belongs to organization '{}'", + project.name, project.organization_id + ), + "Use the correct organization_id for this project", + ]), + )); + } + + // Create and save the session + let session = PlatformSession::with_project( + project.id.clone(), + project.name.clone(), + organization.id.clone(), + organization.name.clone(), + ); + + if let Err(e) = session.save() { + return Ok(format_error_for_llm( + "select_project", + ErrorCategory::InternalError, + &format!("Failed to save session: {}", e), + Some(vec![ + "The session could not be persisted to disk", + "Check permissions on ~/.syncable/ directory", + ]), + )); + } + + // Return success response + let result = json!({ + "success": true, + "message": format!("Selected project '{}' in organization '{}'", project.name, organization.name), + "context": { + "project_id": project.id, + "project_name": project.name, + "organization_id": organization.id, + "organization_name": organization.name + }, + "session_path": PlatformSession::session_path().display().to_string() + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| SelectProjectError(format!("Failed to serialize: {}", e))) + } +} + +/// Format a PlatformApiError for LLM consumption +fn format_api_error(tool_name: &str, error: PlatformApiError) -> String { + match error { + PlatformApiError::Unauthorized => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + "Not authenticated - please run `sync-ctl auth login` first", + Some(vec![ + "The user needs to authenticate with the Syncable platform", + "Run: sync-ctl auth login", + ]), + ), + PlatformApiError::NotFound(msg) => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + &format!("Resource not found: {}", msg), + Some(vec![ + "The project or organization ID may be incorrect", + "Use list_organizations and list_projects to find valid IDs", + ]), + ), + 
PlatformApiError::PermissionDenied(msg) => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + &format!("Permission denied: {}", msg), + Some(vec![ + "The user does not have access to this resource", + "Contact the organization or project admin for access", + ]), + ), + PlatformApiError::RateLimited => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + "Rate limit exceeded - please try again later", + Some(vec!["Wait a moment before retrying"]), + ), + PlatformApiError::HttpError(e) => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + &format!("Network error: {}", e), + Some(vec![ + "Check network connectivity", + "The Syncable API may be temporarily unavailable", + ]), + ), + PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + Some(vec!["This may be a temporary API issue"]), + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec!["Check the error message for details"]), + ), + PlatformApiError::ServerError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec![ + "The Syncable API is experiencing issues", + "Try again later", + ]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec![ + "Check your internet connection", + "The Syncable API may be temporarily unavailable", + ]), + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(SelectProjectTool::NAME, "select_project"); + } + + #[test] + fn test_tool_creation() { + let tool = SelectProjectTool::new(); + assert!(format!("{:?}", 
tool).contains("SelectProjectTool")); + } +} diff --git a/src/agent/tools/platform/trigger_deployment.rs b/src/agent/tools/platform/trigger_deployment.rs new file mode 100644 index 00000000..0bc138d7 --- /dev/null +++ b/src/agent/tools/platform/trigger_deployment.rs @@ -0,0 +1,254 @@ +//! Trigger deployment tool for the agent +//! +//! Allows the agent to trigger a deployment using a deployment config. + +use rig::completion::ToolDefinition; +use rig::tool::Tool; +use serde::{Deserialize, Serialize}; +use serde_json::json; + +use crate::agent::tools::error::{ErrorCategory, format_error_for_llm}; +use crate::platform::api::{PlatformApiClient, PlatformApiError, TriggerDeploymentRequest}; + +/// Arguments for the trigger deployment tool +#[derive(Debug, Deserialize)] +pub struct TriggerDeploymentArgs { + /// The project ID for the deployment + pub project_id: String, + /// The deployment config ID to use + pub config_id: String, + /// Optional specific commit SHA to deploy + pub commit_sha: Option, +} + +/// Error type for trigger deployment operations +#[derive(Debug, thiserror::Error)] +#[error("Trigger deployment error: {0}")] +pub struct TriggerDeploymentError(String); + +/// Tool to trigger a deployment using a deployment config +/// +/// Starts a new deployment for the specified configuration. Returns a task ID +/// that can be used to monitor deployment progress. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct TriggerDeploymentTool; + +impl TriggerDeploymentTool { + /// Create a new TriggerDeploymentTool + pub fn new() -> Self { + Self + } +} + +impl Tool for TriggerDeploymentTool { + const NAME: &'static str = "trigger_deployment"; + + type Error = TriggerDeploymentError; + type Args = TriggerDeploymentArgs; + type Output = String; + + async fn definition(&self, _prompt: String) -> ToolDefinition { + ToolDefinition { + name: Self::NAME.to_string(), + description: r#"Trigger a deployment using a deployment configuration. 
+ +Starts a new deployment for the specified config. Returns a task ID that can be +used to monitor deployment progress with `get_deployment_status`. + +**Parameters:** +- project_id: The project UUID +- config_id: The deployment config ID (get from list_deployment_configs) +- commit_sha: Optional specific commit to deploy (defaults to latest on branch) + +**Prerequisites:** +- User must be authenticated via `sync-ctl auth login` +- A deployment config must exist for the project + +**Use Cases:** +- Deploy the latest code from a branch +- Deploy a specific commit version +- Trigger a manual deployment for a service + +**Returns:** +- task_id: Use this to check deployment progress with get_deployment_status +- status: Initial deployment status +- message: Human-readable status message"# + .to_string(), + parameters: json!({ + "type": "object", + "properties": { + "project_id": { + "type": "string", + "description": "The UUID of the project" + }, + "config_id": { + "type": "string", + "description": "The deployment config ID (from list_deployment_configs)" + }, + "commit_sha": { + "type": "string", + "description": "Optional: specific commit SHA to deploy (defaults to latest)" + } + }, + "required": ["project_id", "config_id"] + }), + } + } + + async fn call(&self, args: Self::Args) -> Result { + // Validate project_id + if args.project_id.trim().is_empty() { + return Ok(format_error_for_llm( + "trigger_deployment", + ErrorCategory::ValidationFailed, + "project_id cannot be empty", + Some(vec![ + "Use list_projects to find valid project IDs", + "Use select_project to set the current project context", + ]), + )); + } + + // Validate config_id + if args.config_id.trim().is_empty() { + return Ok(format_error_for_llm( + "trigger_deployment", + ErrorCategory::ValidationFailed, + "config_id cannot be empty", + Some(vec![ + "Use list_deployment_configs to find available deployment configs", + ]), + )); + } + + // Create the API client + let client = match 
PlatformApiClient::new() { + Ok(c) => c, + Err(e) => { + return Ok(format_api_error("trigger_deployment", e)); + } + }; + + // Build the request + let request = TriggerDeploymentRequest { + project_id: args.project_id.clone(), + config_id: args.config_id.clone(), + commit_sha: args.commit_sha.clone(), + }; + + // Trigger the deployment + match client.trigger_deployment(&request).await { + Ok(response) => { + let result = json!({ + "success": true, + "task_id": response.backstage_task_id, + "config_id": response.config_id, + "status": response.status, + "message": response.message, + "next_steps": [ + format!("Use get_deployment_status with task_id '{}' to monitor progress", response.backstage_task_id), + "Deployment typically takes 2-5 minutes to complete" + ] + }); + + serde_json::to_string_pretty(&result) + .map_err(|e| TriggerDeploymentError(format!("Failed to serialize: {}", e))) + } + Err(e) => Ok(format_api_error("trigger_deployment", e)), + } + } +} + +/// Format a PlatformApiError for LLM consumption +fn format_api_error(tool_name: &str, error: PlatformApiError) -> String { + match error { + PlatformApiError::Unauthorized => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + "Not authenticated - please run `sync-ctl auth login` first", + Some(vec![ + "The user needs to authenticate with the Syncable platform", + "Run: sync-ctl auth login", + ]), + ), + PlatformApiError::NotFound(msg) => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + &format!("Resource not found: {}", msg), + Some(vec![ + "The project ID or config ID may be incorrect", + "Use list_deployment_configs to find valid config IDs", + ]), + ), + PlatformApiError::PermissionDenied(msg) => format_error_for_llm( + tool_name, + ErrorCategory::PermissionDenied, + &format!("Permission denied: {}", msg), + Some(vec![ + "The user does not have permission to trigger deployments", + "Contact the project admin for access", + ]), + ), + 
PlatformApiError::RateLimited => format_error_for_llm( + tool_name, + ErrorCategory::ResourceUnavailable, + "Rate limit exceeded - please try again later", + Some(vec!["Wait a moment before retrying"]), + ), + PlatformApiError::HttpError(e) => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + &format!("Network error: {}", e), + Some(vec![ + "Check network connectivity", + "The Syncable API may be temporarily unavailable", + ]), + ), + PlatformApiError::ParseError(msg) => format_error_for_llm( + tool_name, + ErrorCategory::InternalError, + &format!("Failed to parse API response: {}", msg), + Some(vec!["This may be a temporary API issue"]), + ), + PlatformApiError::ApiError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("API error ({}): {}", status, message), + Some(vec!["Check the error message for details"]), + ), + PlatformApiError::ServerError { status, message } => format_error_for_llm( + tool_name, + ErrorCategory::ExternalCommandFailed, + &format!("Server error ({}): {}", status, message), + Some(vec![ + "The Syncable API is experiencing issues", + "Try again later", + ]), + ), + PlatformApiError::ConnectionFailed => format_error_for_llm( + tool_name, + ErrorCategory::NetworkError, + "Could not connect to Syncable API", + Some(vec![ + "Check your internet connection", + "The Syncable API may be temporarily unavailable", + ]), + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tool_name() { + assert_eq!(TriggerDeploymentTool::NAME, "trigger_deployment"); + } + + #[test] + fn test_tool_creation() { + let tool = TriggerDeploymentTool::new(); + assert!(format!("{:?}", tool).contains("TriggerDeploymentTool")); + } +} diff --git a/src/analyzer/context/file_analyzers/docker.rs b/src/analyzer/context/file_analyzers/docker.rs index f43a633e..f6bda3ad 100644 --- a/src/analyzer/context/file_analyzers/docker.rs +++ b/src/analyzer/context/file_analyzers/docker.rs @@ 
-1,4 +1,4 @@ -use crate::analyzer::{Port, Protocol, context::helpers::create_regex}; +use crate::analyzer::{Port, PortSource, Protocol, context::helpers::create_regex}; use crate::common::file_utils::is_readable_file; use crate::error::{AnalysisError, Result}; use std::collections::{HashMap, HashSet}; @@ -101,6 +101,7 @@ fn analyze_docker_files_at( number: port, protocol, description: Some(format!("Exposed in Dockerfile ({})", root.display())), + source: Some(PortSource::Dockerfile), }); } } @@ -189,6 +190,7 @@ fn analyze_docker_compose( number: port, protocol, description: Some(description), + source: Some(PortSource::DockerCompose), }); } } diff --git a/src/analyzer/context/health_detector.rs b/src/analyzer/context/health_detector.rs new file mode 100644 index 00000000..722c985f --- /dev/null +++ b/src/analyzer/context/health_detector.rs @@ -0,0 +1,386 @@ +//! Health endpoint detection for deployment recommendations. +//! +//! Detects health check endpoints by analyzing: +//! - Source code patterns (route definitions) +//! - Framework conventions (Spring Actuator, etc.) +//! 
- Configuration files (K8s manifests) + +use crate::analyzer::{DetectedTechnology, HealthEndpoint, HealthEndpointSource, TechnologyCategory}; +use crate::common::file_utils::{is_readable_file, read_file_safe}; +use crate::error::Result; +use regex::Regex; +use std::path::Path; + +/// Common health check paths to scan for +const COMMON_HEALTH_PATHS: &[&str] = &[ + "/health", + "/healthz", + "/ready", + "/readyz", + "/livez", + "/live", + "/api/health", + "/api/v1/health", + "/__health", + "/ping", + "/status", +]; + +/// Detects health endpoints from project analysis +pub fn detect_health_endpoints( + project_root: &Path, + technologies: &[DetectedTechnology], + max_file_size: usize, +) -> Vec { + let mut endpoints = Vec::new(); + + // Check framework-specific defaults first + for tech in technologies { + if let Some(endpoint) = get_framework_health_endpoint(tech) { + endpoints.push(endpoint); + } + } + + // Scan source files for health route definitions + let detected_from_code = scan_for_health_routes(project_root, technologies, max_file_size); + for endpoint in detected_from_code { + // Avoid duplicates - prefer code-detected over framework defaults + if !endpoints.iter().any(|e| e.path == endpoint.path) { + endpoints.push(endpoint); + } else { + // Upgrade existing endpoint if code detection has higher confidence + if let Some(existing) = endpoints.iter_mut().find(|e| e.path == endpoint.path) { + if endpoint.confidence > existing.confidence { + *existing = endpoint; + } + } + } + } + + // Sort by confidence (highest first) + endpoints.sort_by(|a, b| b.confidence.partial_cmp(&a.confidence).unwrap_or(std::cmp::Ordering::Equal)); + + endpoints +} + +/// Get framework-specific health endpoint defaults +fn get_framework_health_endpoint(tech: &DetectedTechnology) -> Option { + match tech.name.as_str() { + // Java frameworks + "Spring Boot" => Some(HealthEndpoint::from_framework("/actuator/health", "Spring Boot Actuator")), + "Quarkus" => 
Some(HealthEndpoint::from_framework("/q/health", "Quarkus SmallRye Health")), + "Micronaut" => Some(HealthEndpoint::from_framework("/health", "Micronaut")), + + // Node.js frameworks - no standard, but common patterns + "Express" | "Fastify" | "Koa" | "Hono" | "Elysia" | "NestJS" => { + // Return a lower confidence endpoint since these don't have a standard + Some(HealthEndpoint { + path: "/health".to_string(), + confidence: 0.5, + source: HealthEndpointSource::FrameworkDefault, + description: Some(format!("{} common health pattern", tech.name)), + }) + } + + // Python frameworks + "FastAPI" => Some(HealthEndpoint::from_framework("/health", "FastAPI")), + "Django" => Some(HealthEndpoint { + path: "/health/".to_string(), // Django uses trailing slashes + confidence: 0.5, + source: HealthEndpointSource::FrameworkDefault, + description: Some("Django common health pattern".to_string()), + }), + "Flask" => Some(HealthEndpoint { + path: "/health".to_string(), + confidence: 0.5, + source: HealthEndpointSource::FrameworkDefault, + description: Some("Flask common health pattern".to_string()), + }), + + // Go frameworks + "Gin" | "Echo" | "Fiber" | "Chi" => Some(HealthEndpoint { + path: "/health".to_string(), + confidence: 0.5, + source: HealthEndpointSource::FrameworkDefault, + description: Some(format!("{} common health pattern", tech.name)), + }), + + // Rust frameworks + "Actix Web" | "Axum" | "Rocket" => Some(HealthEndpoint { + path: "/health".to_string(), + confidence: 0.5, + source: HealthEndpointSource::FrameworkDefault, + description: Some(format!("{} common health pattern", tech.name)), + }), + + _ => None, + } +} + +/// Scan source files for health route definitions +fn scan_for_health_routes( + project_root: &Path, + technologies: &[DetectedTechnology], + max_file_size: usize, +) -> Vec { + let mut endpoints = Vec::new(); + + // Determine which file types to scan based on detected technologies + let has_js = technologies.iter().any(|t| { + matches!(t.category, 
TechnologyCategory::BackendFramework | TechnologyCategory::MetaFramework) + && (t.name.contains("Express") || t.name.contains("Fastify") || t.name.contains("Koa") + || t.name.contains("Hono") || t.name.contains("Elysia") || t.name.contains("NestJS") + || t.name.contains("Next") || t.name.contains("Nuxt")) + }); + + let has_python = technologies.iter().any(|t| { + matches!(t.category, TechnologyCategory::BackendFramework) + && (t.name.contains("FastAPI") || t.name.contains("Flask") || t.name.contains("Django")) + }); + + let has_go = technologies.iter().any(|t| { + matches!(t.category, TechnologyCategory::BackendFramework) + && (t.name.contains("Gin") || t.name.contains("Echo") || t.name.contains("Fiber") || t.name.contains("Chi")) + }); + + let has_rust = technologies.iter().any(|t| { + matches!(t.category, TechnologyCategory::BackendFramework) + && (t.name.contains("Actix") || t.name.contains("Axum") || t.name.contains("Rocket")) + }); + + let has_java = technologies.iter().any(|t| { + matches!(t.category, TechnologyCategory::BackendFramework) + && (t.name.contains("Spring") || t.name.contains("Quarkus") || t.name.contains("Micronaut")) + }); + + // Common locations to check + let locations = [ + "src/", + "app/", + "routes/", + "api/", + "server/", + "lib/", + "handlers/", + "controllers/", + ]; + + for location in &locations { + let dir = project_root.join(location); + if dir.is_dir() { + if has_js { + scan_directory_for_patterns(&dir, &["js", "ts", "mjs"], &js_health_patterns(), max_file_size, &mut endpoints); + } + if has_python { + scan_directory_for_patterns(&dir, &["py"], &python_health_patterns(), max_file_size, &mut endpoints); + } + if has_go { + scan_directory_for_patterns(&dir, &["go"], &go_health_patterns(), max_file_size, &mut endpoints); + } + if has_rust { + scan_directory_for_patterns(&dir, &["rs"], &rust_health_patterns(), max_file_size, &mut endpoints); + } + if has_java { + scan_directory_for_patterns(&dir, &["java", "kt"], 
&java_health_patterns(), max_file_size, &mut endpoints); + } + } + } + + // Also check root-level files + if has_js { + for entry in ["index.js", "index.ts", "app.js", "app.ts", "server.js", "server.ts", "main.js", "main.ts"] { + let path = project_root.join(entry); + if is_readable_file(&path) { + scan_file_for_patterns(&path, &js_health_patterns(), max_file_size, &mut endpoints); + } + } + } + if has_python { + for entry in ["main.py", "app.py", "wsgi.py", "asgi.py"] { + let path = project_root.join(entry); + if is_readable_file(&path) { + scan_file_for_patterns(&path, &python_health_patterns(), max_file_size, &mut endpoints); + } + } + } + if has_go { + let main_go = project_root.join("main.go"); + if is_readable_file(&main_go) { + scan_file_for_patterns(&main_go, &go_health_patterns(), max_file_size, &mut endpoints); + } + } + if has_rust { + let main_rs = project_root.join("src/main.rs"); + if is_readable_file(&main_rs) { + scan_file_for_patterns(&main_rs, &rust_health_patterns(), max_file_size, &mut endpoints); + } + } + + endpoints +} + +/// Scan a directory for health route patterns +fn scan_directory_for_patterns( + dir: &Path, + extensions: &[&str], + patterns: &[(&str, f32)], + max_file_size: usize, + endpoints: &mut Vec, +) { + if let Ok(entries) = std::fs::read_dir(dir) { + for entry in entries.flatten() { + let path = entry.path(); + if path.is_file() { + if let Some(ext) = path.extension() { + if extensions.iter().any(|e| ext == *e) { + scan_file_for_patterns(&path, patterns, max_file_size, endpoints); + } + } + } else if path.is_dir() { + // Skip common non-source directories + let dir_name = path.file_name().map(|n| n.to_string_lossy().to_string()).unwrap_or_default(); + if !["node_modules", ".git", "target", "build", "dist", "__pycache__", ".next", "vendor"].contains(&dir_name.as_str()) { + scan_directory_for_patterns(&path, extensions, patterns, max_file_size, endpoints); + } + } + } + } +} + +/// Scan a single file for health route patterns +fn 
scan_file_for_patterns( + path: &Path, + patterns: &[(&str, f32)], + max_file_size: usize, + endpoints: &mut Vec, +) { + if let Ok(content) = read_file_safe(path, max_file_size) { + for (pattern, confidence) in patterns { + if let Ok(regex) = Regex::new(pattern) { + for cap in regex.captures_iter(&content) { + if let Some(path_match) = cap.get(1) { + let health_path = path_match.as_str().to_string(); + // Only add if it looks like a health endpoint + if COMMON_HEALTH_PATHS.iter().any(|p| health_path.contains(p) || p.contains(&health_path)) { + if !endpoints.iter().any(|e| e.path == health_path) { + endpoints.push(HealthEndpoint { + path: health_path, + confidence: *confidence, + source: HealthEndpointSource::CodePattern, + description: Some(format!("Found in {}", path.display())), + }); + } + } + } + } + } + } + } +} + +/// JavaScript/TypeScript health route patterns +fn js_health_patterns() -> Vec<(&'static str, f32)> { + vec![ + // Express/Fastify/Koa style: app.get('/health', ...) + (r#"\.(?:get|route)\s*\(\s*['"]([^'"]*(?:health|ready|live|status|ping)[^'"]*)['"]"#, 0.9), + // NestJS style: @Get('health') + (r#"@Get\s*\(\s*['"]([^'"]*(?:health|ready|live|status|ping)[^'"]*)['"]"#, 0.9), + // Hono/Elysia style: .get('/health', ...) + (r#"\.get\s*\(\s*['"]([^'"]*(?:health|ready|live|status|ping)[^'"]*)['"]"#, 0.9), + ] +} + +/// Python health route patterns +fn python_health_patterns() -> Vec<(&'static str, f32)> { + vec![ + // FastAPI/Flask style: @app.get("/health") + (r#"@\w+\.(?:get|route)\s*\(\s*['"]([^'"]*(?:health|ready|live|status|ping)[^'"]*)['"]"#, 0.9), + // Django URL patterns: path('health/', ...) + (r#"path\s*\(\s*['"]([^'"]*(?:health|ready|live|status|ping)[^'"]*)['"]"#, 0.85), + ] +} + +/// Go health route patterns +fn go_health_patterns() -> Vec<(&'static str, f32)> { + vec![ + // http.HandleFunc("/health", ...) + (r#"HandleFunc\s*\(\s*"([^"]*(?:health|ready|live|status|ping)[^"]*)"#, 0.9), + // Gin/Echo: r.GET("/health", ...) 
+ (r#"\.(?:GET|Handle)\s*\(\s*"([^"]*(?:health|ready|live|status|ping)[^"]*)"#, 0.9), + ] +} + +/// Rust health route patterns +fn rust_health_patterns() -> Vec<(&'static str, f32)> { + vec![ + // Actix: .route("/health", ...) + (r#"\.route\s*\(\s*"([^"]*(?:health|ready|live|status|ping)[^"]*)"#, 0.9), + // Axum: .route("/health", get(...)) + (r#"\.route\s*\(\s*"([^"]*(?:health|ready|live|status|ping)[^"]*)"#, 0.9), + ] +} + +/// Java health route patterns +fn java_health_patterns() -> Vec<(&'static str, f32)> { + vec![ + // Spring: @GetMapping("/health") + (r#"@(?:Get|Request)Mapping\s*\(\s*(?:value\s*=\s*)?["']([^"']*(?:health|ready|live|status|ping)[^"']*)["']"#, 0.9), + ] +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_spring_boot_health_endpoint() { + let tech = DetectedTechnology { + name: "Spring Boot".to_string(), + version: None, + category: TechnologyCategory::BackendFramework, + confidence: 0.9, + requires: vec![], + conflicts_with: vec![], + is_primary: true, + file_indicators: vec![], + }; + + let endpoint = get_framework_health_endpoint(&tech).unwrap(); + assert_eq!(endpoint.path, "/actuator/health"); + assert_eq!(endpoint.confidence, 0.7); + } + + #[test] + fn test_express_health_endpoint() { + let tech = DetectedTechnology { + name: "Express".to_string(), + version: None, + category: TechnologyCategory::BackendFramework, + confidence: 0.9, + requires: vec![], + conflicts_with: vec![], + is_primary: true, + file_indicators: vec![], + }; + + let endpoint = get_framework_health_endpoint(&tech).unwrap(); + assert_eq!(endpoint.path, "/health"); + assert_eq!(endpoint.confidence, 0.5); // Lower confidence for non-standard + } + + #[test] + fn test_unknown_framework_no_endpoint() { + let tech = DetectedTechnology { + name: "UnknownFramework".to_string(), + version: None, + category: TechnologyCategory::BackendFramework, + confidence: 0.9, + requires: vec![], + conflicts_with: vec![], + is_primary: true, + file_indicators: vec![], + }; 
+ + assert!(get_framework_health_endpoint(&tech).is_none()); + } +} diff --git a/src/analyzer/context/helpers.rs b/src/analyzer/context/helpers.rs index 1424e7a7..3a76e1db 100644 --- a/src/analyzer/context/helpers.rs +++ b/src/analyzer/context/helpers.rs @@ -1,4 +1,4 @@ -use crate::analyzer::{Port, Protocol}; +use crate::analyzer::{Port, PortSource, Protocol}; use crate::error::{AnalysisError, Result}; use regex::Regex; use std::collections::HashSet; @@ -11,7 +11,7 @@ pub fn create_regex(pattern: &str) -> Result { }) } -/// Extracts ports from command strings +/// Extracts ports from command strings (e.g., npm scripts in package.json) pub fn extract_ports_from_command(command: &str, ports: &mut HashSet) { // Look for common port patterns in commands let patterns = [ @@ -31,6 +31,7 @@ pub fn extract_ports_from_command(command: &str, ports: &mut HashSet) { number: port, protocol: Protocol::Http, description: Some("Port from command".to_string()), + source: Some(PortSource::PackageJson), }); } } diff --git a/src/analyzer/context/infra_detector.rs b/src/analyzer/context/infra_detector.rs new file mode 100644 index 00000000..c8a5a6df --- /dev/null +++ b/src/analyzer/context/infra_detector.rs @@ -0,0 +1,334 @@ +//! Infrastructure detection for deployment recommendations. +//! +//! Detects existing infrastructure configurations: +//! - Kubernetes manifests (k8s/, deploy/, manifests/) +//! - Helm charts (Chart.yaml) +//! - Terraform files (*.tf) +//! - Docker Compose files +//! 
- Syncable deployment configs (.syncable/) + +use crate::analyzer::InfrastructurePresence; +use crate::common::file_utils::is_readable_file; +use std::path::{Path, PathBuf}; + +/// Common directories where K8s manifests might be found +const K8S_DIRECTORIES: &[&str] = &[ + "k8s", + "kubernetes", + "deploy", + "deployment", + "deployments", + "manifests", + "kube", + "charts", + ".k8s", +]; + +/// Docker compose file variants +const COMPOSE_FILES: &[&str] = &[ + "docker-compose.yml", + "docker-compose.yaml", + "compose.yml", + "compose.yaml", + "docker-compose.dev.yml", + "docker-compose.prod.yml", + "docker-compose.local.yml", +]; + +/// Detect infrastructure presence in a project +pub fn detect_infrastructure(project_root: &Path) -> InfrastructurePresence { + let mut infra = InfrastructurePresence::default(); + + // Detect Docker Compose + for compose_file in COMPOSE_FILES { + if is_readable_file(&project_root.join(compose_file)) { + infra.has_docker_compose = true; + break; + } + } + + // Detect Kubernetes manifests + let k8s_paths = detect_kubernetes_manifests(project_root); + if !k8s_paths.is_empty() { + infra.has_kubernetes = true; + infra.kubernetes_paths = k8s_paths; + } + + // Detect Helm charts + let helm_paths = detect_helm_charts(project_root); + if !helm_paths.is_empty() { + infra.has_helm = true; + infra.helm_chart_paths = helm_paths; + } + + // Detect Terraform + let tf_paths = detect_terraform(project_root); + if !tf_paths.is_empty() { + infra.has_terraform = true; + infra.terraform_paths = tf_paths; + } + + // Detect Syncable deployment config + infra.has_deployment_config = project_root.join(".syncable").is_dir() + || is_readable_file(&project_root.join("syncable.json")) + || is_readable_file(&project_root.join("syncable.yaml")) + || is_readable_file(&project_root.join("syncable.yml")); + + // Generate summary + if infra.has_any() { + let types = infra.detected_types(); + infra.summary = Some(format!("Detected: {}", types.join(", "))); + } + + 
infra +} + +/// Detect Kubernetes manifest directories and files +fn detect_kubernetes_manifests(project_root: &Path) -> Vec { + let mut paths = Vec::new(); + + // Check common K8s directories + for dir_name in K8S_DIRECTORIES { + let dir_path = project_root.join(dir_name); + if dir_path.is_dir() && has_kubernetes_files(&dir_path) { + paths.push(dir_path); + } + } + + // Check root-level YAML files that might be K8s manifests + if let Ok(entries) = std::fs::read_dir(project_root) { + for entry in entries.flatten() { + let path = entry.path(); + if path.is_file() { + if let Some(ext) = path.extension() { + if (ext == "yaml" || ext == "yml") && is_kubernetes_manifest(&path) { + paths.push(path); + } + } + } + } + } + + paths +} + +/// Check if a directory contains Kubernetes files +fn has_kubernetes_files(dir: &Path) -> bool { + if let Ok(entries) = std::fs::read_dir(dir) { + for entry in entries.flatten() { + let path = entry.path(); + if path.is_file() { + if let Some(ext) = path.extension() { + if (ext == "yaml" || ext == "yml") && is_kubernetes_manifest(&path) { + return true; + } + } + } + } + } + false +} + +/// Check if a YAML file is a Kubernetes manifest (quick check without full parsing) +fn is_kubernetes_manifest(path: &Path) -> bool { + if let Ok(content) = std::fs::read_to_string(path) { + // Check first 2KB of file for K8s markers (fast check) + let check_content = if content.len() > 2048 { + &content[..2048] + } else { + &content + }; + + // K8s manifest indicators + let k8s_kinds = [ + "kind: Deployment", + "kind: Service", + "kind: Pod", + "kind: ConfigMap", + "kind: Secret", + "kind: Ingress", + "kind: StatefulSet", + "kind: DaemonSet", + "kind: Job", + "kind: CronJob", + "kind: PersistentVolumeClaim", + "kind: ServiceAccount", + "kind: Role", + "kind: RoleBinding", + "kind: ClusterRole", + "kind: ClusterRoleBinding", + "kind: NetworkPolicy", + "kind: HorizontalPodAutoscaler", + "kind: PodDisruptionBudget", + "kind: Namespace", + ]; + + // Check for 
apiVersion + kind pattern (most K8s manifests) + if check_content.contains("apiVersion:") { + for kind in &k8s_kinds { + if check_content.contains(*kind) { + return true; + } + } + } + } + false +} + +/// Detect Helm chart directories +fn detect_helm_charts(project_root: &Path) -> Vec { + let mut paths = Vec::new(); + + // Check if root is a Helm chart + if is_readable_file(&project_root.join("Chart.yaml")) { + paths.push(project_root.to_path_buf()); + } + + // Check common locations + let helm_locations = ["charts", "helm", "deploy/helm", "deployment/helm"]; + for location in &helm_locations { + let dir = project_root.join(location); + if dir.is_dir() { + // Check if it's a chart itself + if is_readable_file(&dir.join("Chart.yaml")) { + paths.push(dir.clone()); + } + // Check subdirectories for charts + if let Ok(entries) = std::fs::read_dir(&dir) { + for entry in entries.flatten() { + let path = entry.path(); + if path.is_dir() && is_readable_file(&path.join("Chart.yaml")) { + paths.push(path); + } + } + } + } + } + + paths +} + +/// Detect Terraform directories +fn detect_terraform(project_root: &Path) -> Vec { + let mut paths = Vec::new(); + + // Check common Terraform locations + let tf_locations = ["terraform", "infra", "infrastructure", "tf", "iac"]; + for location in &tf_locations { + let dir = project_root.join(location); + if dir.is_dir() && has_terraform_files(&dir) { + paths.push(dir); + } + } + + // Check root for Terraform files + if has_terraform_files(project_root) { + paths.push(project_root.to_path_buf()); + } + + paths +} + +/// Check if a directory contains Terraform files +fn has_terraform_files(dir: &Path) -> bool { + if let Ok(entries) = std::fs::read_dir(dir) { + for entry in entries.flatten() { + let path = entry.path(); + if path.is_file() { + if let Some(ext) = path.extension() { + if ext == "tf" { + return true; + } + } + } + } + } + false +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + use tempfile::TempDir; + + #[test] 
+ fn test_detect_empty_project() { + let temp_dir = TempDir::new().unwrap(); + let infra = detect_infrastructure(temp_dir.path()); + assert!(!infra.has_any()); + } + + #[test] + fn test_detect_docker_compose() { + let temp_dir = TempDir::new().unwrap(); + fs::write(temp_dir.path().join("docker-compose.yml"), "version: '3'\nservices:\n app:\n build: .").unwrap(); + + let infra = detect_infrastructure(temp_dir.path()); + assert!(infra.has_docker_compose); + assert!(infra.has_any()); + } + + #[test] + fn test_detect_kubernetes_manifest() { + let temp_dir = TempDir::new().unwrap(); + let k8s_dir = temp_dir.path().join("k8s"); + fs::create_dir(&k8s_dir).unwrap(); + fs::write(k8s_dir.join("deployment.yaml"), "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: test").unwrap(); + + let infra = detect_infrastructure(temp_dir.path()); + assert!(infra.has_kubernetes); + assert_eq!(infra.kubernetes_paths.len(), 1); + } + + #[test] + fn test_detect_helm_chart() { + let temp_dir = TempDir::new().unwrap(); + let helm_dir = temp_dir.path().join("charts").join("myapp"); + fs::create_dir_all(&helm_dir).unwrap(); + fs::write(helm_dir.join("Chart.yaml"), "apiVersion: v2\nname: myapp\nversion: 1.0.0").unwrap(); + + let infra = detect_infrastructure(temp_dir.path()); + assert!(infra.has_helm); + assert!(!infra.helm_chart_paths.is_empty()); + } + + #[test] + fn test_detect_terraform() { + let temp_dir = TempDir::new().unwrap(); + let tf_dir = temp_dir.path().join("terraform"); + fs::create_dir(&tf_dir).unwrap(); + fs::write(tf_dir.join("main.tf"), "provider \"aws\" {\n region = \"us-east-1\"\n}").unwrap(); + + let infra = detect_infrastructure(temp_dir.path()); + assert!(infra.has_terraform); + assert!(!infra.terraform_paths.is_empty()); + } + + #[test] + fn test_detect_syncable_config() { + let temp_dir = TempDir::new().unwrap(); + let syncable_dir = temp_dir.path().join(".syncable"); + fs::create_dir(&syncable_dir).unwrap(); + + let infra = detect_infrastructure(temp_dir.path()); 
+ assert!(infra.has_deployment_config); + } + + #[test] + fn test_infrastructure_summary() { + let temp_dir = TempDir::new().unwrap(); + fs::write(temp_dir.path().join("docker-compose.yml"), "version: '3'").unwrap(); + let tf_dir = temp_dir.path().join("terraform"); + fs::create_dir(&tf_dir).unwrap(); + fs::write(tf_dir.join("main.tf"), "provider \"aws\" {}").unwrap(); + + let infra = detect_infrastructure(temp_dir.path()); + assert!(infra.has_docker_compose); + assert!(infra.has_terraform); + assert!(infra.summary.is_some()); + let summary = infra.summary.unwrap(); + assert!(summary.contains("Docker Compose")); + assert!(summary.contains("Terraform")); + } +} diff --git a/src/analyzer/context/language_analyzers/go.rs b/src/analyzer/context/language_analyzers/go.rs index 2d30b8c4..c1a5b200 100644 --- a/src/analyzer/context/language_analyzers/go.rs +++ b/src/analyzer/context/language_analyzers/go.rs @@ -1,5 +1,5 @@ use crate::analyzer::{ - AnalysisConfig, BuildScript, EntryPoint, Port, Protocol, context::helpers::create_regex, + AnalysisConfig, BuildScript, EntryPoint, Port, PortSource, Protocol, context::helpers::create_regex, }; use crate::common::file_utils::{is_readable_file, read_file_safe}; use crate::error::Result; @@ -107,6 +107,7 @@ fn scan_go_file_for_context( number: port, protocol: Protocol::Http, description: Some("Go web server".to_string()), + source: Some(PortSource::SourceCode), }); } } diff --git a/src/analyzer/context/language_analyzers/javascript.rs b/src/analyzer/context/language_analyzers/javascript.rs index b4bb1fed..71a1a80c 100644 --- a/src/analyzer/context/language_analyzers/javascript.rs +++ b/src/analyzer/context/language_analyzers/javascript.rs @@ -1,5 +1,5 @@ use crate::analyzer::{ - AnalysisConfig, BuildScript, EntryPoint, Port, Protocol, + AnalysisConfig, BuildScript, EntryPoint, Port, PortSource, Protocol, context::helpers::{create_regex, extract_ports_from_command, get_script_description}, }; use 
crate::common::file_utils::{is_readable_file, read_file_safe}; @@ -104,6 +104,7 @@ fn scan_js_file_for_context( number: port, protocol: Protocol::Http, description: Some("HTTP server port".to_string()), + source: Some(PortSource::SourceCode), }); } } @@ -119,6 +120,7 @@ fn scan_js_file_for_context( number: port, protocol: Protocol::Http, description: Some("Express/HTTP server".to_string()), + source: Some(PortSource::SourceCode), }); } } diff --git a/src/analyzer/context/language_analyzers/jvm.rs b/src/analyzer/context/language_analyzers/jvm.rs index 592434c7..474b760f 100644 --- a/src/analyzer/context/language_analyzers/jvm.rs +++ b/src/analyzer/context/language_analyzers/jvm.rs @@ -1,5 +1,5 @@ use crate::analyzer::{ - AnalysisConfig, BuildScript, Port, Protocol, context::helpers::create_regex, + AnalysisConfig, BuildScript, Port, PortSource, Protocol, context::helpers::create_regex, }; use crate::common::file_utils::{is_readable_file, read_file_safe}; use crate::error::Result; @@ -115,6 +115,7 @@ fn analyze_application_properties( number: port, protocol: Protocol::Http, description: Some("Spring Boot server".to_string()), + source: Some(PortSource::ConfigFile), }); } } @@ -129,6 +130,7 @@ fn analyze_application_properties( number: port, protocol: Protocol::Http, description: Some("Spring Boot server (default)".to_string()), + source: Some(PortSource::ConfigFile), }); } } @@ -144,6 +146,7 @@ fn analyze_application_properties( number: port, protocol: Protocol::Http, description: Some("Quarkus HTTP server".to_string()), + source: Some(PortSource::ConfigFile), }); } } @@ -159,6 +162,7 @@ fn analyze_application_properties( number: port, protocol: Protocol::Http, description: Some("Micronaut server".to_string()), + source: Some(PortSource::ConfigFile), }); } } @@ -177,6 +181,7 @@ fn analyze_application_properties( number: port, protocol: Protocol::Http, description: Some("Java HTTP server".to_string()), + source: Some(PortSource::ConfigFile), }); } } @@ -192,6 +197,7 
@@ fn analyze_application_properties( number: port, protocol: Protocol::Http, description: Some("MicroProfile server".to_string()), + source: Some(PortSource::ConfigFile), }); } } diff --git a/src/analyzer/context/language_analyzers/python.rs b/src/analyzer/context/language_analyzers/python.rs index beaf7533..7d797016 100644 --- a/src/analyzer/context/language_analyzers/python.rs +++ b/src/analyzer/context/language_analyzers/python.rs @@ -1,5 +1,5 @@ use crate::analyzer::{ - AnalysisConfig, BuildScript, EntryPoint, Port, Protocol, context::helpers::create_regex, + AnalysisConfig, BuildScript, EntryPoint, Port, PortSource, Protocol, context::helpers::create_regex, }; use crate::common::file_utils::{is_readable_file, read_file_safe}; use crate::error::Result; @@ -123,6 +123,7 @@ fn scan_python_file_for_context( number: port, protocol: Protocol::Http, description: Some("Python web server".to_string()), + source: Some(PortSource::SourceCode), }); } } diff --git a/src/analyzer/context/language_analyzers/rust.rs b/src/analyzer/context/language_analyzers/rust.rs index 3326d4c7..dc5a49ef 100644 --- a/src/analyzer/context/language_analyzers/rust.rs +++ b/src/analyzer/context/language_analyzers/rust.rs @@ -1,5 +1,5 @@ use crate::analyzer::{ - AnalysisConfig, BuildScript, EntryPoint, Port, Protocol, context::helpers::create_regex, + AnalysisConfig, BuildScript, EntryPoint, Port, PortSource, Protocol, context::helpers::create_regex, }; use crate::common::file_utils::{is_readable_file, read_file_safe}; use crate::error::Result; @@ -120,6 +120,7 @@ fn scan_rust_file_for_context( number: port, protocol: Protocol::Http, description: Some("Rust web server".to_string()), + source: Some(PortSource::SourceCode), }); } } diff --git a/src/analyzer/context/mod.rs b/src/analyzer/context/mod.rs index 4e300055..1a660676 100644 --- a/src/analyzer/context/mod.rs +++ b/src/analyzer/context/mod.rs @@ -1,9 +1,13 @@ pub mod analysis; pub(crate) mod file_analyzers; +pub(crate) mod health_detector; 
pub(crate) mod helpers; +pub(crate) mod infra_detector; pub(crate) mod language_analyzers; pub(crate) mod microservices; pub(crate) mod project_type; pub(crate) mod tech_specific; pub use analysis::analyze_context; +pub use health_detector::detect_health_endpoints; +pub use infra_detector::detect_infrastructure; diff --git a/src/analyzer/context/tech_specific.rs b/src/analyzer/context/tech_specific.rs index c859861f..b76b5b3c 100644 --- a/src/analyzer/context/tech_specific.rs +++ b/src/analyzer/context/tech_specific.rs @@ -1,4 +1,4 @@ -use crate::analyzer::{DetectedTechnology, EntryPoint, Port, Protocol}; +use crate::analyzer::{DetectedTechnology, EntryPoint, Port, PortSource, Protocol}; use crate::error::Result; use std::collections::HashSet; use std::path::Path; @@ -17,6 +17,7 @@ pub(crate) fn analyze_technology_specifics( number: 3000, protocol: Protocol::Http, description: Some("Next.js development server".to_string()), + source: Some(PortSource::FrameworkDefault), }); // Look for pages directory @@ -35,6 +36,7 @@ pub(crate) fn analyze_technology_specifics( number: 3000, protocol: Protocol::Http, description: Some(format!("{} server", technology.name)), + source: Some(PortSource::FrameworkDefault), }); } "Encore" => { @@ -43,6 +45,7 @@ pub(crate) fn analyze_technology_specifics( number: 4000, protocol: Protocol::Http, description: Some("Encore development server".to_string()), + source: Some(PortSource::FrameworkDefault), }); } "Astro" => { @@ -51,6 +54,7 @@ pub(crate) fn analyze_technology_specifics( number: 4321, protocol: Protocol::Http, description: Some("Astro development server".to_string()), + source: Some(PortSource::FrameworkDefault), }); } "SvelteKit" => { @@ -59,6 +63,7 @@ pub(crate) fn analyze_technology_specifics( number: 5173, protocol: Protocol::Http, description: Some("SvelteKit development server".to_string()), + source: Some(PortSource::FrameworkDefault), }); } "Nuxt.js" => { @@ -67,6 +72,7 @@ pub(crate) fn analyze_technology_specifics( 
number: 3000, protocol: Protocol::Http, description: Some("Nuxt.js development server".to_string()), + source: Some(PortSource::FrameworkDefault), }); } "Tanstack Start" => { @@ -75,6 +81,7 @@ pub(crate) fn analyze_technology_specifics( number: 3000, protocol: Protocol::Http, description: Some(format!("{} development server", technology.name)), + source: Some(PortSource::FrameworkDefault), }); } "React Router v7" => { @@ -83,6 +90,7 @@ pub(crate) fn analyze_technology_specifics( number: 5173, protocol: Protocol::Http, description: Some("React Router v7 development server".to_string()), + source: Some(PortSource::FrameworkDefault), }); } "Django" => { @@ -90,6 +98,7 @@ pub(crate) fn analyze_technology_specifics( number: 8000, protocol: Protocol::Http, description: Some("Django development server".to_string()), + source: Some(PortSource::FrameworkDefault), }); } "Flask" | "FastAPI" => { @@ -97,6 +106,7 @@ pub(crate) fn analyze_technology_specifics( number: 5000, protocol: Protocol::Http, description: Some(format!("{} server", technology.name)), + source: Some(PortSource::FrameworkDefault), }); } "Spring Boot" => { @@ -104,6 +114,7 @@ pub(crate) fn analyze_technology_specifics( number: 8080, protocol: Protocol::Http, description: Some("Spring Boot server".to_string()), + source: Some(PortSource::FrameworkDefault), }); } "Actix Web" | "Rocket" => { @@ -111,6 +122,7 @@ pub(crate) fn analyze_technology_specifics( number: 8080, protocol: Protocol::Http, description: Some(format!("{} server", technology.name)), + source: Some(PortSource::FrameworkDefault), }); } _ => {} diff --git a/src/analyzer/docker_analyzer.rs b/src/analyzer/docker_analyzer.rs index 16ef58b5..f603dea9 100644 --- a/src/analyzer/docker_analyzer.rs +++ b/src/analyzer/docker_analyzer.rs @@ -56,6 +56,28 @@ pub struct DockerfileInfo { pub instruction_count: usize, } +/// Dockerfile discovery result for deployment wizard +/// +/// Provides deployment-focused metadata about a Dockerfile including +/// build 
context path, suggested service name, and port configuration. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct DiscoveredDockerfile { + /// Absolute path to the Dockerfile + pub path: PathBuf, + /// Relative path from project root to Dockerfile directory (build context) + pub build_context: String, + /// Suggested service name based on directory structure + pub suggested_service_name: String, + /// Suggested port for deployment (from EXPOSE or default) + pub suggested_port: Option, + /// Base image from Dockerfile + pub base_image: Option, + /// Whether this is a multi-stage build + pub is_multistage: bool, + /// Environment type (dev, prod, staging) from filename + pub environment: Option, +} + /// Information about a Docker Compose file #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct ComposeFileInfo { @@ -1237,6 +1259,199 @@ fn analyze_environments( environments.into_values().collect() } +// ============================================================================= +// Dockerfile Discovery for Deployment Wizard +// ============================================================================= + +/// Suggests a service name based on Dockerfile path and project structure. +/// +/// Uses the parent directory name if not at project root, otherwise uses +/// the project root's directory name. The name is sanitized to be lowercase +/// with hyphens (suitable for Kubernetes service names). 
+fn suggest_service_name(dockerfile_path: &Path, project_root: &Path) -> String { + // Get parent directory of Dockerfile + let dockerfile_dir = dockerfile_path.parent().unwrap_or(dockerfile_path); + + // Determine which directory name to use + let name = if dockerfile_dir == project_root { + // Dockerfile is in project root - use project root's directory name + project_root + .file_name() + .and_then(|n| n.to_str()) + .unwrap_or("app") + } else { + // Use the immediate parent directory name + dockerfile_dir + .file_name() + .and_then(|n| n.to_str()) + .unwrap_or("app") + }; + + // Sanitize: lowercase, replace underscores/spaces with hyphens, remove non-alphanumeric + sanitize_service_name(name) +} + +/// Sanitizes a string to be a valid Kubernetes service name. +/// Lowercase, alphanumeric with hyphens, no leading/trailing hyphens. +fn sanitize_service_name(name: &str) -> String { + let sanitized: String = name + .to_lowercase() + .chars() + .map(|c| { + if c.is_ascii_alphanumeric() { + c + } else { + '-' + } + }) + .collect(); + + // Remove consecutive hyphens and trim hyphens from ends + let mut result = String::new(); + let mut prev_hyphen = true; // Start true to skip leading hyphens + + for c in sanitized.chars() { + if c == '-' { + if !prev_hyphen { + result.push(c); + prev_hyphen = true; + } + } else { + result.push(c); + prev_hyphen = false; + } + } + + // Remove trailing hyphen + if result.ends_with('-') { + result.pop(); + } + + if result.is_empty() { + "app".to_string() + } else { + result + } +} + +/// Computes build context path relative to project root. +/// +/// Returns the relative path from project root to the Dockerfile's directory, +/// suitable for use as a Docker build context path. 
+fn compute_build_context(dockerfile_path: &Path, project_root: &Path) -> String { + let dockerfile_dir = dockerfile_path.parent().unwrap_or(dockerfile_path); + + // Try to get relative path from project root to dockerfile directory + if let Ok(relative) = dockerfile_dir.strip_prefix(project_root) { + let path_str = relative.to_string_lossy().to_string(); + if path_str.is_empty() { + ".".to_string() + } else { + path_str + } + } else { + // Fallback: use "." if we can't compute relative path + ".".to_string() + } +} + +/// Infers default port based on base image. +/// +/// Returns a common default port for well-known base images. +fn infer_default_port(base_image: &Option) -> Option { + let image = base_image.as_ref()?; + let image_lower = image.to_lowercase(); + + // Extract image name without registry/tag + let image_name = image_lower + .split('/') + .last() + .unwrap_or(&image_lower) + .split(':') + .next() + .unwrap_or(&image_lower); + + match image_name { + // Node.js + s if s.starts_with("node") => Some(3000), + // Python web frameworks + s if s.contains("python") => Some(8000), + s if s.contains("flask") => Some(5000), + s if s.contains("django") => Some(8000), + s if s.contains("fastapi") => Some(8000), + // Go + s if s.starts_with("golang") || s.starts_with("go") => Some(8080), + // Rust + s if s.starts_with("rust") => Some(8080), + // Web servers + s if s.starts_with("nginx") => Some(80), + s if s.starts_with("httpd") || s.starts_with("apache") => Some(80), + s if s.starts_with("caddy") => Some(80), + // Java + s if s.contains("openjdk") || s.contains("java") => Some(8080), + s if s.contains("tomcat") => Some(8080), + s if s.contains("spring") => Some(8080), + // Ruby + s if s.starts_with("ruby") => Some(3000), + s if s.contains("rails") => Some(3000), + // PHP + s if s.starts_with("php") => Some(80), + // .NET + s if s.contains("dotnet") || s.contains("aspnet") => Some(80), + // Elixir/Phoenix + s if s.contains("elixir") || s.contains("phoenix") => 
Some(4000), + // Default: no inference + _ => None, + } +} + +/// Discovers Dockerfiles in a project and returns deployment-focused metadata. +/// +/// This function finds all Dockerfiles in the project, parses them, and returns +/// deployment-relevant information including build context paths, suggested +/// service names, and port configurations. +/// +/// # Arguments +/// +/// * `project_root` - The root directory of the project to analyze +/// +/// # Returns +/// +/// A vector of `DiscoveredDockerfile` structs, one for each Dockerfile found +pub fn discover_dockerfiles_for_deployment( + project_root: &Path, +) -> Result> { + let dockerfiles = find_dockerfiles(project_root)?; + + let discovered: Vec = dockerfiles + .into_iter() + .filter_map(|path| { + let info = parse_dockerfile(&path).ok()?; + let build_context = compute_build_context(&path, project_root); + let suggested_name = suggest_service_name(&path, project_root); + + // Get port from EXPOSE instruction or infer from base image + let suggested_port = info + .exposed_ports + .first() + .copied() + .or_else(|| infer_default_port(&info.base_image)); + + Some(DiscoveredDockerfile { + path, + build_context, + suggested_service_name: suggested_name, + suggested_port, + base_image: info.base_image, + is_multistage: info.is_multistage, + environment: info.environment, + }) + }) + .collect(); + + Ok(discovered) +} + #[cfg(test)] mod tests { use super::*; @@ -1279,4 +1494,147 @@ mod tests { None ); } + + // ============================================================================= + // Dockerfile Discovery Tests + // ============================================================================= + + #[test] + fn test_suggest_service_name_from_subdirectory() { + let path = PathBuf::from("/project/services/api/Dockerfile"); + let root = PathBuf::from("/project"); + assert_eq!(suggest_service_name(&path, &root), "api"); + } + + #[test] + fn test_suggest_service_name_from_root() { + let path = 
PathBuf::from("/project/Dockerfile"); + let root = PathBuf::from("/project"); + assert_eq!(suggest_service_name(&path, &root), "project"); + } + + #[test] + fn test_suggest_service_name_nested() { + let path = PathBuf::from("/myapp/apps/web-frontend/Dockerfile"); + let root = PathBuf::from("/myapp"); + assert_eq!(suggest_service_name(&path, &root), "web-frontend"); + } + + #[test] + fn test_suggest_service_name_sanitizes() { + // Underscores become hyphens + let path = PathBuf::from("/project/my_service_api/Dockerfile"); + let root = PathBuf::from("/project"); + assert_eq!(suggest_service_name(&path, &root), "my-service-api"); + } + + #[test] + fn test_sanitize_service_name() { + assert_eq!(sanitize_service_name("My_Service"), "my-service"); + assert_eq!(sanitize_service_name("api-v2"), "api-v2"); + assert_eq!(sanitize_service_name("__leading__"), "leading"); + assert_eq!(sanitize_service_name("trailing--"), "trailing"); + assert_eq!(sanitize_service_name("multi---hyphens"), "multi-hyphens"); + assert_eq!(sanitize_service_name("special@#chars!"), "special-chars"); + assert_eq!(sanitize_service_name(""), "app"); // Empty defaults to "app" + } + + #[test] + fn test_compute_build_context_subdirectory() { + let path = PathBuf::from("/project/services/api/Dockerfile"); + let root = PathBuf::from("/project"); + assert_eq!(compute_build_context(&path, &root), "services/api"); + } + + #[test] + fn test_compute_build_context_root() { + let path = PathBuf::from("/project/Dockerfile"); + let root = PathBuf::from("/project"); + assert_eq!(compute_build_context(&path, &root), "."); + } + + #[test] + fn test_compute_build_context_deep_nested() { + let path = PathBuf::from("/myapp/packages/frontend/apps/web/Dockerfile"); + let root = PathBuf::from("/myapp"); + assert_eq!( + compute_build_context(&path, &root), + "packages/frontend/apps/web" + ); + } + + #[test] + fn test_infer_default_port_node() { + assert_eq!(infer_default_port(&Some("node:18".to_string())), Some(3000)); + 
assert_eq!( + infer_default_port(&Some("node:18-alpine".to_string())), + Some(3000) + ); + } + + #[test] + fn test_infer_default_port_nginx() { + assert_eq!( + infer_default_port(&Some("nginx:latest".to_string())), + Some(80) + ); + assert_eq!( + infer_default_port(&Some("nginx:1.25-alpine".to_string())), + Some(80) + ); + } + + #[test] + fn test_infer_default_port_python() { + assert_eq!( + infer_default_port(&Some("python:3.11".to_string())), + Some(8000) + ); + } + + #[test] + fn test_infer_default_port_go() { + assert_eq!( + infer_default_port(&Some("golang:1.21".to_string())), + Some(8080) + ); + } + + #[test] + fn test_infer_default_port_java() { + assert_eq!( + infer_default_port(&Some("openjdk:17".to_string())), + Some(8080) + ); + } + + #[test] + fn test_infer_default_port_ruby() { + assert_eq!( + infer_default_port(&Some("ruby:3.2".to_string())), + Some(3000) + ); + } + + #[test] + fn test_infer_default_port_with_registry() { + // Should handle images with registry prefix + assert_eq!( + infer_default_port(&Some("gcr.io/my-project/node:18".to_string())), + Some(3000) + ); + assert_eq!( + infer_default_port(&Some("docker.io/library/nginx:latest".to_string())), + Some(80) + ); + } + + #[test] + fn test_infer_default_port_unknown() { + assert_eq!( + infer_default_port(&Some("custom-base:latest".to_string())), + None + ); + assert_eq!(infer_default_port(&None), None); + } } diff --git a/src/analyzer/mod.rs b/src/analyzer/mod.rs index 666c4b5c..a1660b52 100644 --- a/src/analyzer/mod.rs +++ b/src/analyzer/mod.rs @@ -63,8 +63,9 @@ pub use monorepo::{MonorepoDetectionConfig, analyze_monorepo, analyze_monorepo_w // Re-export Docker analysis types pub use docker_analyzer::{ - ComposeFileInfo, DockerAnalysis, DockerEnvironment, DockerService, DockerfileInfo, - NetworkingConfig, OrchestrationPattern, analyze_docker_infrastructure, + ComposeFileInfo, DiscoveredDockerfile, DockerAnalysis, DockerEnvironment, DockerService, + DockerfileInfo, NetworkingConfig, 
OrchestrationPattern, analyze_docker_infrastructure, + discover_dockerfiles_for_deployment, }; /// Represents a detected programming language @@ -166,12 +167,76 @@ pub struct EntryPoint { pub command: Option, } +/// Source of port detection - indicates where the port was discovered +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub enum PortSource { + /// Detected from Dockerfile EXPOSE directive + Dockerfile, + /// Detected from docker-compose.yml ports section + DockerCompose, + /// Detected from package.json scripts (Node.js) + PackageJson, + /// Inferred from framework defaults (e.g., Express=3000, FastAPI=8000) + FrameworkDefault, + /// Detected from environment variable reference (e.g., process.env.PORT) + EnvVar, + /// Detected from source code analysis (e.g., .listen(3000)) + SourceCode, + /// Detected from configuration files (e.g., config.yaml, settings.py) + ConfigFile, +} + +impl PortSource { + /// Returns a human-readable description of the port source + pub fn description(&self) -> &'static str { + match self { + PortSource::Dockerfile => "Dockerfile EXPOSE", + PortSource::DockerCompose => "docker-compose.yml", + PortSource::PackageJson => "package.json scripts", + PortSource::FrameworkDefault => "framework default", + PortSource::EnvVar => "environment variable", + PortSource::SourceCode => "source code", + PortSource::ConfigFile => "configuration file", + } + } +} + /// Represents exposed network ports #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct Port { pub number: u16, pub protocol: Protocol, pub description: Option, + /// Source where this port was detected (optional for backward compatibility) + #[serde(skip_serializing_if = "Option::is_none")] + pub source: Option, +} + +impl Port { + /// Create a new port with source information + pub fn with_source(number: u16, protocol: Protocol, source: PortSource) -> Self { + Self { + number, + protocol, + description: None, + source: Some(source), 
+ } + } + + /// Create a new port with source and description + pub fn with_source_and_description( + number: u16, + protocol: Protocol, + source: PortSource, + description: impl Into, + ) -> Self { + Self { + number, + protocol, + description: Some(description.into()), + source: Some(source), + } + } } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] @@ -182,6 +247,63 @@ pub enum Protocol { Https, } +/// Source of health endpoint detection +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum HealthEndpointSource { + /// Found by analyzing source code patterns + CodePattern, + /// Known framework convention (e.g., Spring Actuator) + FrameworkDefault, + /// Found in configuration files (e.g., K8s manifests, docker-compose) + ConfigFile, +} + +impl HealthEndpointSource { + /// Returns a human-readable description of the detection source + pub fn description(&self) -> &'static str { + match self { + HealthEndpointSource::CodePattern => "source code analysis", + HealthEndpointSource::FrameworkDefault => "framework convention", + HealthEndpointSource::ConfigFile => "configuration file", + } + } +} + +/// Represents a detected health check endpoint +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct HealthEndpoint { + /// The HTTP path for the health check (e.g., "/health", "/healthz") + pub path: String, + /// Confidence level (0.0-1.0) in this detection + pub confidence: f32, + /// Where this endpoint was detected from + pub source: HealthEndpointSource, + /// Optional description or context + pub description: Option, +} + +impl HealthEndpoint { + /// Create a new health endpoint with high confidence from code analysis + pub fn from_code(path: impl Into, confidence: f32) -> Self { + Self { + path: path.into(), + confidence, + source: HealthEndpointSource::CodePattern, + description: None, + } + } + + /// Create a health endpoint from a framework default + pub fn from_framework(path: impl Into, framework: &str) -> 
Self { + Self { + path: path.into(), + confidence: 0.7, // Framework defaults have moderate confidence + source: HealthEndpointSource::FrameworkDefault, + description: Some(format!("{} default health endpoint", framework)), + } + } +} + /// Represents environment variables #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct EnvVar { @@ -215,6 +337,47 @@ pub struct BuildScript { pub is_default: bool, } +/// Detected infrastructure files and configurations in the project +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] +pub struct InfrastructurePresence { + /// Whether Kubernetes manifests were detected + pub has_kubernetes: bool, + /// Paths to directories or files containing K8s manifests + pub kubernetes_paths: Vec, + /// Whether Helm charts were detected + pub has_helm: bool, + /// Paths to Helm chart directories (containing Chart.yaml) + pub helm_chart_paths: Vec, + /// Whether docker-compose files were detected + pub has_docker_compose: bool, + /// Whether Terraform files were detected + pub has_terraform: bool, + /// Paths to directories containing .tf files + pub terraform_paths: Vec, + /// Whether Syncable deployment config exists + pub has_deployment_config: bool, + /// Summary of what was detected for display purposes + pub summary: Option, +} + +impl InfrastructurePresence { + /// Returns true if any infrastructure was detected + pub fn has_any(&self) -> bool { + self.has_kubernetes || self.has_helm || self.has_docker_compose || self.has_terraform || self.has_deployment_config + } + + /// Returns a list of detected infrastructure types + pub fn detected_types(&self) -> Vec<&'static str> { + let mut types = Vec::new(); + if self.has_kubernetes { types.push("Kubernetes"); } + if self.has_helm { types.push("Helm"); } + if self.has_docker_compose { types.push("Docker Compose"); } + if self.has_terraform { types.push("Terraform"); } + if self.has_deployment_config { types.push("Syncable Config"); } + types + } +} + /// 
Type alias for dependency maps pub type DependencyMap = HashMap; @@ -245,6 +408,9 @@ pub struct ProjectAnalysis { pub dependencies: DependencyMap, pub entry_points: Vec, pub ports: Vec, + /// Detected health check endpoints + #[serde(default)] + pub health_endpoints: Vec, pub environment_variables: Vec, pub project_type: ProjectType, pub build_scripts: Vec, @@ -254,6 +420,9 @@ pub struct ProjectAnalysis { pub architecture_type: ArchitectureType, /// Docker infrastructure analysis pub docker_analysis: Option, + /// Detected infrastructure (K8s, Helm, Terraform, etc.) + #[serde(default)] + pub infrastructure: Option, pub analysis_metadata: AnalysisMetadata, } @@ -408,6 +577,12 @@ pub fn analyze_project_with_config( let dependencies = dependency_parser::parse_dependencies(&project_root, &languages, config)?; let context = context::analyze_context(&project_root, &languages, &frameworks, config)?; + // Detect health check endpoints + let health_endpoints = context::detect_health_endpoints(&project_root, &frameworks, config.max_file_size); + + // Detect infrastructure presence (K8s, Helm, Terraform, etc.) 
+ let infrastructure = context::detect_infrastructure(&project_root); + // Analyze Docker infrastructure let docker_analysis = analyze_docker_infrastructure(&project_root).ok(); @@ -423,12 +598,14 @@ pub fn analyze_project_with_config( dependencies, entry_points: context.entry_points, ports: context.ports, + health_endpoints, environment_variables: context.environment_variables, project_type: context.project_type, build_scripts: context.build_scripts, services: vec![], // TODO: Implement microservice detection architecture_type: ArchitectureType::Monolithic, // TODO: Detect architecture type docker_analysis, + infrastructure: Some(infrastructure), analysis_metadata: AnalysisMetadata { timestamp: Utc::now().to_rfc3339(), analyzer_version: env!("CARGO_PKG_VERSION").to_string(), diff --git a/src/cli.rs b/src/cli.rs index 35e69384..7def281c 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -346,6 +346,34 @@ pub enum Commands { #[command(subcommand)] command: AuthCommand, }, + + /// Manage Syncable projects + Project { + #[command(subcommand)] + command: ProjectCommand, + }, + + /// Manage Syncable organizations + Org { + #[command(subcommand)] + command: OrgCommand, + }, + + /// Manage environments within a project + Env { + #[command(subcommand)] + command: EnvCommand, + }, + + /// Deploy services to the Syncable platform (launches wizard by default) + Deploy { + /// Path to the project directory (default: current directory) + #[arg(value_name = "PROJECT_PATH", default_value = ".")] + path: PathBuf, + + #[command(subcommand)] + command: Option, + }, } #[derive(Subcommand)] @@ -427,6 +455,94 @@ pub enum AuthCommand { }, } +/// Project management subcommands +#[derive(Subcommand)] +pub enum ProjectCommand { + /// List projects in the current organization + List { + /// Organization ID to list projects from (uses current org if not specified) + #[arg(long)] + org_id: Option, + + /// Output format + #[arg(long, value_enum, default_value = "table")] + format: OutputFormat, + }, 
+ + /// Select a project to work with + Select { + /// Project ID to select + id: String, + }, + + /// Show current organization and project context + Current, + + /// Show details of a project + Info { + /// Project ID (uses current project if not specified) + id: Option, + }, +} + +/// Organization management subcommands +#[derive(Subcommand)] +pub enum OrgCommand { + /// List organizations you belong to + List { + /// Output format + #[arg(long, value_enum, default_value = "table")] + format: OutputFormat, + }, + + /// Select an organization to work with + Select { + /// Organization ID to select + id: String, + }, +} + +/// Environment management subcommands +#[derive(Subcommand)] +pub enum EnvCommand { + /// List environments in the current project + List { + /// Output format + #[arg(long, value_enum, default_value = "table")] + format: OutputFormat, + }, + + /// Select an environment to work with + Select { + /// Environment ID to select + id: String, + }, +} + +/// Deployment subcommands +#[derive(Subcommand)] +pub enum DeployCommand { + /// Launch interactive deployment wizard + Wizard { + /// Path to the project directory (default: current directory) + #[arg(value_name = "PROJECT_PATH", default_value = ".")] + path: PathBuf, + }, + + /// Create a new environment for the current project + NewEnv, + + /// Check deployment status + Status { + /// The deployment task ID (from deploy command output) + task_id: String, + + /// Watch for status updates (poll until complete) + #[arg(short, long)] + watch: bool, + }, +} + #[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)] pub enum OutputFormat { Table, diff --git a/src/lib.rs b/src/lib.rs index 3b0ed0a9..5bfc79d3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -8,7 +8,9 @@ pub mod config; pub mod error; pub mod generator; pub mod handlers; +pub mod platform; // Platform session state for project/org context pub mod telemetry; // Add telemetry module +pub mod wizard; // Interactive deployment wizard // Re-export 
commonly used types and functions pub use analyzer::{ProjectAnalysis, analyze_project}; @@ -277,6 +279,284 @@ pub async fn run_command(command: Commands) -> Result<()> { Ok(()) } } + Commands::Project { command } => { + use cli::{OutputFormat, ProjectCommand}; + use platform::api::client::PlatformApiClient; + use platform::session::PlatformSession; + + match command { + ProjectCommand::List { org_id, format } => { + // Get org_id from argument or session + let effective_org_id = match org_id { + Some(id) => id, + None => { + let session = PlatformSession::load().unwrap_or_default(); + match session.org_id { + Some(id) => id, + None => { + eprintln!("No organization selected."); + eprintln!("Run: sync-ctl org list"); + eprintln!("Then: sync-ctl org select "); + return Ok(()); + } + } + } + }; + + let client = PlatformApiClient::new().map_err(|e| { + error::IaCGeneratorError::Config(error::ConfigError::ParsingFailed( + e.to_string(), + )) + })?; + + match client.list_projects(&effective_org_id).await { + Ok(projects) => { + if projects.is_empty() { + println!("No projects found in this organization."); + return Ok(()); + } + + match format { + OutputFormat::Json => { + println!("{}", serde_json::to_string_pretty(&projects).unwrap_or_default()); + } + OutputFormat::Table => { + println!("\n{:<40} {:<30} {}", "ID", "NAME", "DESCRIPTION"); + println!("{}", "-".repeat(90)); + for project in projects { + let desc = if project.description.is_empty() { "-" } else { &project.description }; + let desc_truncated = if desc.len() > 30 { + format!("{}...", &desc[..27]) + } else { + desc.to_string() + }; + println!("{:<40} {:<30} {}", project.id, project.name, desc_truncated); + } + println!(); + } + } + } + Err(platform::api::error::PlatformApiError::Unauthorized) => { + eprintln!("Not authenticated. 
Run: sync-ctl auth login"); + } + Err(e) => { + eprintln!("Failed to list projects: {}", e); + } + } + Ok(()) + } + ProjectCommand::Select { id } => { + let client = PlatformApiClient::new().map_err(|e| { + error::IaCGeneratorError::Config(error::ConfigError::ParsingFailed( + e.to_string(), + )) + })?; + + match client.get_project(&id).await { + Ok(project) => { + // Get org info + let org = client.get_organization(&project.organization_id).await.ok(); + let org_name = org.as_ref().map(|o| o.name.clone()).unwrap_or_else(|| "Unknown".to_string()); + + let session = PlatformSession::with_project( + project.id.clone(), + project.name.clone(), + project.organization_id.clone(), + org_name.clone(), + ); + + if let Err(e) = session.save() { + eprintln!("Warning: Failed to save session: {}", e); + } + + println!("โœ“ Selected project: {} ({})", project.name, project.id); + println!(" Organization: {} ({})", org_name, project.organization_id); + } + Err(platform::api::error::PlatformApiError::Unauthorized) => { + eprintln!("Not authenticated. Run: sync-ctl auth login"); + } + Err(platform::api::error::PlatformApiError::NotFound(_)) => { + eprintln!("Project not found: {}", id); + eprintln!("Run: sync-ctl project list"); + } + Err(e) => { + eprintln!("Failed to select project: {}", e); + } + } + Ok(()) + } + ProjectCommand::Current => { + let session = PlatformSession::load().unwrap_or_default(); + + if !session.is_project_selected() { + println!("No project selected."); + println!("\nTo select a project:"); + println!(" 1. sync-ctl org list"); + println!(" 2. sync-ctl org select "); + println!(" 3. sync-ctl project list"); + println!(" 4. 
sync-ctl project select "); + return Ok(()); + } + + println!("\nCurrent context: {}", session.display_context()); + if let (Some(org_name), Some(org_id)) = (&session.org_name, &session.org_id) { + println!(" Organization: {} ({})", org_name, org_id); + } + if let (Some(project_name), Some(project_id)) = (&session.project_name, &session.project_id) { + println!(" Project: {} ({})", project_name, project_id); + } + if let (Some(env_name), Some(env_id)) = (&session.environment_name, &session.environment_id) { + println!(" Environment: {} ({})", env_name, env_id); + } else { + println!(" Environment: (none selected)"); + println!("\n To select an environment:"); + println!(" sync-ctl env list"); + println!(" sync-ctl env select "); + } + if let Some(updated) = session.last_updated { + println!(" Last updated: {}", updated.format("%Y-%m-%d %H:%M:%S UTC")); + } + println!(); + Ok(()) + } + ProjectCommand::Info { id } => { + // Get project id from arg or session + let project_id = match id { + Some(id) => id, + None => { + let session = PlatformSession::load().unwrap_or_default(); + match session.project_id { + Some(id) => id, + None => { + eprintln!("No project specified or selected."); + eprintln!("Run: sync-ctl project select "); + return Ok(()); + } + } + } + }; + + let client = PlatformApiClient::new().map_err(|e| { + error::IaCGeneratorError::Config(error::ConfigError::ParsingFailed( + e.to_string(), + )) + })?; + + match client.get_project(&project_id).await { + Ok(project) => { + // Get org info + let org = client.get_organization(&project.organization_id).await.ok(); + let org_name = org.as_ref().map(|o| o.name.clone()).unwrap_or_else(|| "Unknown".to_string()); + + println!("\nProject Details:"); + println!(" ID: {}", project.id); + println!(" Name: {}", project.name); + let desc = if project.description.is_empty() { "-" } else { &project.description }; + println!(" Description: {}", desc); + println!(" Organization: {} ({})", org_name, project.organization_id); 
+ println!(" Created: {}", project.created_at.format("%Y-%m-%d %H:%M:%S UTC")); + println!(); + } + Err(platform::api::error::PlatformApiError::Unauthorized) => { + eprintln!("Not authenticated. Run: sync-ctl auth login"); + } + Err(platform::api::error::PlatformApiError::NotFound(_)) => { + eprintln!("Project not found: {}", project_id); + } + Err(e) => { + eprintln!("Failed to get project info: {}", e); + } + } + Ok(()) + } + } + } + Commands::Org { command } => { + use cli::{OutputFormat, OrgCommand}; + use platform::api::client::PlatformApiClient; + use platform::session::PlatformSession; + + match command { + OrgCommand::List { format } => { + let client = PlatformApiClient::new().map_err(|e| { + error::IaCGeneratorError::Config(error::ConfigError::ParsingFailed( + e.to_string(), + )) + })?; + + match client.list_organizations().await { + Ok(orgs) => { + if orgs.is_empty() { + println!("No organizations found."); + return Ok(()); + } + + match format { + OutputFormat::Json => { + println!("{}", serde_json::to_string_pretty(&orgs).unwrap_or_default()); + } + OutputFormat::Table => { + println!("\n{:<40} {:<30} {}", "ID", "NAME", "SLUG"); + println!("{}", "-".repeat(90)); + for org in orgs { + let slug = if org.slug.is_empty() { "-" } else { &org.slug }; + println!("{:<40} {:<30} {}", org.id, org.name, slug); + } + println!(); + } + } + } + Err(platform::api::error::PlatformApiError::Unauthorized) => { + eprintln!("Not authenticated. 
Run: sync-ctl auth login"); + } + Err(e) => { + eprintln!("Failed to list organizations: {}", e); + } + } + Ok(()) + } + OrgCommand::Select { id } => { + let client = PlatformApiClient::new().map_err(|e| { + error::IaCGeneratorError::Config(error::ConfigError::ParsingFailed( + e.to_string(), + )) + })?; + + match client.get_organization(&id).await { + Ok(org) => { + // Create session with org only (clear any project/env selection) + let session = PlatformSession { + project_id: None, + project_name: None, + org_id: Some(org.id.clone()), + org_name: Some(org.name.clone()), + environment_id: None, + environment_name: None, + last_updated: Some(chrono::Utc::now()), + }; + + if let Err(e) = session.save() { + eprintln!("Warning: Failed to save session: {}", e); + } + + println!("โœ“ Selected organization: {} ({})", org.name, org.id); + println!("\nNext: Run 'sync-ctl project list' to see projects"); + } + Err(platform::api::error::PlatformApiError::Unauthorized) => { + eprintln!("Not authenticated. Run: sync-ctl auth login"); + } + Err(platform::api::error::PlatformApiError::NotFound(_)) => { + eprintln!("Organization not found: {}", id); + eprintln!("Run: sync-ctl org list"); + } + Err(e) => { + eprintln!("Failed to select organization: {}", e); + } + } + Ok(()) + } + } + } Commands::Auth { command } => { use auth::credentials; use auth::device_flow; @@ -346,5 +626,13 @@ pub async fn run_command(command: Commands) -> Result<()> { }, } } + Commands::Deploy { .. } => { + // Deploy commands are handled in main.rs directly + unreachable!("Deploy commands should be handled in main.rs") + } + Commands::Env { .. 
} => { + // Env commands are handled in main.rs directly + unreachable!("Env commands should be handled in main.rs") + } } } diff --git a/src/main.rs b/src/main.rs index ee2dd322..55277160 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,8 +2,8 @@ use clap::Parser; use syncable_cli::{ analyzer::{self, analyze_monorepo, vulnerability::VulnerabilitySeverity}, cli::{ - ChatProvider, Cli, ColorScheme, Commands, DisplayFormat, OutputFormat, SecurityScanMode, - SeverityThreshold, ToolsCommand, + ChatProvider, Cli, ColorScheme, Commands, DisplayFormat, EnvCommand, OutputFormat, + SecurityScanMode, SeverityThreshold, ToolsCommand, }, config, generator, telemetry::{self}, @@ -115,6 +115,10 @@ async fn run() -> syncable_cli::Result<()> { Commands::Optimize { .. } => "optimize", Commands::Chat { .. } => "chat", Commands::Auth { .. } => "auth", + Commands::Project { .. } => "project", + Commands::Org { .. } => "org", + Commands::Env { .. } => "env", + Commands::Deploy { .. } => "deploy", }; log::debug!("Command name: {}", command_name); @@ -687,6 +691,525 @@ async fn run() -> syncable_cli::Result<()> { // Auth commands are handled by lib.rs syncable_cli::run_command(Commands::Auth { command }).await } + Commands::Project { command } => { + // Project commands are handled by lib.rs + syncable_cli::run_command(Commands::Project { command }).await + } + Commands::Org { command } => { + // Org commands are handled by lib.rs + syncable_cli::run_command(Commands::Org { command }).await + } + Commands::Env { command } => { + use syncable_cli::auth::credentials; + use syncable_cli::platform::api::PlatformApiClient; + use syncable_cli::platform::session::PlatformSession; + + // Check authentication + if !credentials::is_authenticated() { + eprintln!("Not logged in. Run `sync-ctl auth login` first."); + process::exit(1); + } + + // Load platform session for org/project context + let session = match PlatformSession::load() { + Ok(s) => s, + Err(_) => { + eprintln!("No project selected. 
Run `sync-ctl project select` first."); + process::exit(1); + } + }; + + let project_id = match &session.project_id { + Some(p) => p.clone(), + None => { + eprintln!("No project selected. Run `sync-ctl project select` first."); + process::exit(1); + } + }; + + // Create API client + let client = match PlatformApiClient::new() { + Ok(c) => c, + Err(e) => { + eprintln!("Failed to create API client: {}", e); + process::exit(1); + } + }; + + match command { + EnvCommand::List { format } => { + match client.list_environments(&project_id).await { + Ok(environments) => { + if environments.is_empty() { + println!("No environments found in project."); + println!( + "\nCreate one with: {}", + "sync-ctl deploy new-env".bright_cyan() + ); + } else { + match format { + OutputFormat::Json => { + println!( + "{}", + serde_json::to_string_pretty(&environments).unwrap() + ); + } + OutputFormat::Table => { + println!("\nEnvironments in project:\n"); + for env in &environments { + let selected = session + .environment_id + .as_ref() + .map(|id| id == &env.id) + .unwrap_or(false); + let marker = + if selected { "โ†’ ".green() } else { " ".normal() }; + println!( + "{}{} ({}) - {}", + marker, + env.name.bold(), + env.id.dimmed(), + env.environment_type + ); + } + println!( + "\nSelect with: {}", + "sync-ctl env select ".bright_cyan() + ); + } + } + } + Ok(()) + } + Err(e) => { + eprintln!("Failed to list environments: {}", e); + process::exit(1); + } + } + } + EnvCommand::Select { id } => { + // Verify environment exists (match by ID or name) + match client.list_environments(&project_id).await { + Ok(environments) => { + if let Some(env) = environments + .iter() + .find(|e| e.id == id || e.name.eq_ignore_ascii_case(&id)) + { + // Update session with environment + let new_session = PlatformSession::with_environment( + session.project_id.unwrap(), + session.project_name.unwrap_or_default(), + session.org_id.unwrap_or_default(), + session.org_name.unwrap_or_default(), + env.id.clone(), + 
env.name.clone(), + ); + + if let Err(e) = new_session.save() { + eprintln!("Failed to save session: {}", e); + process::exit(1); + } + + println!( + "{} Selected environment: {}", + "โœ“".green(), + env.name.bold() + ); + println!("Context: {}", new_session.display_context()); + Ok(()) + } else { + eprintln!("Environment not found: {}", id); + eprintln!("Run `sync-ctl env list` to see available environments."); + process::exit(1); + } + } + Err(e) => { + eprintln!("Failed to list environments: {}", e); + process::exit(1); + } + } + } + } + } + Commands::Deploy { path, command } => { + use syncable_cli::auth::credentials; + use syncable_cli::cli::DeployCommand; + use syncable_cli::platform::api::PlatformApiClient; + use syncable_cli::platform::session::PlatformSession; + use syncable_cli::wizard::{ + create_environment_wizard, run_wizard, select_environment, + EnvironmentCreationResult, EnvironmentSelectionResult, WizardResult, + }; + + // Check authentication + if !credentials::is_authenticated() { + eprintln!("Not logged in. Run `sync-ctl auth login` first."); + process::exit(1); + } + + // Load platform session for org/project context + let session = match PlatformSession::load() { + Ok(s) => s, + Err(_) => { + eprintln!("No project selected. Run `sync-ctl project select` first."); + process::exit(1); + } + }; + + let project_id = match &session.project_id { + Some(p) => p.clone(), + None => { + eprintln!("No project selected. 
Run `sync-ctl project select` first."); + process::exit(1); + } + }; + + // Create API client + let client = match PlatformApiClient::new() { + Ok(c) => c, + Err(e) => { + eprintln!("Failed to create API client: {}", e); + process::exit(1); + } + }; + + match command { + Some(DeployCommand::NewEnv) => { + // Run environment creation wizard + match create_environment_wizard(&client, &project_id).await { + EnvironmentCreationResult::Created(env) => { + // Optionally update session with the new environment + let new_session = PlatformSession::with_environment( + session.project_id.unwrap(), + session.project_name.unwrap_or_default(), + session.org_id.unwrap_or_default(), + session.org_name.unwrap_or_default(), + env.id.clone(), + env.name.clone(), + ); + + if let Err(e) = new_session.save() { + eprintln!("Warning: Failed to save session: {}", e); + } + + println!( + "\nContext updated: {}", + new_session.display_context().bright_cyan() + ); + println!( + "\nNext: Run {} to deploy a service", + "sync-ctl deploy".bright_cyan() + ); + Ok(()) + } + EnvironmentCreationResult::Cancelled => { + println!("{}", "Environment creation cancelled.".dimmed()); + Ok(()) + } + EnvironmentCreationResult::Error(e) => { + eprintln!("Error: {}", e); + process::exit(1); + } + } + } + Some(DeployCommand::Status { task_id, watch }) => { + // Check deployment status + use std::time::Duration; + use tokio::time::sleep; + + loop { + match client.get_deployment_status(&task_id).await { + Ok(status) => { + // Clear screen if watching + if watch { + print!("\x1B[2J\x1B[1;1H"); + } + + println!(); + println!( + "{}", + "โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" + .bright_blue() + ); + println!( + "{}", + format!(" Deployment Status: {}", task_id).bold() + ); + println!( + "{}", + 
"โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" + .bright_blue() + ); + println!(); + + // Status with color + let status_color = match status.status.as_str() { + "completed" => status.status.green(), + "failed" => status.status.red(), + _ => status.status.yellow(), + }; + println!(" Task Status: {}", status_color); + + // Overall status with color + let overall_color = match status.overall_status.as_str() { + "healthy" => status.overall_status.green(), + "failed" => status.overall_status.red(), + _ => status.overall_status.yellow(), + }; + println!(" Overall Status: {}", overall_color); + println!(" Progress: {}%", status.progress); + + if let Some(step) = &status.current_step { + println!(" Current Step: {}", step); + } + + if !status.overall_message.is_empty() { + println!(" Message: {}", status.overall_message); + } + + if let Some(error) = &status.error { + println!(); + println!(" {} {}", "Error:".red().bold(), error); + } + + println!(); + + // Check if we should stop watching + if !watch + || status.status == "completed" + || status.status == "failed" + { + if status.status == "completed" + && status.overall_status == "healthy" + { + println!( + " {} Deployment completed successfully!", + "โœ“".green() + ); + } else if status.status == "failed" + || status.overall_status == "failed" + { + println!(" {} Deployment failed.", "โœ—".red()); + process::exit(1); + } + break; + } + + // Wait before next poll + println!( + " {}", + "Watching... 
(Ctrl+C to stop)".dimmed() + ); + sleep(Duration::from_secs(5)).await; + } + Err(e) => { + eprintln!("Failed to get deployment status: {}", e); + process::exit(1); + } + } + } + Ok(()) + } + Some(DeployCommand::Wizard { path: wizard_path }) => { + // Always ask for environment selection + let (environment_id, _session) = match select_environment(&client, &project_id).await { + EnvironmentSelectionResult::Selected(env) => { + // Update session with selected environment + let new_session = PlatformSession::with_environment( + session.project_id.clone().unwrap(), + session.project_name.clone().unwrap_or_default(), + session.org_id.clone().unwrap_or_default(), + session.org_name.clone().unwrap_or_default(), + env.id.clone(), + env.name.clone(), + ); + let _ = new_session.save(); + (env.id, new_session) + } + EnvironmentSelectionResult::CreateNew => { + // Run environment creation wizard + match create_environment_wizard(&client, &project_id).await { + EnvironmentCreationResult::Created(env) => { + let new_session = PlatformSession::with_environment( + session.project_id.clone().unwrap(), + session.project_name.clone().unwrap_or_default(), + session.org_id.clone().unwrap_or_default(), + session.org_name.clone().unwrap_or_default(), + env.id.clone(), + env.name.clone(), + ); + let _ = new_session.save(); + (env.id, new_session) + } + EnvironmentCreationResult::Cancelled => { + println!("{}", "Environment creation cancelled.".dimmed()); + return Ok(()); + } + EnvironmentCreationResult::Error(e) => { + eprintln!("Error creating environment: {}", e); + process::exit(1); + } + } + } + EnvironmentSelectionResult::Cancelled => { + println!("{}", "Wizard cancelled.".dimmed()); + return Ok(()); + } + EnvironmentSelectionResult::Error(e) => { + eprintln!("Error: {}", e); + process::exit(1); + } + }; + + // Run deployment wizard + match run_wizard(&client, &project_id, &environment_id, &wizard_path).await { + WizardResult::Deployed(_info) => { + // Deployment was triggered 
successfully + // The orchestrator already printed success message with task ID + Ok(()) + } + WizardResult::Success(config) => { + println!("{}", "Deployment configuration created!".green().bold()); + if !config.is_complete() { + println!( + "{}", + format!("Missing fields: {:?}", config.missing_fields()) + .yellow() + ); + } + println!( + "\n{}", + "Next: Run deployment with created config".dimmed() + ); + Ok(()) + } + WizardResult::StartAgent(prompt) => { + println!( + "\n{} Starting agent to help create Dockerfile...\n", + "โ†’".cyan() + ); + // Transition to chat mode with the prompt + syncable_cli::run_command(Commands::Chat { + path: wizard_path, + provider: ChatProvider::Auto, + model: None, + query: Some(prompt), + resume: None, + list_sessions: false, + }) + .await + } + WizardResult::Cancelled => { + println!("{}", "Wizard cancelled.".dimmed()); + Ok(()) + } + WizardResult::Error(e) => { + eprintln!("Error: {}", e); + process::exit(1); + } + } + } + None => { + // Always ask for environment selection + let (environment_id, _session) = match select_environment(&client, &project_id).await { + EnvironmentSelectionResult::Selected(env) => { + // Update session with selected environment + let new_session = PlatformSession::with_environment( + session.project_id.clone().unwrap(), + session.project_name.clone().unwrap_or_default(), + session.org_id.clone().unwrap_or_default(), + session.org_name.clone().unwrap_or_default(), + env.id.clone(), + env.name.clone(), + ); + let _ = new_session.save(); + (env.id, new_session) + } + EnvironmentSelectionResult::CreateNew => { + // Run environment creation wizard + match create_environment_wizard(&client, &project_id).await { + EnvironmentCreationResult::Created(env) => { + let new_session = PlatformSession::with_environment( + session.project_id.clone().unwrap(), + session.project_name.clone().unwrap_or_default(), + session.org_id.clone().unwrap_or_default(), + session.org_name.clone().unwrap_or_default(), + 
env.id.clone(), + env.name.clone(), + ); + let _ = new_session.save(); + (env.id, new_session) + } + EnvironmentCreationResult::Cancelled => { + println!("{}", "Environment creation cancelled.".dimmed()); + return Ok(()); + } + EnvironmentCreationResult::Error(e) => { + eprintln!("Error creating environment: {}", e); + process::exit(1); + } + } + } + EnvironmentSelectionResult::Cancelled => { + println!("{}", "Wizard cancelled.".dimmed()); + return Ok(()); + } + EnvironmentSelectionResult::Error(e) => { + eprintln!("Error: {}", e); + process::exit(1); + } + }; + + // Run deployment wizard with top-level path + match run_wizard(&client, &project_id, &environment_id, &path).await { + WizardResult::Deployed(_info) => { + // Deployment was triggered successfully + // The orchestrator already printed success message with task ID + Ok(()) + } + WizardResult::Success(config) => { + println!("{}", "Deployment configuration created!".green().bold()); + if !config.is_complete() { + println!( + "{}", + format!("Missing fields: {:?}", config.missing_fields()) + .yellow() + ); + } + println!( + "\n{}", + "Next: Run deployment with created config".dimmed() + ); + Ok(()) + } + WizardResult::StartAgent(prompt) => { + println!( + "\n{} Starting agent to help create Dockerfile...\n", + "โ†’".cyan() + ); + // Transition to chat mode with the prompt + syncable_cli::run_command(Commands::Chat { + path: path.clone(), + provider: ChatProvider::Auto, + model: None, + query: Some(prompt), + resume: None, + list_sessions: false, + }) + .await + } + WizardResult::Cancelled => { + println!("{}", "Wizard cancelled.".dimmed()); + Ok(()) + } + WizardResult::Error(e) => { + eprintln!("Error: {}", e); + process::exit(1); + } + } + } + } + } }; // Flush telemetry events before exiting diff --git a/src/platform/api/client.rs b/src/platform/api/client.rs new file mode 100644 index 00000000..f3bae4fc --- /dev/null +++ b/src/platform/api/client.rs @@ -0,0 +1,1066 @@ +//! 
Platform API client for Syncable +//! +//! Provides authenticated access to the Syncable Platform API for managing +//! organizations, projects, and other platform resources. + +use super::error::{PlatformApiError, Result}; +use super::types::{ + ApiErrorResponse, ArtifactRegistry, AvailableRepositoriesResponse, CloudCredentialStatus, + CloudProvider, ClusterEntity, ConnectRepositoryRequest, ConnectRepositoryResponse, + CreateDeploymentConfigRequest, CreateDeploymentConfigResponse, CreateRegistryRequest, + CreateRegistryResponse, DeploymentConfig, DeploymentTaskStatus, Environment, GenericResponse, + GetLogsResponse, GitHubInstallationUrlResponse, GitHubInstallationsResponse, + InitializeGitOpsRequest, InitializeGitOpsResponse, Organization, PaginatedDeployments, Project, + ProjectRepositoriesResponse, RegistryTaskStatus, TriggerDeploymentRequest, + TriggerDeploymentResponse, UserProfile, +}; +use crate::auth::credentials; +use reqwest::Client; +use serde::de::DeserializeOwned; +use serde::Serialize; +use urlencoding; +use std::time::Duration; + +/// Production API URL +const SYNCABLE_API_URL_PROD: &str = "https://syncable.dev"; +/// Development API URL +const SYNCABLE_API_URL_DEV: &str = "http://localhost:4000"; + +/// User agent for API requests +const USER_AGENT: &str = concat!("syncable-cli/", env!("CARGO_PKG_VERSION")); + +/// Maximum number of retry attempts for transient failures +const MAX_RETRIES: u32 = 3; +/// Initial backoff delay in milliseconds +const INITIAL_BACKOFF_MS: u64 = 500; +/// Maximum backoff delay in milliseconds +const MAX_BACKOFF_MS: u64 = 5000; + +/// Check if an error is retryable (transient failure) +fn is_retryable_error(error: &PlatformApiError) -> bool { + matches!( + error, + PlatformApiError::HttpError(_) // Network errors, timeouts + | PlatformApiError::RateLimited // 429 - rate limited + | PlatformApiError::ServerError { .. 
} // 5xx - server errors + | PlatformApiError::ConnectionFailed // Connection failures + ) +} + +/// Client for interacting with the Syncable Platform API +pub struct PlatformApiClient { + /// HTTP client with configured timeout and headers + http_client: Client, + /// Base API URL + api_url: String, +} + +impl PlatformApiClient { + /// Create a new Platform API client using the default API URL + /// + /// Uses `SYNCABLE_ENV=development` to switch to local development server. + pub fn new() -> Result { + let api_url = get_api_url(); + Self::with_url(api_url) + } + + /// Create a new Platform API client with a custom API URL + pub fn with_url(api_url: impl Into) -> Result { + let http_client = Client::builder() + .timeout(Duration::from_secs(30)) + .user_agent(USER_AGENT) + .build() + .map_err(PlatformApiError::HttpError)?; + + Ok(Self { + http_client, + api_url: api_url.into(), + }) + } + + /// Get the configured API URL + pub fn api_url(&self) -> &str { + &self.api_url + } + + /// Get the authentication token from stored credentials + fn get_auth_token() -> Result { + credentials::get_access_token().ok_or(PlatformApiError::Unauthorized) + } + + /// Make an authenticated GET request with automatic retry for transient failures + async fn get(&self, path: &str) -> Result { + let token = Self::get_auth_token()?; + let url = format!("{}{}", self.api_url, path); + + let mut last_error = None; + let mut backoff_ms = INITIAL_BACKOFF_MS; + + for attempt in 0..=MAX_RETRIES { + let result = self + .http_client + .get(&url) + .bearer_auth(&token) + .send() + .await; + + match result { + Ok(response) => { + match self.handle_response(response).await { + Ok(data) => return Ok(data), + Err(e) if is_retryable_error(&e) && attempt < MAX_RETRIES => { + eprintln!( + "Request failed (attempt {}/{}), retrying in {}ms...", + attempt + 1, + MAX_RETRIES + 1, + backoff_ms + ); + last_error = Some(e); + tokio::time::sleep(Duration::from_millis(backoff_ms)).await; + backoff_ms = (backoff_ms 
* 2).min(MAX_BACKOFF_MS); + } + Err(e) => return Err(e), + } + } + Err(e) => { + let platform_error = PlatformApiError::HttpError(e); + if is_retryable_error(&platform_error) && attempt < MAX_RETRIES { + eprintln!( + "Network error (attempt {}/{}), retrying in {}ms...", + attempt + 1, + MAX_RETRIES + 1, + backoff_ms + ); + last_error = Some(platform_error); + tokio::time::sleep(Duration::from_millis(backoff_ms)).await; + backoff_ms = (backoff_ms * 2).min(MAX_BACKOFF_MS); + } else { + return Err(platform_error); + } + } + } + } + + Err(last_error.expect("retry loop should have set last_error")) + } + + /// Make an authenticated GET request that returns Option + /// Returns None for 404 responses instead of an error + /// Includes retry logic for transient failures + async fn get_optional(&self, path: &str) -> Result> { + let token = Self::get_auth_token()?; + let url = format!("{}{}", self.api_url, path); + + let mut last_error = None; + let mut backoff_ms = INITIAL_BACKOFF_MS; + + for attempt in 0..=MAX_RETRIES { + let result = self + .http_client + .get(&url) + .bearer_auth(&token) + .send() + .await; + + match result { + Ok(response) => { + let status = response.status(); + + if status.is_success() { + let result = response + .json::() + .await + .map_err(|e| PlatformApiError::ParseError(e.to_string()))?; + return Ok(Some(result)); + } else if status.as_u16() == 404 { + return Ok(None); + } else { + let status_code = status.as_u16(); + let error_body = response.text().await.unwrap_or_default(); + let error_message = serde_json::from_str::(&error_body) + .map(|e| e.get_message()) + .unwrap_or_else(|_| error_body.clone()); + + let error = match status_code { + 401 => PlatformApiError::Unauthorized, + 403 => PlatformApiError::PermissionDenied(error_message), + 429 => PlatformApiError::RateLimited, + 500..=599 => PlatformApiError::ServerError { + status: status_code, + message: error_message, + }, + _ => PlatformApiError::ApiError { + status: status_code, + message: 
error_message, + }, + }; + + if is_retryable_error(&error) && attempt < MAX_RETRIES { + eprintln!( + "Request failed (attempt {}/{}), retrying in {}ms...", + attempt + 1, + MAX_RETRIES + 1, + backoff_ms + ); + last_error = Some(error); + tokio::time::sleep(Duration::from_millis(backoff_ms)).await; + backoff_ms = (backoff_ms * 2).min(MAX_BACKOFF_MS); + } else { + return Err(error); + } + } + } + Err(e) => { + let platform_error = PlatformApiError::HttpError(e); + if is_retryable_error(&platform_error) && attempt < MAX_RETRIES { + eprintln!( + "Network error (attempt {}/{}), retrying in {}ms...", + attempt + 1, + MAX_RETRIES + 1, + backoff_ms + ); + last_error = Some(platform_error); + tokio::time::sleep(Duration::from_millis(backoff_ms)).await; + backoff_ms = (backoff_ms * 2).min(MAX_BACKOFF_MS); + } else { + return Err(platform_error); + } + } + } + } + + Err(last_error.expect("retry loop should have set last_error")) + } + + /// Make an authenticated POST request with a JSON body + /// Only retries on network errors (before request completes), not on server responses, + /// since POST requests may not be idempotent. 
+ async fn post(&self, path: &str, body: &B) -> Result { + let token = Self::get_auth_token()?; + let url = format!("{}{}", self.api_url, path); + + let mut last_error = None; + let mut backoff_ms = INITIAL_BACKOFF_MS; + + for attempt in 0..=MAX_RETRIES { + let result = self + .http_client + .post(&url) + .bearer_auth(&token) + .json(body) + .send() + .await; + + match result { + Ok(response) => { + // Got a response - don't retry POST even on server errors + return self.handle_response(response).await; + } + Err(e) => { + // Network error before request completed - safe to retry + let platform_error = PlatformApiError::HttpError(e); + if attempt < MAX_RETRIES { + eprintln!( + "Network error (attempt {}/{}), retrying in {}ms...", + attempt + 1, + MAX_RETRIES + 1, + backoff_ms + ); + last_error = Some(platform_error); + tokio::time::sleep(Duration::from_millis(backoff_ms)).await; + backoff_ms = (backoff_ms * 2).min(MAX_BACKOFF_MS); + } else { + return Err(platform_error); + } + } + } + } + + Err(last_error.expect("retry loop should have set last_error")) + } + + /// Handle the HTTP response, converting errors appropriately + async fn handle_response( + &self, + response: reqwest::Response, + ) -> Result { + let status = response.status(); + + if status.is_success() { + // Try to parse the response body + response + .json::() + .await + .map_err(|e| PlatformApiError::ParseError(e.to_string())) + } else { + // Try to parse error response for better error messages + let status_code = status.as_u16(); + let error_body = response.text().await.unwrap_or_default(); + let error_message = serde_json::from_str::(&error_body) + .map(|e| e.get_message()) + .unwrap_or_else(|_| error_body.clone()); + + match status_code { + 401 => Err(PlatformApiError::Unauthorized), + 403 => Err(PlatformApiError::PermissionDenied(error_message)), + 404 => Err(PlatformApiError::NotFound(error_message)), + 429 => Err(PlatformApiError::RateLimited), + 500..=599 => Err(PlatformApiError::ServerError 
{ + status: status_code, + message: error_message, + }), + _ => Err(PlatformApiError::ApiError { + status: status_code, + message: error_message, + }), + } + } + } + + // ========================================================================= + // User API methods + // ========================================================================= + + /// Get the current authenticated user's profile + /// + /// Endpoint: GET /api/users/me + pub async fn get_current_user(&self) -> Result { + self.get("/api/users/me").await + } + + // ========================================================================= + // Organization API methods + // ========================================================================= + + /// List organizations the authenticated user belongs to + /// + /// Endpoint: GET /api/organizations/attended-by-user + pub async fn list_organizations(&self) -> Result> { + let response: GenericResponse> = + self.get("/api/organizations/attended-by-user").await?; + Ok(response.data) + } + + /// Get an organization by ID + /// + /// Endpoint: GET /api/organizations/:id + pub async fn get_organization(&self, id: &str) -> Result { + let response: GenericResponse = + self.get(&format!("/api/organizations/{}", id)).await?; + Ok(response.data) + } + + // ========================================================================= + // Project API methods + // ========================================================================= + + /// List projects in an organization + /// + /// Endpoint: GET /api/projects/organization/:organizationId + pub async fn list_projects(&self, org_id: &str) -> Result> { + let response: GenericResponse> = self + .get(&format!("/api/projects/organization/{}", org_id)) + .await?; + Ok(response.data) + } + + /// Get a project by ID + /// + /// Endpoint: GET /api/projects/:id + pub async fn get_project(&self, id: &str) -> Result { + let response: GenericResponse = + self.get(&format!("/api/projects/{}", id)).await?; + Ok(response.data) + 
} + + /// Create a new project in an organization + /// + /// Endpoint: POST /api/projects + /// + /// Note: This first fetches the current user to get the creator_id. + pub async fn create_project( + &self, + org_id: &str, + name: &str, + description: &str, + ) -> Result { + // Get current user to use as creator + let user = self.get_current_user().await?; + + let request = serde_json::json!({ + "creatorId": user.id, + "organizationId": org_id, + "name": name, + "description": description, + "context": "" + }); + + let response: GenericResponse = self.post("/api/projects", &request).await?; + Ok(response.data) + } + + // ========================================================================= + // Repository API methods + // ========================================================================= + + /// List repositories connected to a project + /// + /// Returns all GitHub/GitLab repositories that have been connected to the project. + /// Use this to get repository info needed for deployment configuration. + /// + /// Endpoint: GET /api/github/projects/:projectId/repositories + pub async fn list_project_repositories( + &self, + project_id: &str, + ) -> Result { + let response: GenericResponse = self + .get(&format!( + "/api/github/projects/{}/repositories", + project_id + )) + .await?; + Ok(response.data) + } + + // ========================================================================= + // GitHub Integration API methods + // ========================================================================= + + /// List GitHub App installations for the organization + /// + /// Returns all GitHub App installations accessible to the authenticated user's organization. + /// Use this to find which GitHub accounts are connected. + /// + /// Endpoint: GET /api/github/installations + pub async fn list_github_installations(&self) -> Result { + // API returns { installations: [...] 
} directly (no GenericResponse wrapper) + self.get("/api/github/installations").await + } + + /// Get the URL to install the GitHub App + /// + /// Returns the URL users should visit to install the Syncable GitHub App. + /// Use this when no installations are found. + /// + /// Endpoint: GET /api/github/installation/url + pub async fn get_github_installation_url(&self) -> Result { + self.get("/api/github/installation/url").await + } + + /// List repositories available for connection + /// + /// Returns repositories accessible through GitHub App installations, + /// including which ones are already connected to the project. + /// + /// Endpoint: GET /api/github/repositories/available + pub async fn list_available_repositories( + &self, + project_id: Option<&str>, + search: Option<&str>, + page: Option, + ) -> Result { + let mut path = "/api/github/repositories/available".to_string(); + let mut params = vec![]; + + if let Some(pid) = project_id { + params.push(format!("projectId={}", pid)); + } + if let Some(s) = search { + params.push(format!("search={}", urlencoding::encode(s))); + } + if let Some(p) = page { + params.push(format!("page={}", p)); + } + + if !params.is_empty() { + path = format!("{}?{}", path, params.join("&")); + } + + let response: GenericResponse = self.get(&path).await?; + Ok(response.data) + } + + /// Connect a repository to a project + /// + /// Connects a GitHub repository to a project, allowing deployments from that repo. + /// + /// Endpoint: POST /api/github/projects/repositories/connect + pub async fn connect_repository( + &self, + request: &ConnectRepositoryRequest, + ) -> Result { + let response: GenericResponse = self + .post("/api/github/projects/repositories/connect", request) + .await?; + Ok(response.data) + } + + /// Initialize GitOps repository for a project + /// + /// Ensures a GitOps infrastructure repository exists for the project. + /// If it doesn't exist, automatically creates it using the GitHub App installation. 
+ /// + /// Endpoint: POST /api/projects/:projectId/gitops/initialize + pub async fn initialize_gitops( + &self, + project_id: &str, + installation_id: Option, + ) -> Result { + let request = InitializeGitOpsRequest { installation_id }; + let response: GenericResponse = self + .post( + &format!("/api/projects/{}/gitops/initialize", project_id), + &request, + ) + .await?; + Ok(response.data) + } + + // ========================================================================= + // Environment API methods + // ========================================================================= + + /// List environments for a project + /// + /// Returns all environments (deployment targets) defined for the project. + /// + /// Endpoint: GET /api/projects/:projectId/environments + pub async fn list_environments(&self, project_id: &str) -> Result> { + let response: GenericResponse> = self + .get(&format!("/api/projects/{}/environments", project_id)) + .await?; + Ok(response.data) + } + + /// Create a new environment for a project + /// + /// Creates an environment with the specified type (cluster or cloud). + /// For cluster environments, a cluster_id is required. 
+ /// + /// Endpoint: POST /api/environments + /// + /// Note: environment_type should be "cluster" (for K8s) or "cloud" (for Cloud Runner) + pub async fn create_environment( + &self, + project_id: &str, + name: &str, + environment_type: &str, + cluster_id: Option<&str>, + ) -> Result { + let mut request = serde_json::json!({ + "projectId": project_id, + "name": name, + "environmentType": environment_type, + }); + + if let Some(cid) = cluster_id { + request["clusterId"] = serde_json::json!(cid); + } + + let response: GenericResponse = + self.post("/api/environments", &request).await?; + Ok(response.data) + } + + // ========================================================================= + // Cloud Credentials API methods + // ========================================================================= + + /// Check if a cloud provider is connected to a project + /// + /// Returns `Some(status)` if the provider is connected, `None` if not connected. + /// + /// SECURITY NOTE: This method only returns connection STATUS, never actual credentials. + /// The agent should never have access to OAuth tokens, API keys, or other secrets. + /// + /// Uses: GET /api/cloud-credentials?projectId=xxx (lists all, then filters) + pub async fn check_provider_connection( + &self, + provider: &CloudProvider, + project_id: &str, + ) -> Result> { + // Use the list endpoint (which works) and filter by provider + // The single-provider endpoint may not exist on the backend + let all_credentials = self.list_cloud_credentials_for_project(project_id).await?; + let matching = all_credentials + .into_iter() + .find(|c| c.provider.eq_ignore_ascii_case(provider.as_str())); + Ok(matching) + } + + /// List all cloud credentials for a project + /// + /// Returns all connected cloud providers for the project. + /// + /// SECURITY NOTE: This method only returns connection STATUS, never actual credentials. 
+ /// + /// Endpoint: GET /api/cloud-credentials?projectId=xxx + pub async fn list_cloud_credentials_for_project( + &self, + project_id: &str, + ) -> Result> { + let response: GenericResponse> = self + .get(&format!("/api/cloud-credentials?projectId={}", project_id)) + .await?; + Ok(response.data) + } + + // ========================================================================= + // Deployment API methods + // ========================================================================= + + /// List deployment configurations for a project + /// + /// Returns all deployment configs associated with the project, including + /// service name, branch, target type, and auto-deploy settings. + /// + /// Endpoint: GET /api/projects/:projectId/deployment-configs + pub async fn list_deployment_configs(&self, project_id: &str) -> Result> { + let response: GenericResponse> = self + .get(&format!("/api/projects/{}/deployment-configs", project_id)) + .await?; + Ok(response.data) + } + + /// Create a new deployment configuration + /// + /// Creates a deployment config for a service. Requires repository integration + /// to be set up first (GitHub/GitLab). The project_id should be included in the request body. + /// + /// Returns the created/updated deployment config. The API also returns a `was_updated` + /// flag indicating whether this was an update to an existing config. 
+ /// + /// Endpoint: POST /api/deployment-configs + pub async fn create_deployment_config( + &self, + request: &CreateDeploymentConfigRequest, + ) -> Result { + // Log the full request for debugging + if let Ok(json) = serde_json::to_string_pretty(request) { + log::debug!("Creating deployment config with request:\n{}", json); + } + + let response: GenericResponse = + self.post("/api/deployment-configs", request).await?; + + log::debug!( + "Deployment config created: id={}, serviceName={}, wasUpdated={}", + response.data.config.id, + response.data.config.service_name, + response.data.was_updated + ); + + Ok(response.data.config) + } + + /// Trigger a deployment using a deployment config + /// + /// Starts a new deployment for the specified config. Optionally specify + /// a commit SHA to deploy a specific version. + /// + /// Endpoint: POST /api/deployment-configs/deploy + pub async fn trigger_deployment( + &self, + request: &TriggerDeploymentRequest, + ) -> Result { + log::debug!( + "Triggering deployment: POST /api/deployment-configs/deploy with projectId={}, configId={}", + request.project_id, + request.config_id + ); + + // API returns { data: TriggerDeploymentResponse } + let response: GenericResponse = + self.post("/api/deployment-configs/deploy", request).await?; + + log::debug!( + "Deployment triggered successfully: backstageTaskId={}, status={}", + response.data.backstage_task_id, + response.data.status + ); + + Ok(response.data) + } + + /// Get deployment task status + /// + /// Returns the current status of a deployment task, including progress + /// percentage, current step, and overall status. + /// + /// Endpoint: GET /api/deployments/task/:taskId + pub async fn get_deployment_status(&self, task_id: &str) -> Result { + self.get(&format!("/api/deployments/task/{}", task_id)) + .await + } + + /// List deployments for a project + /// + /// Returns a paginated list of deployments for the project, sorted by + /// creation time (most recent first). 
+ /// + /// Endpoint: GET /api/deployments/project/:projectId + pub async fn list_deployments( + &self, + project_id: &str, + limit: Option, + ) -> Result { + let path = match limit { + Some(l) => format!("/api/deployments/project/{}?limit={}", project_id, l), + None => format!("/api/deployments/project/{}", project_id), + }; + self.get(&path).await + } + + /// Get container logs for a deployed service + /// + /// Returns recent logs from the service's containers. Supports time filtering + /// and line limits for efficient log retrieval. + /// + /// # Arguments + /// + /// * `service_id` - The service/deployment ID (from list_deployments) + /// * `start` - Optional ISO timestamp to filter logs from + /// * `end` - Optional ISO timestamp to filter logs until + /// * `limit` - Optional max number of log lines (default: 100) + /// + /// Endpoint: GET /api/deployments/services/:serviceId/logs + pub async fn get_service_logs( + &self, + service_id: &str, + start: Option<&str>, + end: Option<&str>, + limit: Option, + ) -> Result { + let mut query_params = Vec::new(); + + if let Some(s) = start { + query_params.push(format!("start={}", s)); + } + if let Some(e) = end { + query_params.push(format!("end={}", e)); + } + if let Some(l) = limit { + query_params.push(format!("limit={}", l)); + } + + let path = if query_params.is_empty() { + format!("/api/deployments/services/{}/logs", service_id) + } else { + format!( + "/api/deployments/services/{}/logs?{}", + service_id, + query_params.join("&") + ) + }; + + self.get(&path).await + } + + // ========================================================================= + // Cluster API methods + // ========================================================================= + + /// List all clusters for a project + /// + /// Returns all K8s clusters available for deployments in this project. 
+ /// + /// Endpoint: GET /api/clusters/project/:projectId + pub async fn list_clusters_for_project(&self, project_id: &str) -> Result> { + let response: GenericResponse> = self + .get(&format!("/api/clusters/project/{}", project_id)) + .await?; + Ok(response.data) + } + + /// Get a specific cluster by ID + /// + /// Returns cluster details or None if not found. + /// + /// Endpoint: GET /api/clusters/:clusterId + pub async fn get_cluster(&self, cluster_id: &str) -> Result> { + // API wraps responses in { "data": ... }, so we need GenericResponse + let response: Option> = self + .get_optional(&format!("/api/clusters/{}", cluster_id)) + .await?; + Ok(response.map(|r| r.data)) + } + + // ========================================================================= + // Artifact Registry API methods + // ========================================================================= + + /// List all artifact registries for a project + /// + /// Returns all container registries available for image storage in this project. + /// + /// Endpoint: GET /api/projects/:projectId/artifact-registries + pub async fn list_registries_for_project( + &self, + project_id: &str, + ) -> Result> { + let response: GenericResponse> = self + .get(&format!("/api/projects/{}/artifact-registries", project_id)) + .await?; + Ok(response.data) + } + + /// List only ready artifact registries for a project + /// + /// Returns registries that are ready to receive image pushes. + /// Use this for deployment wizard to show only usable registries. + /// + /// Endpoint: GET /api/projects/:projectId/artifact-registries/ready + pub async fn list_ready_registries_for_project( + &self, + project_id: &str, + ) -> Result> { + let response: GenericResponse> = self + .get(&format!( + "/api/projects/{}/artifact-registries/ready", + project_id + )) + .await?; + Ok(response.data) + } + + /// Provision a new artifact registry + /// + /// Starts async provisioning via Backstage scaffolder. 
+ /// Returns task ID for polling status. + /// + /// Endpoint: POST /api/projects/:projectId/artifact-registries + pub async fn create_registry( + &self, + project_id: &str, + request: &CreateRegistryRequest, + ) -> Result { + self.post( + &format!("/api/projects/{}/artifact-registries", project_id), + request, + ) + .await + } + + /// Get registry provisioning task status + /// + /// Poll this endpoint to check provisioning progress. + /// + /// Endpoint: GET /api/artifact-registries/task/:taskId + pub async fn get_registry_task_status(&self, task_id: &str) -> Result { + self.get(&format!("/api/artifact-registries/task/{}", task_id)) + .await + } + + // ========================================================================= + // Health Check API methods + // ========================================================================= + + /// Check if the API is reachable (quick health check) + /// + /// Uses a shorter timeout (5s) for quick connectivity verification. + /// This method does NOT require authentication. + /// + /// Returns `Ok(())` if API is reachable, `Err(ConnectionFailed)` otherwise. 
// Health-check method, environment-based URL selection, and unit tests for
// src/platform/api/client.rs, followed by the start of the new file
// src/platform/api/error.rs (see file separator comment below).
impl PlatformApiClient {
    /// Check if the API is reachable (quick health check).
    ///
    /// Uses a shorter timeout (5s) for quick connectivity verification.
    /// This method does NOT require authentication.
    ///
    /// Returns `Ok(())` if the API is reachable, `Err(ConnectionFailed)`
    /// otherwise.
    pub async fn check_connection(&self) -> Result<()> {
        // Use a dedicated client with a shorter timeout for health checks.
        let health_client = Client::builder()
            .timeout(Duration::from_secs(5))
            .user_agent(USER_AGENT)
            .build()
            .map_err(PlatformApiError::HttpError)?;

        let url = format!("{}/health", self.api_url);

        match health_client.get(&url).send().await {
            Ok(response) => {
                if response.status().is_success() {
                    Ok(())
                } else {
                    Err(PlatformApiError::ConnectionFailed)
                }
            }
            Err(_) => Err(PlatformApiError::ConnectionFailed),
        }
    }
}

/// Get the API URL based on environment.
///
/// `SYNCABLE_ENV=development` selects the local development server; anything
/// else selects production.
fn get_api_url() -> &'static str {
    if std::env::var("SYNCABLE_ENV").as_deref() == Ok("development") {
        SYNCABLE_API_URL_DEV
    } else {
        SYNCABLE_API_URL_PROD
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_client_construction() {
        let client = PlatformApiClient::with_url("https://example.com").unwrap();
        assert_eq!(client.api_url(), "https://example.com");
    }

    #[test]
    fn test_url_building() {
        let client = PlatformApiClient::with_url("https://api.example.com").unwrap();

        // Verify the base URL is stored correctly.
        assert_eq!(client.api_url(), "https://api.example.com");

        // Test path concatenation logic (implicitly tested through api_url).
        let expected_path = format!("{}/api/organizations/123", client.api_url());
        assert_eq!(expected_path, "https://api.example.com/api/organizations/123");
    }

    #[test]
    fn test_error_type_creation() {
        // Test that error types can be created correctly.
        let unauthorized = PlatformApiError::Unauthorized;
        assert!(unauthorized.to_string().contains("Not authenticated"));

        let not_found = PlatformApiError::NotFound("Resource not found".to_string());
        assert!(not_found.to_string().contains("Not found"));

        let api_error = PlatformApiError::ApiError {
            status: 400,
            message: "Bad request".to_string(),
        };
        assert!(api_error.to_string().contains("400"));
        assert!(api_error.to_string().contains("Bad request"));

        let permission_denied =
            PlatformApiError::PermissionDenied("Access denied".to_string());
        assert!(permission_denied.to_string().contains("Permission denied"));

        let rate_limited = PlatformApiError::RateLimited;
        assert!(rate_limited.to_string().contains("Rate limit"));

        let server_error = PlatformApiError::ServerError {
            status: 500,
            message: "Internal server error".to_string(),
        };
        assert!(server_error.to_string().contains("500"));
    }

    #[test]
    fn test_api_url_constants() {
        // Test that our URL constants are valid.
        assert!(SYNCABLE_API_URL_PROD.starts_with("https://"));
        assert!(SYNCABLE_API_URL_DEV.starts_with("http://"));
    }

    #[test]
    fn test_user_agent() {
        // Verify user agent contains version.
        assert!(USER_AGENT.starts_with("syncable-cli/"));
    }

    #[test]
    fn test_parse_error_creation() {
        let error = PlatformApiError::ParseError("invalid json".to_string());
        assert!(error.to_string().contains("parse"));
        assert!(error.to_string().contains("invalid json"));
    }

    #[test]
    fn test_http_error_conversion() {
        // Test that reqwest errors can be converted.
        // This is a compile-time check via the From trait.
        let _: fn(reqwest::Error) -> PlatformApiError = PlatformApiError::from;
    }

    #[test]
    fn test_provider_connection_path() {
        // NOTE(review): this asserts the single-provider endpoint path, but
        // check_provider_connection() now uses the list endpoint and filters
        // client-side - this test only exercises string formatting and is
        // stale relative to the client implementation; consider removing or
        // updating it.
        let provider = CloudProvider::Gcp;
        let project_id = "proj-123";
        let expected_path = format!(
            "/api/cloud-credentials/provider/{}?projectId={}",
            provider.as_str(),
            project_id
        );
        assert_eq!(expected_path, "/api/cloud-credentials/provider/gcp?projectId=proj-123");
    }

    #[test]
    fn test_service_logs_path_no_params() {
        // Test logs path without query params.
        let service_id = "svc-123";
        let path = format!("/api/deployments/services/{}/logs", service_id);
        assert_eq!(path, "/api/deployments/services/svc-123/logs");
    }

    #[test]
    fn test_service_logs_path_with_params() {
        // Test logs path with query params.
        let service_id = "svc-123";
        let mut query_params = Vec::new();
        query_params.push("start=2024-01-01T00:00:00Z".to_string());
        query_params.push("limit=50".to_string());
        let path = format!(
            "/api/deployments/services/{}/logs?{}",
            service_id,
            query_params.join("&")
        );
        assert_eq!(path, "/api/deployments/services/svc-123/logs?start=2024-01-01T00:00:00Z&limit=50");
    }

    #[test]
    fn test_list_environments_path() {
        // Test that the API path is built correctly.
        let project_id = "proj-123";
        let path = format!("/api/projects/{}/environments", project_id);
        assert_eq!(path, "/api/projects/proj-123/environments");
    }

    #[test]
    fn test_create_environment_request() {
        // Test that the request JSON is built correctly.
        let project_id = "proj-123";
        let name = "production";
        let environment_type = "cluster";
        let cluster_id = Some("cluster-456");

        let mut request = serde_json::json!({
            "projectId": project_id,
            "name": name,
            "environmentType": environment_type,
        });

        if let Some(cid) = cluster_id {
            request["clusterId"] = serde_json::json!(cid);
        }

        let json_str = request.to_string();
        assert!(json_str.contains("\"projectId\":\"proj-123\""));
        assert!(json_str.contains("\"name\":\"production\""));
        assert!(json_str.contains("\"environmentType\":\"cluster\""));
        assert!(json_str.contains("\"clusterId\":\"cluster-456\""));
    }

    #[test]
    fn test_create_environment_request_cloud() {
        // Test request without cluster_id (cloud runner).
        let project_id = "proj-123";
        let name = "staging";
        let environment_type = "cloud";
        let cluster_id: Option<&str> = None;

        let mut request = serde_json::json!({
            "projectId": project_id,
            "name": name,
            "environmentType": environment_type,
        });

        if let Some(cid) = cluster_id {
            request["clusterId"] = serde_json::json!(cid);
        }

        let json_str = request.to_string();
        assert!(json_str.contains("\"environmentType\":\"cloud\""));
        assert!(!json_str.contains("clusterId"));
    }
}

// ===== new file: src/platform/api/error.rs =====

//! Error types for the Platform API client
//!
//! Provides structured error types for all API operations.

use thiserror::Error;

/// Errors that can occur when interacting with the Syncable Platform API.
#[derive(Debug, Error)]
pub enum PlatformApiError {
    /// HTTP request failed (network error, timeout, etc.)
    #[error("HTTP request failed: {0}")]
    HttpError(#[from] reqwest::Error),

    /// API returned an error response
    #[error("API error ({status}): {message}")]
    ApiError {
        /// HTTP status code
        status: u16,
        /// Error message from the API
        message: String,
    },

    /// Failed to parse the API response
    #[error("Failed to parse response: {0}")]
    ParseError(String),

    /// User is not authenticated - needs to run `sync-ctl auth login`
    #[error("Not authenticated - run `sync-ctl auth login` first")]
    Unauthorized,

    /// Requested resource was not found
    #[error("Not found: {0}")]
    NotFound(String),

    /// User does not have permission for the requested operation
    #[error("Permission denied: {0}")]
    PermissionDenied(String),

    /// Rate limit exceeded
    #[error("Rate limit exceeded - please try again later")]
    RateLimited,

    /// Server error
    #[error("Server error ({status}): {message}")]
    ServerError {
        /// HTTP status code (5xx)
        status: u16,
        /// Error message
        message: String,
    },

    /// Could not connect to the Syncable API
    #[error("Could not connect to Syncable API - check your internet connection")]
    ConnectionFailed,
}

// NOTE(review): the original diff continues with `impl PlatformApiError`
// (a user-facing suggestion helper) whose body runs past the end of this
// chunk; it is intentionally not reconstructed here.
+ pub fn suggestion(&self) -> Option<&'static str> { + match self { + Self::Unauthorized => Some("Run `sync-ctl auth login` to authenticate"), + Self::RateLimited => Some("Wait a moment and try again"), + Self::HttpError(_) => Some("Check your internet connection"), + Self::ServerError { .. } => { + Some("The server is experiencing issues. Try again later") + } + Self::PermissionDenied(_) => { + Some("Check your project permissions in the Syncable dashboard") + } + Self::NotFound(_) => Some("Verify the resource ID is correct"), + Self::ParseError(_) => Some("This may be a bug - please report it"), + Self::ApiError { status, .. } if *status >= 400 && *status < 500 => { + Some("Check the request parameters") + } + Self::ConnectionFailed => { + Some("Check your internet connection and try again") + } + _ => None, + } + } + + /// Format the error with suggestion if available + /// + /// Returns the error message followed by a suggestion on how to resolve it. + pub fn with_suggestion(&self) -> String { + match self.suggestion() { + Some(suggestion) => format!("{}\n โ†’ {}", self, suggestion), + None => self.to_string(), + } + } +} + +/// Result type alias for Platform API operations +pub type Result = std::result::Result; diff --git a/src/platform/api/mod.rs b/src/platform/api/mod.rs new file mode 100644 index 00000000..6b526a59 --- /dev/null +++ b/src/platform/api/mod.rs @@ -0,0 +1,37 @@ +//! Platform API client module +//! +//! Provides authenticated access to the Syncable Platform API for managing +//! organizations, projects, and other platform resources. +//! +//! # Example +//! +//! ```rust,ignore +//! use syncable_cli::platform::api::PlatformApiClient; +//! +//! #[tokio::main] +//! async fn main() -> Result<(), Box> { +//! let client = PlatformApiClient::new()?; +//! +//! // List organizations +//! let orgs = client.list_organizations().await?; +//! for org in orgs { +//! println!("Organization: {}", org.name); +//! } +//! +//! Ok(()) +//! } +//! 
``` + +pub mod client; +pub mod error; +pub mod types; + +// Re-export commonly used items +pub use client::PlatformApiClient; +pub use error::{PlatformApiError, Result}; +pub use types::{ + ArtifactRegistry, CloudCredentialStatus, CloudProvider, ClusterEntity, ClusterStatus, + DeployedService, DeploymentConfig, DeploymentTaskStatus, Environment, Organization, + PaginatedDeployments, PaginationInfo, Project, ProjectMember, RegistryStatus, + TriggerDeploymentRequest, TriggerDeploymentResponse, UserProfile, +}; diff --git a/src/platform/api/types.rs b/src/platform/api/types.rs new file mode 100644 index 00000000..122a6305 --- /dev/null +++ b/src/platform/api/types.rs @@ -0,0 +1,1582 @@ +//! API response types for the Syncable Platform API +//! +//! These types mirror the backend DTOs for organizations, projects, and related entities. + +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::str::FromStr; + +/// Generic API response wrapper +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct GenericResponse { + /// The response data + pub data: T, +} + +/// Organization information +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Organization { + /// Unique organization identifier (UUID) + pub id: String, + /// Organization display name + pub name: String, + /// URL-friendly slug + pub slug: String, + /// Optional logo URL + pub logo: Option, + /// When the organization was created + pub created_at: DateTime, +} + +/// Project information +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Project { + /// Unique project identifier (UUID) + pub id: String, + /// Project display name + pub name: String, + /// Project description + pub description: String, + /// Parent organization ID + pub organization_id: String, + /// When the project was created + pub created_at: DateTime, + /// Project context/notes (optional) + #[serde(default)] 
+ pub context: Option, +} + +/// Project member information +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ProjectMember { + /// User ID of the member + pub user_id: String, + /// Member's role in the project + pub role: String, +} + +/// Request body for creating a new project +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CreateProjectRequest { + /// ID of the user creating the project + pub creator_id: String, + /// Project name + pub name: String, + /// Project description + pub description: String, + /// Project context/notes + #[serde(default)] + pub context: String, +} + +/// User profile information (from /api/users/me) +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UserProfile { + /// User ID (UUID) + pub id: String, + /// User's email address + pub email: String, + /// User's display name + pub name: Option, + /// Profile image URL + pub image: Option, +} + +/// API error response format +#[derive(Debug, Clone, Deserialize)] +pub struct ApiErrorResponse { + /// Error message + pub error: Option, + /// Detailed error message + pub message: Option, +} + +impl ApiErrorResponse { + /// Get the error message, preferring `message` over `error` + pub fn get_message(&self) -> String { + self.message + .clone() + .or_else(|| self.error.clone()) + .unwrap_or_else(|| "Unknown error".to_string()) + } +} + +/// Cloud provider types supported by the platform +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[serde(rename_all = "lowercase")] +pub enum CloudProvider { + Gcp, + Aws, + Azure, + Hetzner, + Scaleway, + Cyso, +} + +impl CloudProvider { + /// Returns the lowercase string identifier for this provider + pub fn as_str(&self) -> &'static str { + match self { + CloudProvider::Gcp => "gcp", + CloudProvider::Aws => "aws", + CloudProvider::Azure => "azure", + CloudProvider::Hetzner => "hetzner", + 
CloudProvider::Scaleway => "scaleway", + CloudProvider::Cyso => "cyso", + } + } + + /// Returns the human-readable display name for this provider + pub fn display_name(&self) -> &'static str { + match self { + CloudProvider::Gcp => "Google Cloud Platform", + CloudProvider::Aws => "Amazon Web Services", + CloudProvider::Azure => "Microsoft Azure", + CloudProvider::Hetzner => "Hetzner Cloud", + CloudProvider::Scaleway => "Scaleway", + CloudProvider::Cyso => "Cyso Cloud", + } + } + + /// Returns all supported cloud providers + pub fn all() -> &'static [CloudProvider] { + &[ + CloudProvider::Gcp, + CloudProvider::Hetzner, + CloudProvider::Aws, + CloudProvider::Azure, + CloudProvider::Scaleway, + CloudProvider::Cyso, + ] + } + + /// Returns whether this provider is currently available for deployment + /// + /// Returns `true` for GCP and Hetzner (currently supported). + /// Returns `false` for AWS, Azure, Scaleway, Cyso (coming soon). + pub fn is_available(&self) -> bool { + matches!(self, CloudProvider::Gcp | CloudProvider::Hetzner) + } +} + +impl fmt::Display for CloudProvider { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +impl FromStr for CloudProvider { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "gcp" | "google" | "google-cloud" => Ok(CloudProvider::Gcp), + "aws" | "amazon" => Ok(CloudProvider::Aws), + "azure" | "microsoft" => Ok(CloudProvider::Azure), + "hetzner" => Ok(CloudProvider::Hetzner), + "scaleway" => Ok(CloudProvider::Scaleway), + "cyso" | "cyso-cloud" => Ok(CloudProvider::Cyso), + _ => Err(format!( + "Unknown cloud provider: '{}'. Valid options: gcp, aws, azure, hetzner, scaleway, cyso", + s + )), + } + } +} + +/// Minimal credential info (no secrets - just connection status) +/// +/// SECURITY NOTE: This type intentionally contains only non-sensitive metadata. +/// Actual credentials (OAuth tokens, API keys, etc.) 
are NEVER exposed through +/// this API. The agent only needs to know IF a provider is connected, not the +/// actual credential values. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CloudCredentialStatus { + /// Unique identifier for this credential record + pub id: String, + /// The cloud provider this credential is for (lowercase: gcp, aws, azure, hetzner) + pub provider: String, + // NOTE: Never include tokens/secrets here - this is intentionally minimal +} + +// ============================================================================= +// Environment Types +// ============================================================================= + +/// Environment entity for a project +/// +/// Environments define deployment targets within a project. +/// Each deployment configuration is associated with an environment. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Environment { + /// Unique environment identifier (UUID) + pub id: String, + /// Environment display name (e.g., "production", "staging", "development") + pub name: String, + /// Parent project ID + pub project_id: String, + /// Environment type: "cluster" (K8s) or "cloud" (Cloud Runner) + pub environment_type: String, + /// Cluster ID (only for cluster type) + #[serde(default)] + pub cluster_id: Option, + /// Kubernetes namespace (only for cluster type) + #[serde(default)] + pub namespace: Option, + /// Description + #[serde(default)] + pub description: Option, + /// Whether the environment is active + #[serde(default = "default_true")] + pub is_active: bool, + /// When the environment was created + #[serde(default)] + pub created_at: Option, + /// When the environment was last updated + #[serde(default)] + pub updated_at: Option, +} + +fn default_true() -> bool { + true +} + +// ============================================================================= +// Deployment Types +// 
============================================================================= + +/// Deployment configuration for a service +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DeploymentConfig { + /// Unique identifier for this deployment config + pub id: String, + /// The project this config belongs to + pub project_id: String, + /// Repository ID (from GitHub/GitLab integration) + pub repository_id: i64, + /// Full repository name (e.g., "owner/repo") + pub repository_full_name: String, + /// Name of the service being deployed + pub service_name: String, + /// Environment ID for deployment + pub environment_id: String, + /// Target type: "kubernetes" or "cloud_runner" + pub target_type: Option, + /// Branch to deploy from + pub branch: String, + /// Port the service listens on + pub port: i32, + /// Whether auto-deploy on push is enabled + pub auto_deploy_enabled: bool, + /// Deployment strategy (e.g., "rolling", "blue_green") + pub deployment_strategy: Option, + /// When this config was created + pub created_at: DateTime, +} + +/// Response from creating a deployment config +/// +/// The API returns the config wrapped with a wasUpdated flag indicating +/// whether an existing config was updated or a new one was created. 
+#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CreateDeploymentConfigResponse { + /// The created or updated deployment config + pub config: DeploymentConfig, + /// Whether this was an update to an existing config (vs new creation) + pub was_updated: bool, +} + +/// Request to trigger deployment +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct TriggerDeploymentRequest { + /// Project ID for the deployment + pub project_id: String, + /// Deployment config ID to use + pub config_id: String, + /// Optional specific commit SHA to deploy (defaults to latest) + #[serde(skip_serializing_if = "Option::is_none")] + pub commit_sha: Option, +} + +/// Response from triggering a deployment +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TriggerDeploymentResponse { + /// The deployment config ID used + pub config_id: String, + /// Task ID to track deployment progress + pub backstage_task_id: String, + /// Initial status of the deployment + pub status: String, + /// Human-readable message about the deployment + pub message: String, +} + +/// Deployment task status +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DeploymentTaskStatus { + /// Task status: "processing", "completed", "failed" + pub status: String, + /// Progress percentage (0-100) + pub progress: i32, + /// Current step description + pub current_step: Option, + /// Overall deployment status: "generating", "building", "deploying", "healthy", "failed" + pub overall_status: String, + /// Human-readable overall message + pub overall_message: String, + /// Error message if deployment failed + pub error: Option, +} + +/// Deployed service info +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DeployedService { + /// Unique deployment ID + pub id: String, + /// Project this deployment belongs to + pub project_id: String, + /// Name of the 
deployed service + pub service_name: String, + /// Full repository name + pub repository_full_name: String, + /// Deployment status + pub status: String, + /// Task ID used for this deployment + pub backstage_task_id: Option, + /// Commit SHA that was deployed + pub commit_sha: Option, + /// Public URL of the deployed service + pub public_url: Option, + /// When this deployment was created + pub created_at: DateTime, +} + +/// Paginated list of deployments +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct PaginatedDeployments { + /// List of deployments + pub data: Vec, + /// Pagination info + pub pagination: PaginationInfo, +} + +/// Pagination information for list responses +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct PaginationInfo { + /// Cursor for next page (if any) + pub next_cursor: Option, + /// Whether there are more results + pub has_more: bool, +} + +// ============================================================================= +// Log Types +// ============================================================================= + +/// A single log entry from a container +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct LogEntry { + /// ISO timestamp when log was generated + pub timestamp: String, + /// Log message content + pub message: String, + /// Container metadata labels + pub labels: std::collections::HashMap, +} + +/// Statistics about the log query +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct LogQueryStats { + /// Number of log entries returned + pub entries_returned: i32, + /// Time taken to execute query in milliseconds + pub query_time_ms: i64, +} + +/// Response from log query endpoint +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetLogsResponse { + /// Log entries + pub data: Vec, + /// Query statistics + pub stats: LogQueryStats, +} + +// 
============================================================================= +// Cluster Types +// ============================================================================= + +/// K8s cluster entity from platform +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ClusterEntity { + /// Unique cluster identifier + pub id: String, + /// Cluster display name + pub name: String, + /// Cloud provider hosting the cluster + pub provider: CloudProvider, + /// Region where cluster is deployed + pub region: String, + /// Current cluster status + pub status: ClusterStatus, + /// Kubernetes version (if available) + pub kubernetes_version: Option, + /// Number of nodes in the cluster (if available) + pub node_count: Option, + /// When the cluster was created + pub created_at: String, +} + +/// Status of a K8s cluster +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum ClusterStatus { + Provisioning, + Running, + Updating, + Deleting, + Error, + #[serde(other)] + Unknown, +} + +impl ClusterStatus { + /// Returns a human-readable display string for the status + pub fn display(&self) -> &'static str { + match self { + ClusterStatus::Provisioning => "Provisioning", + ClusterStatus::Running => "Running", + ClusterStatus::Updating => "Updating", + ClusterStatus::Deleting => "Deleting", + ClusterStatus::Error => "Error", + ClusterStatus::Unknown => "Unknown", + } + } +} + +// ============================================================================= +// Artifact Registry Types +// ============================================================================= + +/// Artifact registry for container images +/// +/// This maps to the backend's ProvisionedArtifactRegistryDto +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArtifactRegistry { + /// Unique registry identifier + pub id: String, + /// Registry display name + pub name: 
String, + /// Cloud provider hosting the registry + #[serde(alias = "provider")] + pub cloud_provider: CloudProvider, + /// Region where registry is located + pub region: String, + /// URL to push/pull images + pub registry_url: String, + /// Current registry status + pub status: RegistryStatus, + /// When the registry was created (ISO 8601 format) + #[serde(default)] + pub created_at: Option, + /// When the registry was last updated + #[serde(default)] + pub updated_at: Option, +} + +impl ArtifactRegistry { + /// Get the cloud provider (for backwards compatibility) + pub fn provider(&self) -> &CloudProvider { + &self.cloud_provider + } +} + +/// Status of an artifact registry +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum RegistryStatus { + Provisioning, + Ready, + Error, + #[serde(other)] + Unknown, +} + +impl RegistryStatus { + /// Returns a human-readable display string for the status + pub fn display(&self) -> &'static str { + match self { + RegistryStatus::Provisioning => "Provisioning", + RegistryStatus::Ready => "Ready", + RegistryStatus::Error => "Error", + RegistryStatus::Unknown => "Unknown", + } + } +} + +/// Request to provision a new artifact registry +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct CreateRegistryRequest { + /// Project ID for the registry + pub project_id: String, + /// Cluster ID to associate registry with + pub cluster_id: String, + /// Cluster name for display + pub cluster_name: String, + /// Name for the new registry + pub registry_name: String, + /// Cloud provider hosting the registry + pub cloud_provider: String, + /// Region for the registry + pub region: String, + /// GCP project ID (required for GCP provider) + #[serde(skip_serializing_if = "Option::is_none")] + pub gcp_project_id: Option, +} + +/// Response from registry provisioning +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct 
CreateRegistryResponse { + /// Task ID for tracking provisioning progress + pub task_id: String, + /// Initial status + pub status: String, + /// Human-readable message + pub message: String, + /// Registry name (if immediately available) + pub registry_name: Option, + /// Registry URL (if immediately available) + pub registry_url: Option, + /// Cloud provider + pub cloud_provider: String, + /// When the task was created + pub created_at: String, +} + +/// Task status when polling registry provisioning +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RegistryTaskStatus { + /// Current task state + pub status: RegistryTaskState, + /// Current step description + pub current_step: Option, + /// Progress percentage (0-100) + pub progress: Option, + /// Overall status message + pub overall_status: Option, + /// Overall human-readable message + pub overall_message: Option, + /// Output data when completed + #[serde(default)] + pub output: RegistryTaskOutput, + /// Error info if failed + pub error: Option, +} + +/// State of a registry provisioning task +#[derive(Debug, Clone, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "lowercase")] +pub enum RegistryTaskState { + Processing, + Completed, + Failed, + Cancelled, + #[serde(other)] + Unknown, +} + +/// Output data from a completed registry provisioning task +#[derive(Debug, Clone, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RegistryTaskOutput { + /// Name of the provisioned registry + pub registry_name: Option, + /// URL to push/pull images + pub registry_url: Option, + /// Cloud provider that hosts the registry + pub cloud_provider: Option, + /// URL to the commit that created the registry + pub commit_url: Option, +} + +/// Error details from a failed registry provisioning task +#[derive(Debug, Clone, Deserialize)] +pub struct RegistryTaskError { + /// Error name/type + pub name: String, + /// Error message + pub message: String, +} + +// 
============================================================================= +// CLI Wizard Types +// ============================================================================= + +/// Deployment target type for the CLI wizard +/// +/// Determines whether the service deploys to a managed Cloud Runner +/// (GCP Cloud Run, Hetzner container) or to a Kubernetes cluster. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum DeploymentTarget { + /// Deploy to Cloud Runner (GCP Cloud Run or Hetzner container) + /// No cluster required - fully managed by cloud provider + CloudRunner, + /// Deploy to a Kubernetes cluster + /// Requires cluster selection + Kubernetes, +} + +impl DeploymentTarget { + /// Returns the API string representation + pub fn as_str(&self) -> &'static str { + match self { + DeploymentTarget::CloudRunner => "cloud_runner", + DeploymentTarget::Kubernetes => "kubernetes", + } + } + + /// Returns a human-readable display name + pub fn display_name(&self) -> &'static str { + match self { + DeploymentTarget::CloudRunner => "Cloud Runner", + DeploymentTarget::Kubernetes => "Kubernetes", + } + } +} + +impl fmt::Display for DeploymentTarget { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +/// Deployment configuration being built by the CLI wizard +/// +/// This type accumulates selections made during the wizard flow +/// before being converted to a CreateDeploymentConfigRequest. 
+#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct WizardDeploymentConfig { + /// Service name (from Dockerfile discovery or user input) + pub service_name: Option, + /// Path to the Dockerfile relative to repo root + pub dockerfile_path: Option, + /// Build context path relative to repo root + pub build_context: Option, + /// Port the service listens on + pub port: Option, + /// Git branch to deploy from + pub branch: Option, + /// Deployment target type + pub target: Option, + /// Selected cloud provider + pub provider: Option, + /// Selected cluster ID (required for Kubernetes target) + pub cluster_id: Option, + /// Selected registry ID (or None to provision new) + pub registry_id: Option, + /// Environment ID for deployment + pub environment_id: Option, + /// Enable auto-deploy on push + pub auto_deploy: bool, + /// Region/Location for Cloud Runner deployment (e.g., "nbg1" for Hetzner, "us-central1" for GCP) + pub region: Option, + /// Machine/Instance type for Cloud Runner (e.g., "cx22" for Hetzner, "e2-small" for GCP) + pub machine_type: Option, + /// Whether the service should be publicly accessible + pub is_public: bool, + /// Health check endpoint path (optional, e.g., "/health" or "/healthz") + pub health_check_path: Option, +} + +impl WizardDeploymentConfig { + /// Create a new empty wizard config + pub fn new() -> Self { + Self::default() + } + + /// Check if all required fields are set for the selected target + pub fn is_complete(&self) -> bool { + let base_complete = self.service_name.is_some() + && self.port.is_some() + && self.branch.is_some() + && self.target.is_some() + && self.provider.is_some() + && self.environment_id.is_some(); + + if !base_complete { + return false; + } + + // K8s requires cluster selection + if self.target == Some(DeploymentTarget::Kubernetes) { + return self.cluster_id.is_some(); + } + + // Cloud Runner requires region and machine type + if self.target == 
Some(DeploymentTarget::CloudRunner) { + return self.region.is_some() && self.machine_type.is_some(); + } + + true + } + + /// Get a list of missing required fields + pub fn missing_fields(&self) -> Vec<&'static str> { + let mut missing = Vec::new(); + if self.service_name.is_none() { + missing.push("service_name"); + } + if self.port.is_none() { + missing.push("port"); + } + if self.branch.is_none() { + missing.push("branch"); + } + if self.target.is_none() { + missing.push("target"); + } + if self.provider.is_none() { + missing.push("provider"); + } + if self.environment_id.is_none() { + missing.push("environment_id"); + } + if self.target == Some(DeploymentTarget::Kubernetes) && self.cluster_id.is_none() { + missing.push("cluster_id"); + } + if self.target == Some(DeploymentTarget::CloudRunner) { + if self.region.is_none() { + missing.push("region"); + } + if self.machine_type.is_none() { + missing.push("machine_type"); + } + } + missing + } +} + +/// Repository connected to a project +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ProjectRepository { + /// Connection ID + pub id: String, + /// Project ID + pub project_id: String, + /// GitHub repository ID + pub repository_id: i64, + /// Repository name (e.g., "my-repo") + pub repository_name: String, + /// Full repository name (e.g., "owner/my-repo") + pub repository_full_name: String, + /// Repository owner + pub repository_owner: String, + /// Whether the repository is private + pub repository_private: bool, + /// Default branch name + #[serde(default)] + pub default_branch: Option, + /// Whether the connection is active + #[serde(default = "default_true")] + pub is_active: bool, + /// Connection type (e.g., "app") + #[serde(default)] + pub connection_type: Option, + /// Repository type (e.g., "application", "gitops") + #[serde(default)] + pub repository_type: Option, + /// Whether this is the primary GitOps repository + #[serde(default)] + pub 
is_primary_git_ops: Option, + /// GitHub installation ID + #[serde(default)] + pub github_installation_id: Option, + /// User ID who connected the repository + #[serde(default)] + pub user_id: Option, + /// When the repository was connected + #[serde(default)] + pub created_at: Option, + /// When the repository was last updated + #[serde(default)] + pub updated_at: Option, +} + +/// Response for listing project repositories +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ProjectRepositoriesResponse { + /// Connected repositories + pub repositories: Vec, + /// Total count + pub total_count: i32, +} + +/// Cloud Runner configuration for internal wizard use +/// +/// Note: This is used internally by the wizard to collect configuration. +/// When sending to the API, use `build_cloud_runner_config()` to create +/// the provider-nested structure the backend expects. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct CloudRunnerConfig { + /// Region/location (e.g., "nbg1", "us-central1") + #[serde(skip_serializing_if = "Option::is_none")] + pub region: Option, + /// Machine/instance type (e.g., "cx22", "e2-small") + #[serde(skip_serializing_if = "Option::is_none")] + pub machine_type: Option, + /// Whether service should be publicly accessible + #[serde(skip_serializing_if = "Option::is_none")] + pub is_public: Option, + /// Health check endpoint path + #[serde(skip_serializing_if = "Option::is_none")] + pub health_check_path: Option, +} + +/// Build the cloud runner config in the provider-nested structure expected by backend. +/// +/// The backend expects: +/// - For GCP: `{ "gcp": { "region": "...", "allowUnauthenticated": true } }` +/// - For Hetzner: `{ "hetzner": { "location": "...", "serverType": "..." } }` +/// +/// # Arguments +/// * `provider` - The cloud provider (GCP, Hetzner, etc.) 
+/// * `region` - Region/location for deployment +/// * `machine_type` - Machine/server type +/// * `is_public` - Whether the service should be publicly accessible +/// * `health_check_path` - Optional health check endpoint path +pub fn build_cloud_runner_config( + provider: &CloudProvider, + region: &str, + machine_type: &str, + is_public: bool, + health_check_path: Option<&str>, +) -> serde_json::Value { + match provider { + CloudProvider::Gcp => { + let mut gcp_config = serde_json::json!({ + "region": region, + "allowUnauthenticated": is_public, + }); + if let Some(path) = health_check_path { + gcp_config["healthCheckPath"] = serde_json::json!(path); + } + serde_json::json!({ + "gcp": gcp_config + }) + } + CloudProvider::Hetzner => { + serde_json::json!({ + "hetzner": { + "location": region, + "serverType": machine_type + } + }) + } + // For other providers, use a generic structure + _ => { + serde_json::json!({ + provider.as_str(): { + "region": region, + "machineType": machine_type, + "isPublic": is_public + } + }) + } + } +} + +/// Request body for creating a new deployment configuration +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct CreateDeploymentConfigRequest { + /// Project ID + pub project_id: String, + /// Service name for the deployment + pub service_name: String, + /// Repository ID (from GitHub/GitLab integration) + pub repository_id: i64, + /// Full repository name (e.g., "owner/repo") + pub repository_full_name: String, + /// Path to Dockerfile relative to repo root + /// Note: Backend may use "dockerfile" or "dockerfilePath" - sending both for compatibility + #[serde(skip_serializing_if = "Option::is_none")] + pub dockerfile_path: Option, + /// Alias for dockerfile_path (some backend endpoints expect this name) + #[serde(skip_serializing_if = "Option::is_none")] + pub dockerfile: Option, + /// Build context path relative to repo root + #[serde(skip_serializing_if = "Option::is_none")] + pub build_context: 
Option, + /// Alias for build_context (some backend endpoints expect this name) + #[serde(skip_serializing_if = "Option::is_none")] + pub context: Option, + /// Port the service listens on + pub port: i32, + /// Git branch to deploy from + pub branch: String, + /// Target type: "kubernetes" or "cloud_runner" + pub target_type: String, + /// Cloud provider (gcp, hetzner) + pub cloud_provider: String, + /// Environment ID for deployment + pub environment_id: String, + /// Cluster ID (required for kubernetes target) + #[serde(skip_serializing_if = "Option::is_none")] + pub cluster_id: Option, + /// Registry ID (optional - will provision if not provided) + #[serde(skip_serializing_if = "Option::is_none")] + pub registry_id: Option, + /// Enable auto-deploy on push + pub auto_deploy_enabled: bool, + /// Public access for the service + #[serde(skip_serializing_if = "Option::is_none")] + pub is_public: Option, + /// Cloud Runner specific configuration (provider-nested structure) + /// + /// Use `build_cloud_runner_config()` to create this value. + /// Backend expects: `{ "gcp": {...} }` or `{ "hetzner": {...} }` + #[serde(skip_serializing_if = "Option::is_none")] + pub cloud_runner_config: Option, +} + +/// Provider deployment availability status for the wizard +/// +/// Combines provider connection status with available resources +/// to help users select where to deploy. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ProviderDeploymentStatus { + /// The cloud provider + pub provider: CloudProvider, + /// Whether the provider is connected (has credentials) + pub is_connected: bool, + /// Available Kubernetes clusters (empty if no clusters or not connected) + pub clusters: Vec, + /// Available artifact registries (empty if none or not connected) + pub registries: Vec, + /// Whether Cloud Runner is available for this provider + pub cloud_runner_available: bool, + /// Display message for the wizard (e.g., "2 clusters, 1 registry") + pub summary: String, +} + +/// Summary of a K8s cluster for wizard display +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ClusterSummary { + /// Cluster ID + pub id: String, + /// Cluster display name + pub name: String, + /// Region + pub region: String, + /// Is cluster running/healthy + pub is_healthy: bool, +} + +/// Summary of an artifact registry for wizard display +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RegistrySummary { + /// Registry ID + pub id: String, + /// Registry display name + pub name: String, + /// Region + pub region: String, + /// Is registry ready + pub is_ready: bool, +} + +impl ProviderDeploymentStatus { + /// Check if this provider can be used for deployment + pub fn can_deploy(&self) -> bool { + self.is_connected && (self.cloud_runner_available || !self.clusters.is_empty()) + } + + /// Get available deployment targets for this provider + pub fn available_targets(&self) -> Vec { + let mut targets = Vec::new(); + if self.cloud_runner_available { + targets.push(DeploymentTarget::CloudRunner); + } + if !self.clusters.is_empty() { + targets.push(DeploymentTarget::Kubernetes); + } + targets + } +} + +// ========================================================================= +// GitHub Integration Types +// 
========================================================================= + +/// GitHub App installation connected to the organization +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GitHubInstallation { + /// GitHub App installation ID + pub installation_id: i64, + /// GitHub account ID + pub account_id: i64, + /// GitHub account login/username + pub account_login: String, + /// Account type: "User" or "Organization" + pub account_type: String, + /// Target type: "User" or "Organization" + #[serde(default)] + pub target_type: Option, + /// Permissions granted to the app + #[serde(default)] + pub permissions: Option, + /// Events the app is subscribed to + #[serde(default)] + pub events: Option>, + /// Repository selection: "all" or "selected" + #[serde(default)] + pub repository_selection: Option, + /// GitHub App ID + #[serde(default)] + pub app_id: Option, + /// GitHub App slug + #[serde(default)] + pub app_slug: Option, + /// When the installation was suspended + #[serde(default)] + pub suspended_at: Option, + /// Who suspended the installation + #[serde(default)] + pub suspended_by: Option, + /// When the installation was created + #[serde(default)] + pub created_at: Option, + /// When the installation was last updated + #[serde(default)] + pub updated_at: Option, +} + +/// Response for listing GitHub installations +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GitHubInstallationsResponse { + /// List of GitHub App installations + pub installations: Vec, +} + +/// Response for getting GitHub App installation URL +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GitHubInstallationUrlResponse { + /// URL to install the GitHub App + pub installation_url: String, +} + +/// Repository available for connection (from GitHub) +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct 
AvailableRepository { + /// GitHub repository ID + pub id: i64, + /// Repository name (e.g., "my-repo") + pub name: String, + /// Full repository name (e.g., "owner/my-repo") + pub full_name: String, + /// Repository owner + #[serde(default)] + pub owner: Option, + /// Whether the repository is private + #[serde(default)] + pub private: bool, + /// Default branch name + #[serde(default)] + pub default_branch: Option, + /// Repository description + #[serde(default)] + pub description: Option, + /// Repository HTML URL + #[serde(default)] + pub html_url: Option, + /// GitHub installation ID this repo is accessible through + #[serde(default)] + pub installation_id: Option, +} + +/// Response for listing available repositories +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct AvailableRepositoriesResponse { + /// List of available repositories + pub repositories: Vec, + /// IDs of repositories already connected to the project + #[serde(default)] + pub connected_repositories: Vec, + /// Total count of available repositories + #[serde(default)] + pub total_count: i32, + /// Current page number + #[serde(default)] + pub page: i32, + /// Items per page + #[serde(default)] + pub per_page: i32, + /// Whether there are more pages + #[serde(default)] + pub has_more: bool, +} + +/// Request to connect a repository to a project +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ConnectRepositoryRequest { + /// Project ID to connect the repository to + pub project_id: String, + /// GitHub repository ID + pub repository_id: i64, + /// Repository name + pub repository_name: String, + /// Full repository name (owner/repo) + pub repository_full_name: String, + /// Repository owner + pub repository_owner: String, + /// Whether the repository is private + pub repository_private: bool, + /// Default branch name + #[serde(skip_serializing_if = "Option::is_none")] + pub default_branch: Option, + /// Connection 
type (e.g., "app") + #[serde(skip_serializing_if = "Option::is_none")] + pub connection_type: Option, + /// GitHub installation ID + #[serde(skip_serializing_if = "Option::is_none")] + pub github_installation_id: Option, + /// Repository type: "application" or "gitops" + #[serde(skip_serializing_if = "Option::is_none")] + pub repository_type: Option, +} + +/// Response after connecting a repository to a project +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ConnectRepositoryResponse { + /// Connection ID + pub id: String, + /// Project ID + pub project_id: String, + /// GitHub repository ID + pub repository_id: i64, + /// Full repository name + pub repository_full_name: String, + /// Whether the connection is active + #[serde(default = "default_true")] + pub is_active: bool, +} + +/// Request to initialize GitOps repository for a project +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct InitializeGitOpsRequest { + /// GitHub installation ID to use for creating the repo + #[serde(skip_serializing_if = "Option::is_none")] + pub installation_id: Option, +} + +/// Response after initializing GitOps repository +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct InitializeGitOpsResponse { + /// Full name of the created/existing GitOps repository + pub repo_full_name: String, + /// GitHub installation ID used + pub installation_id: i64, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_cloud_provider_as_str() { + assert_eq!(CloudProvider::Gcp.as_str(), "gcp"); + assert_eq!(CloudProvider::Aws.as_str(), "aws"); + assert_eq!(CloudProvider::Azure.as_str(), "azure"); + assert_eq!(CloudProvider::Hetzner.as_str(), "hetzner"); + assert_eq!(CloudProvider::Scaleway.as_str(), "scaleway"); + assert_eq!(CloudProvider::Cyso.as_str(), "cyso"); + } + + #[test] + fn test_cloud_provider_display_name() { + assert_eq!(CloudProvider::Gcp.display_name(), "Google Cloud 
Platform"); + assert_eq!(CloudProvider::Aws.display_name(), "Amazon Web Services"); + assert_eq!(CloudProvider::Azure.display_name(), "Microsoft Azure"); + assert_eq!(CloudProvider::Hetzner.display_name(), "Hetzner Cloud"); + assert_eq!(CloudProvider::Scaleway.display_name(), "Scaleway"); + assert_eq!(CloudProvider::Cyso.display_name(), "Cyso Cloud"); + } + + #[test] + fn test_cloud_provider_from_str() { + assert_eq!(CloudProvider::from_str("gcp").unwrap(), CloudProvider::Gcp); + assert_eq!(CloudProvider::from_str("GCP").unwrap(), CloudProvider::Gcp); + assert_eq!(CloudProvider::from_str("aws").unwrap(), CloudProvider::Aws); + assert_eq!( + CloudProvider::from_str("azure").unwrap(), + CloudProvider::Azure + ); + assert_eq!( + CloudProvider::from_str("hetzner").unwrap(), + CloudProvider::Hetzner + ); + assert_eq!( + CloudProvider::from_str("scaleway").unwrap(), + CloudProvider::Scaleway + ); + assert_eq!( + CloudProvider::from_str("cyso").unwrap(), + CloudProvider::Cyso + ); + assert!(CloudProvider::from_str("unknown").is_err()); + } + + #[test] + fn test_cloud_provider_display() { + assert_eq!(format!("{}", CloudProvider::Gcp), "gcp"); + assert_eq!(format!("{}", CloudProvider::Aws), "aws"); + } + + #[test] + fn test_cloud_provider_all() { + let all = CloudProvider::all(); + assert_eq!(all.len(), 6); + assert!(all.contains(&CloudProvider::Gcp)); + assert!(all.contains(&CloudProvider::Aws)); + assert!(all.contains(&CloudProvider::Azure)); + assert!(all.contains(&CloudProvider::Hetzner)); + assert!(all.contains(&CloudProvider::Scaleway)); + assert!(all.contains(&CloudProvider::Cyso)); + } + + #[test] + fn test_cloud_provider_is_available() { + // Available providers + assert!(CloudProvider::Gcp.is_available()); + assert!(CloudProvider::Hetzner.is_available()); + + // Coming soon providers + assert!(!CloudProvider::Aws.is_available()); + assert!(!CloudProvider::Azure.is_available()); + assert!(!CloudProvider::Scaleway.is_available()); + 
assert!(!CloudProvider::Cyso.is_available()); + } + + #[test] + fn test_cloud_credential_status_serialization() { + let status = CloudCredentialStatus { + id: "cred-123".to_string(), + provider: "gcp".to_string(), + }; + + let json = serde_json::to_string(&status).unwrap(); + assert!(json.contains("\"id\":\"cred-123\"")); + assert!(json.contains("\"provider\":\"gcp\"")); + // Verify no tokens/secrets in serialized output + assert!(!json.contains("token")); + assert!(!json.contains("secret")); + assert!(!json.contains("key")); + } + + // ========================================================================= + // CLI Wizard Types Tests + // ========================================================================= + + #[test] + fn test_deployment_target_as_str() { + assert_eq!(DeploymentTarget::CloudRunner.as_str(), "cloud_runner"); + assert_eq!(DeploymentTarget::Kubernetes.as_str(), "kubernetes"); + } + + #[test] + fn test_deployment_target_display_name() { + assert_eq!(DeploymentTarget::CloudRunner.display_name(), "Cloud Runner"); + assert_eq!(DeploymentTarget::Kubernetes.display_name(), "Kubernetes"); + } + + #[test] + fn test_wizard_config_is_complete_cloud_runner() { + let mut config = WizardDeploymentConfig::new(); + assert!(!config.is_complete()); + + config.service_name = Some("api".to_string()); + config.port = Some(8080); + config.branch = Some("main".to_string()); + config.target = Some(DeploymentTarget::CloudRunner); + config.provider = Some(CloudProvider::Gcp); + config.environment_id = Some("env-123".to_string()); + + // Cloud Runner requires region and machine type + assert!(!config.is_complete()); + + config.region = Some("us-central1".to_string()); + config.machine_type = Some("e2-small".to_string()); + + assert!(config.is_complete()); + } + + #[test] + fn test_wizard_config_is_complete_kubernetes() { + let mut config = WizardDeploymentConfig::new(); + config.service_name = Some("api".to_string()); + config.port = Some(8080); + config.branch = 
Some("main".to_string()); + config.target = Some(DeploymentTarget::Kubernetes); + config.provider = Some(CloudProvider::Gcp); + config.environment_id = Some("env-123".to_string()); + + // K8s requires cluster_id + assert!(!config.is_complete()); + + config.cluster_id = Some("cluster-123".to_string()); + assert!(config.is_complete()); + } + + #[test] + fn test_wizard_config_missing_fields() { + let config = WizardDeploymentConfig::new(); + let missing = config.missing_fields(); + assert!(missing.contains(&"service_name")); + assert!(missing.contains(&"port")); + assert!(missing.contains(&"branch")); + } + + #[test] + fn test_provider_deployment_status_can_deploy() { + let status = ProviderDeploymentStatus { + provider: CloudProvider::Gcp, + is_connected: true, + clusters: vec![], + registries: vec![], + cloud_runner_available: true, + summary: "Cloud Run available".to_string(), + }; + assert!(status.can_deploy()); + + let disconnected = ProviderDeploymentStatus { + provider: CloudProvider::Aws, + is_connected: false, + clusters: vec![], + registries: vec![], + cloud_runner_available: false, + summary: "Not connected".to_string(), + }; + assert!(!disconnected.can_deploy()); + } + + #[test] + fn test_provider_deployment_status_available_targets() { + let status = ProviderDeploymentStatus { + provider: CloudProvider::Gcp, + is_connected: true, + clusters: vec![ClusterSummary { + id: "c1".to_string(), + name: "prod-cluster".to_string(), + region: "us-central1".to_string(), + is_healthy: true, + }], + registries: vec![], + cloud_runner_available: true, + summary: "1 cluster, Cloud Run".to_string(), + }; + + let targets = status.available_targets(); + assert_eq!(targets.len(), 2); + assert!(targets.contains(&DeploymentTarget::CloudRunner)); + assert!(targets.contains(&DeploymentTarget::Kubernetes)); + } + + // ========================================================================= + // Environment Tests + // 
========================================================================= + + #[test] + fn test_environment_serialization() { + let env = Environment { + id: "env-123".to_string(), + name: "production".to_string(), + project_id: "proj-456".to_string(), + environment_type: "cluster".to_string(), + cluster_id: Some("cluster-789".to_string()), + namespace: Some("prod-ns".to_string()), + description: Some("Production environment".to_string()), + is_active: true, + created_at: Some("2024-01-01T00:00:00Z".to_string()), + updated_at: Some("2024-01-01T00:00:00Z".to_string()), + }; + + let json = serde_json::to_string(&env).unwrap(); + assert!(json.contains("\"id\":\"env-123\"")); + assert!(json.contains("\"name\":\"production\"")); + assert!(json.contains("\"projectId\":\"proj-456\"")); + assert!(json.contains("\"environmentType\":\"cluster\"")); + assert!(json.contains("\"clusterId\":\"cluster-789\"")); + } + + #[test] + fn test_environment_deserialization() { + let json = r#"{ + "id": "env-abc", + "name": "staging", + "projectId": "proj-def", + "environmentType": "cloud", + "isActive": true, + "createdAt": "2024-01-15T12:00:00Z", + "updatedAt": "2024-01-15T12:00:00Z" + }"#; + + let env: Environment = serde_json::from_str(json).unwrap(); + assert_eq!(env.id, "env-abc"); + assert_eq!(env.name, "staging"); + assert_eq!(env.project_id, "proj-def"); + assert_eq!(env.environment_type, "cloud"); + assert!(env.cluster_id.is_none()); + assert_eq!(env.created_at, Some("2024-01-15T12:00:00Z".to_string())); + } + + #[test] + fn test_environment_optional_fields_default() { + let json = r#"{ + "id": "env-min", + "name": "minimal", + "projectId": "proj-min", + "environmentType": "cloud" + }"#; + + let env: Environment = serde_json::from_str(json).unwrap(); + assert!(env.cluster_id.is_none()); + assert!(env.created_at.is_none()); + assert!(env.is_active); // default_true + } + + #[test] + fn test_create_deployment_config_request_serialization() { + let request = 
CreateDeploymentConfigRequest { + project_id: "proj-123".to_string(), + service_name: "api".to_string(), + repository_id: 12345, + repository_full_name: "org/repo".to_string(), + dockerfile_path: Some("Dockerfile".to_string()), + dockerfile: Some("Dockerfile".to_string()), + build_context: Some(".".to_string()), + context: Some(".".to_string()), + port: 8080, + branch: "main".to_string(), + target_type: "cloud_runner".to_string(), + cloud_provider: "gcp".to_string(), + environment_id: "env-123".to_string(), + cluster_id: None, + registry_id: Some("reg-456".to_string()), + auto_deploy_enabled: true, + is_public: None, + cloud_runner_config: None, + }; + + let json = serde_json::to_string(&request).unwrap(); + assert!(json.contains("\"serviceName\":\"api\"")); + assert!(json.contains("\"port\":8080")); + // Optional None fields should be skipped + assert!(!json.contains("clusterId")); + assert!(!json.contains("isPublic")); + } + + // ========================================================================= + // Cloud Runner Config Builder Tests + // ========================================================================= + + #[test] + fn test_build_cloud_runner_config_gcp() { + let config = build_cloud_runner_config( + &CloudProvider::Gcp, + "us-central1", + "e2-small", + true, + Some("/health"), + ); + let gcp = config.get("gcp").expect("should have gcp key"); + assert_eq!(gcp.get("region").and_then(|v| v.as_str()), Some("us-central1")); + assert_eq!(gcp.get("allowUnauthenticated").and_then(|v| v.as_bool()), Some(true)); + assert_eq!(gcp.get("healthCheckPath").and_then(|v| v.as_str()), Some("/health")); + } + + #[test] + fn test_build_cloud_runner_config_gcp_private() { + let config = build_cloud_runner_config( + &CloudProvider::Gcp, + "europe-west1", + "e2-medium", + false, + None, + ); + let gcp = config.get("gcp").expect("should have gcp key"); + assert_eq!(gcp.get("region").and_then(|v| v.as_str()), Some("europe-west1")); + 
assert_eq!(gcp.get("allowUnauthenticated").and_then(|v| v.as_bool()), Some(false)); + // No health check path when not provided + assert!(gcp.get("healthCheckPath").is_none()); + } + + #[test] + fn test_build_cloud_runner_config_hetzner() { + let config = build_cloud_runner_config( + &CloudProvider::Hetzner, + "nbg1", + "cx22", + true, + None, + ); + let hetzner = config.get("hetzner").expect("should have hetzner key"); + assert_eq!(hetzner.get("location").and_then(|v| v.as_str()), Some("nbg1")); + assert_eq!(hetzner.get("serverType").and_then(|v| v.as_str()), Some("cx22")); + } + + #[test] + fn test_build_cloud_runner_config_hetzner_different_location() { + let config = build_cloud_runner_config( + &CloudProvider::Hetzner, + "fsn1", + "cx32", + false, + Some("/healthz"), + ); + let hetzner = config.get("hetzner").expect("should have hetzner key"); + assert_eq!(hetzner.get("location").and_then(|v| v.as_str()), Some("fsn1")); + assert_eq!(hetzner.get("serverType").and_then(|v| v.as_str()), Some("cx32")); + // Hetzner config doesn't include health check path in current implementation + } +} diff --git a/src/platform/mod.rs b/src/platform/mod.rs new file mode 100644 index 00000000..63079626 --- /dev/null +++ b/src/platform/mod.rs @@ -0,0 +1,10 @@ +//! Platform module for Syncable platform integration +//! +//! This module provides: +//! - Session state management for tracking selected projects and organizations +//! - API client for interacting with the Syncable Platform API + +pub mod api; +pub mod session; + +pub use session::PlatformSession; diff --git a/src/platform/session.rs b/src/platform/session.rs new file mode 100644 index 00000000..4cb058af --- /dev/null +++ b/src/platform/session.rs @@ -0,0 +1,338 @@ +//! Platform session state management +//! +//! Manages the selected platform project/organization context that persists +//! across CLI sessions. Stored in `~/.syncable/platform-session.json`. 
+ +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use std::fs; +use std::io; +use std::path::PathBuf; + +/// Platform session state - tracks selected project, organization, and environment +/// +/// This is a separate system from conversation persistence - it tracks +/// which platform project/org/environment the user has selected for platform operations. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct PlatformSession { + /// Selected platform project UUID + pub project_id: Option, + /// Human-readable project name + pub project_name: Option, + /// Organization UUID + pub org_id: Option, + /// Organization name + pub org_name: Option, + /// Selected environment UUID + pub environment_id: Option, + /// Human-readable environment name + pub environment_name: Option, + /// When the session was last updated + pub last_updated: Option>, +} + +impl PlatformSession { + /// Creates a new empty platform session + pub fn new() -> Self { + Self::default() + } + + /// Creates a platform session with a selected project + pub fn with_project( + project_id: String, + project_name: String, + org_id: String, + org_name: String, + ) -> Self { + Self { + project_id: Some(project_id), + project_name: Some(project_name), + org_id: Some(org_id), + org_name: Some(org_name), + environment_id: None, + environment_name: None, + last_updated: Some(Utc::now()), + } + } + + /// Creates a platform session with a selected project and environment + pub fn with_environment( + project_id: String, + project_name: String, + org_id: String, + org_name: String, + environment_id: String, + environment_name: String, + ) -> Self { + Self { + project_id: Some(project_id), + project_name: Some(project_name), + org_id: Some(org_id), + org_name: Some(org_name), + environment_id: Some(environment_id), + environment_name: Some(environment_name), + last_updated: Some(Utc::now()), + } + } + + /// Clears the selected project and environment + pub fn clear(&mut self) { + 
self.project_id = None; + self.project_name = None; + self.org_id = None; + self.org_name = None; + self.environment_id = None; + self.environment_name = None; + self.last_updated = Some(Utc::now()); + } + + /// Clears only the selected environment (keeps project) + pub fn clear_environment(&mut self) { + self.environment_id = None; + self.environment_name = None; + self.last_updated = Some(Utc::now()); + } + + /// Returns true if a project is currently selected + pub fn is_project_selected(&self) -> bool { + self.project_id.is_some() + } + + /// Returns true if an environment is currently selected + pub fn is_environment_selected(&self) -> bool { + self.environment_id.is_some() + } + + /// Returns the path to the platform session file + /// + /// Location: `~/.syncable/platform-session.json` + pub fn session_path() -> PathBuf { + dirs::home_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join(".syncable") + .join("platform-session.json") + } + + /// Load platform session from disk + /// + /// Returns Default if the file doesn't exist or can't be parsed. + pub fn load() -> io::Result { + let path = Self::session_path(); + + if !path.exists() { + return Ok(Self::default()); + } + + let content = fs::read_to_string(&path)?; + serde_json::from_str(&content).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + } + + /// Save platform session to disk + /// + /// Creates `~/.syncable/` directory if it doesn't exist. 
+ pub fn save(&self) -> io::Result<()> { + let path = Self::session_path(); + + // Ensure directory exists (pattern from persistence.rs) + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + + let json = serde_json::to_string_pretty(self)?; + fs::write(&path, json)?; + Ok(()) + } + + /// Returns a display string for the current context + /// + /// Format: "[org/project/env]", "[org/project]", or "[no project selected]" + pub fn display_context(&self) -> String { + match (&self.org_name, &self.project_name, &self.environment_name) { + (Some(org), Some(project), Some(env)) => format!("[{}/{}/{}]", org, project, env), + (Some(org), Some(project), None) => format!("[{}/{}]", org, project), + (None, Some(project), Some(env)) => format!("[{}/{}]", project, env), + (None, Some(project), None) => format!("[{}]", project), + _ => "[no project selected]".to_string(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::tempdir; + + #[test] + fn test_new_session_is_empty() { + let session = PlatformSession::new(); + assert!(!session.is_project_selected()); + assert_eq!(session.display_context(), "[no project selected]"); + } + + #[test] + fn test_with_project() { + let session = PlatformSession::with_project( + "proj-123".to_string(), + "my-project".to_string(), + "org-456".to_string(), + "my-org".to_string(), + ); + + assert!(session.is_project_selected()); + assert_eq!(session.project_id, Some("proj-123".to_string())); + assert_eq!(session.display_context(), "[my-org/my-project]"); + } + + #[test] + fn test_clear() { + let mut session = PlatformSession::with_project( + "proj-123".to_string(), + "my-project".to_string(), + "org-456".to_string(), + "my-org".to_string(), + ); + + session.clear(); + assert!(!session.is_project_selected()); + assert!(session.last_updated.is_some()); // last_updated preserved + } + + #[test] + fn test_display_context() { + // Full context with environment + let session = PlatformSession::with_environment( 
+ "id".to_string(), + "project".to_string(), + "oid".to_string(), + "org".to_string(), + "env-id".to_string(), + "prod".to_string(), + ); + assert_eq!(session.display_context(), "[org/project/prod]"); + + // Project only (no env) + let session = PlatformSession::with_project( + "id".to_string(), + "project".to_string(), + "oid".to_string(), + "org".to_string(), + ); + assert_eq!(session.display_context(), "[org/project]"); + + // Project only (no org) + let session = PlatformSession { + project_id: Some("id".to_string()), + project_name: Some("project".to_string()), + org_id: None, + org_name: None, + environment_id: None, + environment_name: None, + last_updated: None, + }; + assert_eq!(session.display_context(), "[project]"); + + // No project + let session = PlatformSession::new(); + assert_eq!(session.display_context(), "[no project selected]"); + } + + #[test] + fn test_with_environment() { + let session = PlatformSession::with_environment( + "proj-123".to_string(), + "my-project".to_string(), + "org-456".to_string(), + "my-org".to_string(), + "env-789".to_string(), + "production".to_string(), + ); + + assert!(session.is_project_selected()); + assert!(session.is_environment_selected()); + assert_eq!(session.project_id, Some("proj-123".to_string())); + assert_eq!(session.environment_id, Some("env-789".to_string())); + assert_eq!(session.environment_name, Some("production".to_string())); + assert_eq!(session.display_context(), "[my-org/my-project/production]"); + } + + #[test] + fn test_clear_environment() { + let mut session = PlatformSession::with_environment( + "proj-123".to_string(), + "my-project".to_string(), + "org-456".to_string(), + "my-org".to_string(), + "env-789".to_string(), + "production".to_string(), + ); + + assert!(session.is_environment_selected()); + + session.clear_environment(); + + assert!(session.is_project_selected()); // Project still selected + assert!(!session.is_environment_selected()); // Environment cleared + 
assert_eq!(session.display_context(), "[my-org/my-project]"); + } + + #[test] + fn test_is_environment_selected() { + let session = PlatformSession::new(); + assert!(!session.is_environment_selected()); + + let session = PlatformSession::with_project( + "proj-123".to_string(), + "my-project".to_string(), + "org-456".to_string(), + "my-org".to_string(), + ); + assert!(!session.is_environment_selected()); + + let session = PlatformSession::with_environment( + "proj-123".to_string(), + "my-project".to_string(), + "org-456".to_string(), + "my-org".to_string(), + "env-789".to_string(), + "staging".to_string(), + ); + assert!(session.is_environment_selected()); + } + + #[test] + fn test_save_and_load() { + // Use a temp directory for testing + let temp_dir = tempdir().unwrap(); + let temp_path = temp_dir.path().join("platform-session.json"); + + // Create and save a session + let session = PlatformSession::with_project( + "proj-789".to_string(), + "test-project".to_string(), + "org-abc".to_string(), + "test-org".to_string(), + ); + + // Write directly to temp path for testing + let json = serde_json::to_string_pretty(&session).unwrap(); + fs::write(&temp_path, json).unwrap(); + + // Read back + let content = fs::read_to_string(&temp_path).unwrap(); + let loaded: PlatformSession = serde_json::from_str(&content).unwrap(); + + assert_eq!(loaded.project_id, session.project_id); + assert_eq!(loaded.project_name, session.project_name); + assert_eq!(loaded.org_id, session.org_id); + assert_eq!(loaded.org_name, session.org_name); + } + + #[test] + fn test_load_missing_file() { + // When file doesn't exist, should return default + // (This test relies on the actual load() checking path.exists()) + // We can't easily test this without mocking, so we just verify default behavior + let default = PlatformSession::default(); + assert!(!default.is_project_selected()); + } +} diff --git a/src/wizard/cloud_provider_data.rs b/src/wizard/cloud_provider_data.rs new file mode 100644 index 
00000000..1058078d --- /dev/null +++ b/src/wizard/cloud_provider_data.rs @@ -0,0 +1,226 @@ +//! Cloud provider regions and machine types for the deployment wizard +//! +//! This module contains static data for cloud provider options, +//! matching the frontend's cloudProviderData.ts for consistency. + +use crate::platform::api::types::CloudProvider; + +/// A cloud region/location option +#[derive(Debug, Clone)] +pub struct CloudRegion { + /// Region ID (e.g., "nbg1", "us-central1") + pub id: &'static str, + /// Human-readable name (e.g., "Nuremberg", "Iowa") + pub name: &'static str, + /// Geographic location (e.g., "Germany", "US Central") + pub location: &'static str, +} + +/// A machine/instance type option +#[derive(Debug, Clone)] +pub struct MachineType { + /// Machine type ID (e.g., "cx22", "e2-small") + pub id: &'static str, + /// Display name + pub name: &'static str, + /// Number of vCPUs (as string to handle fractional) + pub cpu: &'static str, + /// Memory amount (e.g., "4 GB") + pub memory: &'static str, + /// Optional description (e.g., "Shared Intel", "ARM64") + pub description: Option<&'static str>, +} + +// ============================================================================= +// Hetzner Cloud +// ============================================================================= + +/// Hetzner Cloud locations +pub static HETZNER_LOCATIONS: &[CloudRegion] = &[ + // Europe + CloudRegion { id: "nbg1", name: "Nuremberg", location: "Germany" }, + CloudRegion { id: "fsn1", name: "Falkenstein", location: "Germany" }, + CloudRegion { id: "hel1", name: "Helsinki", location: "Finland" }, + // Americas + CloudRegion { id: "ash", name: "Ashburn", location: "US East" }, + CloudRegion { id: "hil", name: "Hillsboro", location: "US West" }, + // Asia Pacific + CloudRegion { id: "sin", name: "Singapore", location: "Southeast Asia" }, +]; + +/// Hetzner Cloud server types (updated January 2026 naming) +pub static HETZNER_SERVER_TYPES: &[MachineType] = &[ + // 
Shared vCPU - CX Series (Intel/AMD cost-optimized) + MachineType { id: "cx23", name: "CX23", cpu: "2", memory: "4 GB", description: Some("Shared Intel/AMD") }, + MachineType { id: "cx33", name: "CX33", cpu: "4", memory: "8 GB", description: Some("Shared Intel/AMD") }, + MachineType { id: "cx43", name: "CX43", cpu: "8", memory: "16 GB", description: Some("Shared Intel/AMD") }, + MachineType { id: "cx53", name: "CX53", cpu: "16", memory: "32 GB", description: Some("Shared Intel/AMD") }, + // Shared vCPU - CPX Series (AMD regular) + MachineType { id: "cpx22", name: "CPX22", cpu: "2", memory: "4 GB", description: Some("Shared AMD") }, + MachineType { id: "cpx32", name: "CPX32", cpu: "4", memory: "8 GB", description: Some("Shared AMD") }, + MachineType { id: "cpx42", name: "CPX42", cpu: "8", memory: "16 GB", description: Some("Shared AMD") }, + MachineType { id: "cpx52", name: "CPX52", cpu: "12", memory: "24 GB", description: Some("Shared AMD") }, + MachineType { id: "cpx62", name: "CPX62", cpu: "16", memory: "32 GB", description: Some("Shared AMD") }, + // Dedicated vCPU - CCX Series (AMD) + MachineType { id: "ccx13", name: "CCX13", cpu: "2", memory: "8 GB", description: Some("Dedicated AMD") }, + MachineType { id: "ccx23", name: "CCX23", cpu: "4", memory: "16 GB", description: Some("Dedicated AMD") }, + MachineType { id: "ccx33", name: "CCX33", cpu: "8", memory: "32 GB", description: Some("Dedicated AMD") }, + MachineType { id: "ccx43", name: "CCX43", cpu: "16", memory: "64 GB", description: Some("Dedicated AMD") }, + MachineType { id: "ccx53", name: "CCX53", cpu: "32", memory: "128 GB", description: Some("Dedicated AMD") }, + MachineType { id: "ccx63", name: "CCX63", cpu: "48", memory: "192 GB", description: Some("Dedicated AMD") }, + // ARM - CAX Series (Ampere) + MachineType { id: "cax11", name: "CAX11", cpu: "2", memory: "4 GB", description: Some("ARM64 Ampere") }, + MachineType { id: "cax21", name: "CAX21", cpu: "4", memory: "8 GB", description: Some("ARM64 
Ampere") }, + MachineType { id: "cax31", name: "CAX31", cpu: "8", memory: "16 GB", description: Some("ARM64 Ampere") }, + MachineType { id: "cax41", name: "CAX41", cpu: "16", memory: "32 GB", description: Some("ARM64 Ampere") }, +]; + +// ============================================================================= +// GCP (Google Cloud Platform) +// ============================================================================= + +/// GCP regions +pub static GCP_REGIONS: &[CloudRegion] = &[ + // Americas + CloudRegion { id: "us-central1", name: "Iowa", location: "US Central" }, + CloudRegion { id: "us-east1", name: "South Carolina", location: "US East" }, + CloudRegion { id: "us-east4", name: "Virginia", location: "US East" }, + CloudRegion { id: "us-west1", name: "Oregon", location: "US West" }, + CloudRegion { id: "us-west2", name: "Los Angeles", location: "US West" }, + // Europe + CloudRegion { id: "europe-west1", name: "Belgium", location: "Europe" }, + CloudRegion { id: "europe-west2", name: "London", location: "UK" }, + CloudRegion { id: "europe-west3", name: "Frankfurt", location: "Germany" }, + CloudRegion { id: "europe-west4", name: "Netherlands", location: "Europe" }, + CloudRegion { id: "europe-north1", name: "Finland", location: "Europe" }, + // Asia Pacific + CloudRegion { id: "asia-east1", name: "Taiwan", location: "Asia Pacific" }, + CloudRegion { id: "asia-northeast1", name: "Tokyo", location: "Japan" }, + CloudRegion { id: "asia-southeast1", name: "Singapore", location: "Southeast Asia" }, + CloudRegion { id: "australia-southeast1", name: "Sydney", location: "Australia" }, +]; + +/// GCP machine types (Compute Engine) +pub static GCP_MACHINE_TYPES: &[MachineType] = &[ + // E2 Series (Cost-optimized) + MachineType { id: "e2-micro", name: "e2-micro", cpu: "0.25", memory: "1 GB", description: Some("Shared-core") }, + MachineType { id: "e2-small", name: "e2-small", cpu: "0.5", memory: "2 GB", description: Some("Shared-core") }, + MachineType { id: 
"e2-medium", name: "e2-medium", cpu: "1", memory: "4 GB", description: Some("Shared-core") }, + MachineType { id: "e2-standard-2", name: "e2-standard-2", cpu: "2", memory: "8 GB", description: None }, + MachineType { id: "e2-standard-4", name: "e2-standard-4", cpu: "4", memory: "16 GB", description: None }, + MachineType { id: "e2-standard-8", name: "e2-standard-8", cpu: "8", memory: "32 GB", description: None }, + // N2 Series (Balanced) + MachineType { id: "n2-standard-2", name: "n2-standard-2", cpu: "2", memory: "8 GB", description: None }, + MachineType { id: "n2-standard-4", name: "n2-standard-4", cpu: "4", memory: "16 GB", description: None }, + MachineType { id: "n2-standard-8", name: "n2-standard-8", cpu: "8", memory: "32 GB", description: None }, +]; + +// ============================================================================= +// Helper Functions +// ============================================================================= + +/// Get regions for a cloud provider +pub fn get_regions_for_provider(provider: &CloudProvider) -> &'static [CloudRegion] { + match provider { + CloudProvider::Hetzner => HETZNER_LOCATIONS, + CloudProvider::Gcp => GCP_REGIONS, + _ => &[], // AWS, Azure not yet supported for Cloud Runner + } +} + +/// Get machine types for a cloud provider +pub fn get_machine_types_for_provider(provider: &CloudProvider) -> &'static [MachineType] { + match provider { + CloudProvider::Hetzner => HETZNER_SERVER_TYPES, + CloudProvider::Gcp => GCP_MACHINE_TYPES, + _ => &[], // AWS, Azure not yet supported for Cloud Runner + } +} + +/// Get default region for a provider +pub fn get_default_region(provider: &CloudProvider) -> &'static str { + match provider { + CloudProvider::Hetzner => "nbg1", + CloudProvider::Gcp => "us-central1", + _ => "", + } +} + +/// Get default machine type for a provider +pub fn get_default_machine_type(provider: &CloudProvider) -> &'static str { + match provider { + CloudProvider::Hetzner => "cx23", + CloudProvider::Gcp 
=> "e2-small", + _ => "", + } +} + +/// Format region for display: "Nuremberg (Germany)" +pub fn format_region_display(region: &CloudRegion) -> String { + format!("{} ({})", region.name, region.location) +} + +/// Format machine type for display: "CX23 · 2 vCPU · 4 GB" +pub fn format_machine_type_display(machine: &MachineType) -> String { + let base = format!("{} · {} vCPU · {}", machine.name, machine.cpu, machine.memory); + if let Some(desc) = machine.description { + format!("{} · {}", base, desc) + } else { + base + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_hetzner_locations() { + assert!(!HETZNER_LOCATIONS.is_empty()); + assert!(HETZNER_LOCATIONS.iter().any(|r| r.id == "nbg1")); + } + + #[test] + fn test_hetzner_machine_types() { + assert!(!HETZNER_SERVER_TYPES.is_empty()); + assert!(HETZNER_SERVER_TYPES.iter().any(|m| m.id == "cx23")); + } + + #[test] + fn test_gcp_regions() { + assert!(!GCP_REGIONS.is_empty()); + assert!(GCP_REGIONS.iter().any(|r| r.id == "us-central1")); + } + + #[test] + fn test_gcp_machine_types() { + assert!(!GCP_MACHINE_TYPES.is_empty()); + assert!(GCP_MACHINE_TYPES.iter().any(|m| m.id == "e2-small")); + } + + #[test] + fn test_get_regions_for_provider() { + let hetzner_regions = get_regions_for_provider(&CloudProvider::Hetzner); + assert!(!hetzner_regions.is_empty()); + + let gcp_regions = get_regions_for_provider(&CloudProvider::Gcp); + assert!(!gcp_regions.is_empty()); + } + + #[test] + fn test_format_region_display() { + let region = &HETZNER_LOCATIONS[0]; + let display = format_region_display(region); + assert!(display.contains("Nuremberg")); + assert!(display.contains("Germany")); + } + + #[test] + fn test_format_machine_type_display() { + let machine = &HETZNER_SERVER_TYPES[0]; + let display = format_machine_type_display(machine); + assert!(display.contains("CX23")); + assert!(display.contains("2 vCPU")); + assert!(display.contains("4 GB")); + } +} diff --git a/src/wizard/cluster_selection.rs 
b/src/wizard/cluster_selection.rs new file mode 100644 index 00000000..cbfc6bf0 --- /dev/null +++ b/src/wizard/cluster_selection.rs @@ -0,0 +1,109 @@ +//! Cluster selection step for deployment wizard + +use crate::platform::api::types::ClusterSummary; +use crate::wizard::render::{display_step_header, status_indicator, wizard_render_config}; +use colored::Colorize; +use inquire::{InquireError, Select}; + +/// Result of cluster selection step +#[derive(Debug, Clone)] +pub enum ClusterSelectionResult { + /// User selected a cluster + Selected(ClusterSummary), + /// User wants to go back + Back, + /// User cancelled the wizard + Cancelled, +} + +/// Display cluster selection for Kubernetes deployments +pub fn select_cluster(clusters: &[ClusterSummary]) -> ClusterSelectionResult { + display_step_header( + 3, + "Select Cluster", + "Choose which Kubernetes cluster to deploy to.", + ); + + // Filter to only healthy clusters + let healthy_clusters: Vec<&ClusterSummary> = clusters.iter().filter(|c| c.is_healthy).collect(); + + if healthy_clusters.is_empty() { + println!( + "\n{}", + "No healthy clusters available. 
Provision a cluster in platform settings.".red() + ); + return ClusterSelectionResult::Cancelled; + } + + // Build options with status and region + let mut options: Vec<String> = healthy_clusters + .iter() + .map(|c| { + format!( + "{} {} {}", + status_indicator(c.is_healthy), + c.name.cyan(), + c.region.dimmed() + ) + }) + .collect(); + + // Add back option + options.push("← Back to target selection".dimmed().to_string()); + + let selection = Select::new("Select cluster:", options.clone()) + .with_render_config(wizard_render_config()) + .with_help_message("↑↓ to move, Enter to select, Esc to cancel") + .with_page_size(6) + .prompt(); + + match selection { + Ok(answer) => { + if answer.contains("Back") { + return ClusterSelectionResult::Back; + } + + // Find selected cluster by name + let selected = healthy_clusters + .iter() + .find(|c| answer.contains(&c.name)) + .copied(); + + match selected { + Some(cluster) => { + println!( + "\n{} Selected cluster: {} ({})", + "✓".green(), + cluster.name, + cluster.region + ); + ClusterSelectionResult::Selected(cluster.clone()) + } + None => ClusterSelectionResult::Cancelled, + } + } + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + println!("\n{}", "Wizard cancelled.".dimmed()); + ClusterSelectionResult::Cancelled + } + Err(_) => ClusterSelectionResult::Cancelled, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_cluster_selection_result_variants() { + let cluster = ClusterSummary { + id: "c1".to_string(), + name: "prod".to_string(), + region: "us-central1".to_string(), + is_healthy: true, + }; + let _ = ClusterSelectionResult::Selected(cluster); + let _ = ClusterSelectionResult::Back; + let _ = ClusterSelectionResult::Cancelled; + } +} diff --git a/src/wizard/config_form.rs b/src/wizard/config_form.rs new file mode 100644 index 00000000..90ccc635 --- /dev/null +++ b/src/wizard/config_form.rs @@ -0,0 +1,237 @@ +//! 
Deployment configuration form for the wizard + +use crate::analyzer::DiscoveredDockerfile; +use crate::platform::api::types::{CloudProvider, DeploymentTarget, WizardDeploymentConfig}; +use crate::wizard::render::display_step_header; +use colored::Colorize; +use inquire::{Confirm, InquireError, Text}; + +/// Result of config form step +#[derive(Debug, Clone)] +pub enum ConfigFormResult { + /// User completed the form + Completed(WizardDeploymentConfig), + /// User wants to go back + Back, + /// User cancelled the wizard + Cancelled, +} + +/// Collect deployment configuration details from user +/// +/// Region, machine type, Dockerfile path, and build context are already selected +/// in previous steps. This form collects service name, port, branch, public access, +/// health check, and auto-deploy settings. +#[allow(clippy::too_many_arguments)] +pub fn collect_config( + provider: CloudProvider, + target: DeploymentTarget, + cluster_id: Option<String>, + registry_id: Option<String>, + environment_id: &str, + dockerfile_path: &str, + build_context: &str, + discovered_dockerfile: &DiscoveredDockerfile, + region: Option<String>, + machine_type: Option<String>, + step_number: u8, +) -> ConfigFormResult { + display_step_header( + step_number, + "Configure Service", + "Provide details for your service deployment.", + ); + + // Show previously selected options + println!( + " {} Dockerfile: {}", + "│".dimmed(), + dockerfile_path.cyan() + ); + println!( + " {} Build context: {}", + "│".dimmed(), + build_context.cyan() + ); + if let Some(ref r) = region { + println!(" {} Region: {}", "│".dimmed(), r.cyan()); + } + if let Some(ref m) = machine_type { + println!(" {} Machine: {}", "│".dimmed(), m.cyan()); + } + println!(); + + // Pre-populate from discovery + let default_name = discovered_dockerfile.suggested_service_name.clone(); + let default_port = discovered_dockerfile.suggested_port.unwrap_or(8080); + + // Get current git branch for default + let default_branch = 
get_current_branch().unwrap_or_else(|| "main".to_string()); + + // Service name + let service_name = match Text::new("Service name:") + .with_default(&default_name) + .with_help_message("K8s-compatible name (lowercase, hyphens)") + .prompt() + { + Ok(name) => sanitize_service_name(&name), + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + return ConfigFormResult::Cancelled; + } + Err(_) => return ConfigFormResult::Cancelled, + }; + + // Port + let port_str = default_port.to_string(); + let port = match Text::new("Service port:") + .with_default(&port_str) + .with_help_message("Port your service listens on") + .prompt() + { + Ok(p) => p.parse().unwrap_or(default_port), + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + return ConfigFormResult::Cancelled; + } + Err(_) => return ConfigFormResult::Cancelled, + }; + + // Branch + let branch = match Text::new("Git branch:") + .with_default(&default_branch) + .with_help_message("Branch to deploy from") + .prompt() + { + Ok(b) => b, + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + return ConfigFormResult::Cancelled; + } + Err(_) => return ConfigFormResult::Cancelled, + }; + + // Public access toggle (for Cloud Runner) + let is_public = if target == DeploymentTarget::CloudRunner { + println!(); + println!( + "{}", + "─── Access Configuration ────────────────────".dimmed() + ); + match Confirm::new("Enable public access?") + .with_default(true) + .with_help_message("Make service accessible via public IP/URL") + .prompt() + { + Ok(v) => v, + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + return ConfigFormResult::Cancelled; + } + Err(_) => return ConfigFormResult::Cancelled, + } + } else { + true // Default to public for K8s + }; + + // Health check (optional) + let health_check_path = if target == DeploymentTarget::CloudRunner { 
+ match Confirm::new("Configure health check endpoint?") + .with_default(false) + .with_help_message("Optional HTTP health probe for your service") + .prompt() + { + Ok(true) => { + match Text::new("Health check path:") + .with_default("/health") + .with_help_message("e.g., /health, /healthz, /api/health") + .prompt() + { + Ok(path) => Some(path), + Err(InquireError::OperationCanceled) + | Err(InquireError::OperationInterrupted) => { + return ConfigFormResult::Cancelled; + } + Err(_) => return ConfigFormResult::Cancelled, + } + } + Ok(false) => None, + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + return ConfigFormResult::Cancelled; + } + Err(_) => return ConfigFormResult::Cancelled, + } + } else { + None + }; + + // Auto-deploy disabled by default (CI/CD not ready yet) + let auto_deploy = false; + + // Build the config + let config = WizardDeploymentConfig { + service_name: Some(service_name.clone()), + dockerfile_path: Some(dockerfile_path.to_string()), + build_context: Some(build_context.to_string()), + port: Some(port), + branch: Some(branch), + target: Some(target), + provider: Some(provider), + cluster_id, + registry_id, + environment_id: Some(environment_id.to_string()), + auto_deploy, + region, + machine_type, + is_public, + health_check_path, + }; + + println!("\n{} Configuration complete: {}", "✓".green(), service_name); + + ConfigFormResult::Completed(config) +} + +/// Get current git branch name +fn get_current_branch() -> Option<String> { + std::process::Command::new("git") + .args(["rev-parse", "--abbrev-ref", "HEAD"]) + .output() + .ok() + .and_then(|output| { + if output.status.success() { + String::from_utf8(output.stdout) + .ok() + .map(|s| s.trim().to_string()) + } else { + None + } + }) +} + +/// Sanitize service name for K8s compatibility +fn sanitize_service_name(name: &str) -> String { + name.to_lowercase() + .chars() + .map(|c| if c.is_alphanumeric() || c == '-' { c } else { '-' }) + .collect::<String>() + 
.trim_matches('-') + .to_string() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_sanitize_service_name() { + assert_eq!(sanitize_service_name("My Service"), "my-service"); + assert_eq!(sanitize_service_name("foo_bar"), "foo-bar"); + assert_eq!(sanitize_service_name("--test--"), "test"); + assert_eq!(sanitize_service_name("API Server"), "api-server"); + } + + #[test] + fn test_config_form_result_variants() { + let config = WizardDeploymentConfig::default(); + let _ = ConfigFormResult::Completed(config); + let _ = ConfigFormResult::Back; + let _ = ConfigFormResult::Cancelled; + } +} diff --git a/src/wizard/dockerfile_selection.rs b/src/wizard/dockerfile_selection.rs new file mode 100644 index 00000000..c47961e8 --- /dev/null +++ b/src/wizard/dockerfile_selection.rs @@ -0,0 +1,355 @@ +//! Dockerfile selection step for the deployment wizard +//! +//! Provides smart Dockerfile discovery and selection with build context options. + +use crate::analyzer::DiscoveredDockerfile; +use crate::wizard::render::{display_step_header, wizard_render_config}; +use colored::Colorize; +use inquire::{Confirm, InquireError, Select, Text}; +use std::fmt; +use std::path::Path; + +/// Result of Dockerfile selection step +#[derive(Debug, Clone)] +pub enum DockerfileSelectionResult { + /// User selected a Dockerfile with build context + Selected { + dockerfile: DiscoveredDockerfile, + build_context: String, + }, + /// User wants the agent to create a Dockerfile + StartAgent(String), + /// User wants to go back + Back, + /// User cancelled the wizard + Cancelled, +} + +/// Build context options for the user to choose from +#[derive(Debug, Clone)] +enum BuildContextOption { + /// Directory containing the Dockerfile + DockerfileDirectory(String), + /// Repository root + RepositoryRoot, + /// Custom user-specified path + Custom, +} + +impl fmt::Display for BuildContextOption { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + 
BuildContextOption::DockerfileDirectory(path) => { + write!(f, "Dockerfile's directory {}", path.dimmed()) + } + BuildContextOption::RepositoryRoot => { + write!(f, "Repository root {}", ".".dimmed()) + } + BuildContextOption::Custom => { + write!(f, "Custom path...") + } + } + } +} + +/// Wrapper for displaying Dockerfile options in the selection menu +struct DockerfileOption<'a> { + dockerfile: &'a DiscoveredDockerfile, + project_root: &'a Path, +} + +impl<'a> fmt::Display for DockerfileOption<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Get relative path from project root + let relative_path = self + .dockerfile + .path + .strip_prefix(self.project_root) + .map(|p| p.to_string_lossy().to_string()) + .unwrap_or_else(|_| self.dockerfile.path.to_string_lossy().to_string()); + + // Show: path โ†’ build_context + let build_context = if self.dockerfile.build_context == "." { + ". (root)".to_string() + } else { + self.dockerfile.build_context.clone() + }; + + write!( + f, + "{} {} {}", + relative_path, + "โ†’".dimmed(), + build_context.dimmed() + ) + } +} + +/// Select a Dockerfile from discovered Dockerfiles +/// +/// Handles three cases: +/// - Multiple Dockerfiles: Show selection menu +/// - Single Dockerfile: Auto-select with confirmation +/// - No Dockerfiles: Offer to start agent for creation +pub fn select_dockerfile( + dockerfiles: &[DiscoveredDockerfile], + project_root: &Path, +) -> DockerfileSelectionResult { + display_step_header( + 5, + "Select Dockerfile", + "Choose the Dockerfile to use for deployment.", + ); + + match dockerfiles.len() { + 0 => handle_no_dockerfiles(), + 1 => handle_single_dockerfile(&dockerfiles[0], project_root), + _ => handle_multiple_dockerfiles(dockerfiles, project_root), + } +} + +/// Handle case when no Dockerfiles are found +fn handle_no_dockerfiles() -> DockerfileSelectionResult { + println!( + "\n{} {}", + "โš ".yellow(), + "No Dockerfiles found in this project.".yellow() + ); + + match 
Confirm::new("Would you like the agent to help create one?") + .with_default(true) + .with_help_message("Start an AI-assisted session to generate a Dockerfile") + .prompt() + { + Ok(true) => { + let prompt = "Help me create a Dockerfile for this project. Analyze the codebase and suggest an appropriate Dockerfile with best practices for production deployment.".to_string(); + DockerfileSelectionResult::StartAgent(prompt) + } + Ok(false) => DockerfileSelectionResult::Cancelled, + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + DockerfileSelectionResult::Cancelled + } + Err(_) => DockerfileSelectionResult::Cancelled, + } +} + +/// Handle case when only one Dockerfile is found +fn handle_single_dockerfile( + dockerfile: &DiscoveredDockerfile, + project_root: &Path, +) -> DockerfileSelectionResult { + let relative_path = dockerfile + .path + .strip_prefix(project_root) + .map(|p| p.to_string_lossy().to_string()) + .unwrap_or_else(|_| dockerfile.path.to_string_lossy().to_string()); + + println!( + "\n{} Found: {}", + "✓".green(), + relative_path.cyan() + ); + + // Show additional info if available + if let Some(ref base) = dockerfile.base_image { + println!(" {} Base image: {}", "│".dimmed(), base.dimmed()); + } + if let Some(port) = dockerfile.suggested_port { + println!(" {} Suggested port: {}", "│".dimmed(), port.to_string().dimmed()); + } + + // Proceed to build context selection + select_build_context(dockerfile) +} + +/// Handle case when multiple Dockerfiles are found +fn handle_multiple_dockerfiles( + dockerfiles: &[DiscoveredDockerfile], + project_root: &Path, +) -> DockerfileSelectionResult { + println!( + "\n{} Found {} Dockerfiles:", + "ℹ".blue(), + dockerfiles.len().to_string().cyan() + ); + + // Create display options + let options: Vec<DockerfileOption> = dockerfiles + .iter() + .map(|df| DockerfileOption { + dockerfile: df, + project_root, + }) + .collect(); + + // Build the selection menu + let selection = Select::new("Select 
Dockerfile:", options) + .with_render_config(wizard_render_config()) + .with_help_message("Use โ†‘/โ†“ to navigate, Enter to select") + .prompt(); + + match selection { + Ok(selected) => { + // Find the selected dockerfile by matching path + let selected_df = dockerfiles + .iter() + .find(|df| std::ptr::eq(*df, selected.dockerfile)) + .unwrap(); + select_build_context(selected_df) + } + Err(InquireError::OperationCanceled) => DockerfileSelectionResult::Back, + Err(InquireError::OperationInterrupted) => DockerfileSelectionResult::Cancelled, + Err(_) => DockerfileSelectionResult::Cancelled, + } +} + +/// Select build context for the chosen Dockerfile +fn select_build_context(dockerfile: &DiscoveredDockerfile) -> DockerfileSelectionResult { + println!(); + println!( + "{}", + "โ”€โ”€โ”€ Build Context โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€".dimmed() + ); + println!( + " {}", + "The build context is the directory sent to Docker during build.".dimmed() + ); + + // Compute dockerfile directory (default build context) + let dockerfile_dir = dockerfile + .path + .parent() + .map(|p| { + if p.as_os_str().is_empty() { + ".".to_string() + } else { + p.to_string_lossy().to_string() + } + }) + .unwrap_or_else(|| ".".to_string()); + + // Use the computed build_context from discovery as dockerfile directory display + let display_dir = if dockerfile.build_context.is_empty() || dockerfile.build_context == "." 
{ + ".".to_string() + } else { + dockerfile.build_context.clone() + }; + + // Build options + let options = vec![ + BuildContextOption::DockerfileDirectory(display_dir.clone()), + BuildContextOption::RepositoryRoot, + BuildContextOption::Custom, + ]; + + let selection = Select::new("Build context:", options) + .with_render_config(wizard_render_config()) + .with_help_message("Select the directory to use as Docker build context") + .prompt(); + + match selection { + Ok(BuildContextOption::DockerfileDirectory(_)) => DockerfileSelectionResult::Selected { + dockerfile: dockerfile.clone(), + build_context: display_dir, + }, + Ok(BuildContextOption::RepositoryRoot) => DockerfileSelectionResult::Selected { + dockerfile: dockerfile.clone(), + build_context: ".".to_string(), + }, + Ok(BuildContextOption::Custom) => { + // Prompt for custom path + match Text::new("Custom build context path:") + .with_default(&dockerfile_dir) + .with_help_message("Relative path from repository root") + .prompt() + { + Ok(path) => DockerfileSelectionResult::Selected { + dockerfile: dockerfile.clone(), + build_context: path, + }, + Err(InquireError::OperationCanceled) => DockerfileSelectionResult::Back, + Err(InquireError::OperationInterrupted) => DockerfileSelectionResult::Cancelled, + Err(_) => DockerfileSelectionResult::Cancelled, + } + } + Err(InquireError::OperationCanceled) => DockerfileSelectionResult::Back, + Err(InquireError::OperationInterrupted) => DockerfileSelectionResult::Cancelled, + Err(_) => DockerfileSelectionResult::Cancelled, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::path::PathBuf; + + fn create_test_dockerfile(path: &str, build_context: &str) -> DiscoveredDockerfile { + DiscoveredDockerfile { + path: PathBuf::from(path), + build_context: build_context.to_string(), + suggested_service_name: "test-service".to_string(), + suggested_port: Some(8080), + base_image: Some("node:18".to_string()), + is_multistage: false, + environment: None, + } + } + + #[test] + 
fn test_dockerfile_option_display() { + let df = create_test_dockerfile("/project/services/api/Dockerfile", "services/api"); + let project_root = PathBuf::from("/project"); + let option = DockerfileOption { + dockerfile: &df, + project_root: &project_root, + }; + let display = format!("{}", option); + assert!(display.contains("services/api/Dockerfile")); + assert!(display.contains("โ†’")); + } + + #[test] + fn test_dockerfile_option_display_root() { + let df = create_test_dockerfile("/project/Dockerfile", "."); + let project_root = PathBuf::from("/project"); + let option = DockerfileOption { + dockerfile: &df, + project_root: &project_root, + }; + let display = format!("{}", option); + assert!(display.contains("Dockerfile")); + assert!(display.contains("(root)")); + } + + #[test] + fn test_build_context_option_display() { + let dir_option = BuildContextOption::DockerfileDirectory("services/api".to_string()); + assert!(format!("{}", dir_option).contains("services/api")); + + let root_option = BuildContextOption::RepositoryRoot; + assert!(format!("{}", root_option).contains(".")); + + let custom_option = BuildContextOption::Custom; + assert!(format!("{}", custom_option).contains("Custom")); + } + + #[test] + fn test_dockerfile_selection_result_variants() { + let df = create_test_dockerfile("/project/Dockerfile", "."); + + // Test Selected variant + let selected = DockerfileSelectionResult::Selected { + dockerfile: df.clone(), + build_context: ".".to_string(), + }; + matches!(selected, DockerfileSelectionResult::Selected { .. 
}); + + // Test StartAgent variant + let agent = DockerfileSelectionResult::StartAgent("prompt".to_string()); + matches!(agent, DockerfileSelectionResult::StartAgent(_)); + + // Test Back and Cancelled variants + let _ = DockerfileSelectionResult::Back; + let _ = DockerfileSelectionResult::Cancelled; + } +} diff --git a/src/wizard/environment_creation.rs b/src/wizard/environment_creation.rs new file mode 100644 index 00000000..103205f4 --- /dev/null +++ b/src/wizard/environment_creation.rs @@ -0,0 +1,312 @@ +//! Environment creation wizard for deployment targets +//! +//! Interactive wizard that guides users through creating a new environment +//! with target type selection (Kubernetes or Cloud Runner). + +use crate::platform::api::client::PlatformApiClient; +use crate::platform::api::types::{ClusterSummary, Environment}; +use crate::wizard::provider_selection::get_provider_deployment_statuses; +use crate::wizard::render::{display_step_header, wizard_render_config}; +use colored::Colorize; +use inquire::{InquireError, Select, Text}; + +/// Environment type for the API +/// "cluster" = Kubernetes cluster +/// "cloud" = Cloud Runner (serverless) +#[derive(Debug, Clone, PartialEq, Eq)] +enum EnvironmentType { + Cluster, + Cloud, +} + +impl EnvironmentType { + fn as_str(&self) -> &'static str { + match self { + EnvironmentType::Cluster => "cluster", + EnvironmentType::Cloud => "cloud", + } + } + + fn display_name(&self) -> &'static str { + match self { + EnvironmentType::Cluster => "Kubernetes", + EnvironmentType::Cloud => "Cloud Runner", + } + } +} + +/// Result of environment creation wizard +#[derive(Debug)] +pub enum EnvironmentCreationResult { + /// Environment created successfully + Created(Environment), + /// User cancelled the wizard + Cancelled, + /// An error occurred + Error(String), +} + +/// Run the environment creation wizard +/// +/// Guides user through: +/// 1. Choosing environment name +/// 2. Selecting target type (Kubernetes or Cloud Runner) +/// 3. 
If Kubernetes: selecting a cluster +pub async fn create_environment_wizard( + client: &PlatformApiClient, + project_id: &str, +) -> EnvironmentCreationResult { + display_step_header( + 1, + "Create Environment", + "Set up a new deployment environment for your project.", + ); + + // Step 1: Get environment name + let name = match Text::new("Environment name:") + .with_placeholder("e.g., production, staging, development") + .with_help_message("Choose a descriptive name for this environment") + .prompt() + { + Ok(name) => { + if name.trim().is_empty() { + println!("\n{}", "Environment name cannot be empty.".red()); + return EnvironmentCreationResult::Cancelled; + } + name.trim().to_string() + } + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + println!("\n{}", "Wizard cancelled.".dimmed()); + return EnvironmentCreationResult::Cancelled; + } + Err(e) => { + return EnvironmentCreationResult::Error(format!("Input error: {}", e)); + } + }; + + // Step 2: Select target type + display_step_header( + 2, + "Select Target Type", + "Choose how this environment will deploy services.", + ); + + let target_options = vec![ + format!( + "{} {}", + "Cloud Runner".cyan(), + "Fully managed, auto-scaling containers".dimmed() + ), + format!( + "{} {}", + "Kubernetes".cyan(), + "Deploy to your own K8s cluster".dimmed() + ), + ]; + + let target_selection = Select::new("Select target type:", target_options) + .with_render_config(wizard_render_config()) + .with_help_message("Cloud Runner: serverless, Kubernetes: full control") + .prompt(); + + let env_type = match target_selection { + Ok(answer) => { + if answer.contains("Cloud Runner") { + EnvironmentType::Cloud + } else { + EnvironmentType::Cluster + } + } + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + println!("\n{}", "Wizard cancelled.".dimmed()); + return EnvironmentCreationResult::Cancelled; + } + Err(e) => { + return 
EnvironmentCreationResult::Error(format!("Selection error: {}", e)); + } + }; + + println!( + "\n{} Target: {}", + "โœ“".green(), + env_type.display_name().bold() + ); + + // Step 3: If Kubernetes (cluster), select cluster + let cluster_id = if env_type == EnvironmentType::Cluster { + match select_cluster_for_env(client, project_id).await { + ClusterSelectionResult::Selected(id) => Some(id), + ClusterSelectionResult::NoClusters => { + println!( + "\n{}", + "No Kubernetes clusters available. Please provision a cluster first.".red() + ); + return EnvironmentCreationResult::Cancelled; + } + ClusterSelectionResult::Cancelled => { + return EnvironmentCreationResult::Cancelled; + } + ClusterSelectionResult::Error(e) => { + return EnvironmentCreationResult::Error(e); + } + } + } else { + None + }; + + // Create the environment via API + println!("\n{}", "Creating environment...".dimmed()); + + match client + .create_environment( + project_id, + &name, + env_type.as_str(), + cluster_id.as_deref(), + ) + .await + { + Ok(env) => { + println!( + "\n{} Environment {} created successfully!", + "โœ“".green().bold(), + env.name.bold() + ); + println!(" ID: {}", env.id.dimmed()); + println!(" Type: {}", env.environment_type); + if let Some(cid) = &env.cluster_id { + println!(" Cluster: {}", cid); + } + EnvironmentCreationResult::Created(env) + } + Err(e) => EnvironmentCreationResult::Error(format!("Failed to create environment: {}", e)), + } +} + +/// Result of cluster selection +enum ClusterSelectionResult { + Selected(String), + NoClusters, + Cancelled, + Error(String), +} + +/// Select a Kubernetes cluster from available clusters +async fn select_cluster_for_env( + client: &PlatformApiClient, + project_id: &str, +) -> ClusterSelectionResult { + display_step_header( + 3, + "Select Cluster", + "Choose a Kubernetes cluster for this environment.", + ); + + // Get available clusters + let clusters: Vec<ClusterSummary> = + match get_available_clusters_for_project(client, project_id).await { + Ok(c)
=> c, + Err(e) => return ClusterSelectionResult::Error(e), + }; + + if clusters.is_empty() { + return ClusterSelectionResult::NoClusters; + } + + // Build options + let options: Vec<String> = clusters + .iter() + .map(|c| { + let health = if c.is_healthy { + "healthy".green() + } else { + "unhealthy".red() + }; + format!("{} ({}) - {}", c.name.bold(), c.region.dimmed(), health) + }) + .collect(); + + let selection = Select::new("Select cluster:", options.clone()) + .with_render_config(wizard_render_config()) + .with_help_message("Choose the cluster to deploy to") + .prompt(); + + match selection { + Ok(answer) => { + // Find the selected cluster by matching the name at the start + let selected_name = answer.split(" (").next().unwrap_or(""); + if let Some(cluster) = clusters.iter().find(|c| c.name == selected_name) { + println!("\n{} Selected: {}", "โœ“".green(), cluster.name.bold()); + ClusterSelectionResult::Selected(cluster.id.clone()) + } else { + ClusterSelectionResult::Error("Failed to match selected cluster".to_string()) + } + } + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + println!("\n{}", "Wizard cancelled.".dimmed()); + ClusterSelectionResult::Cancelled + } + Err(e) => ClusterSelectionResult::Error(format!("Selection error: {}", e)), + } +} + +/// Get available clusters from all connected providers for a project +async fn get_available_clusters_for_project( + client: &PlatformApiClient, + project_id: &str, +) -> Result<Vec<ClusterSummary>, String> { + // Get provider deployment statuses which include cluster info + let statuses = get_provider_deployment_statuses(client, project_id) + .await + .map_err(|e| format!("Failed to get provider statuses: {}", e))?; + + // Collect all clusters from connected providers + let mut all_clusters = Vec::new(); + for status in statuses { + if status.is_connected { + all_clusters.extend(status.clusters); + } + } + + Ok(all_clusters) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn
test_environment_creation_result_variants() { + let created = EnvironmentCreationResult::Created(Environment { + id: "env-1".to_string(), + name: "test".to_string(), + project_id: "proj-1".to_string(), + environment_type: "cloud".to_string(), + cluster_id: None, + namespace: None, + description: None, + is_active: true, + created_at: None, + updated_at: None, + }); + assert!(matches!(created, EnvironmentCreationResult::Created(_))); + + let cancelled = EnvironmentCreationResult::Cancelled; + assert!(matches!(cancelled, EnvironmentCreationResult::Cancelled)); + + let error = EnvironmentCreationResult::Error("test error".to_string()); + assert!(matches!(error, EnvironmentCreationResult::Error(_))); + } + + #[test] + fn test_environment_type_as_str() { + assert_eq!(EnvironmentType::Cluster.as_str(), "cluster"); + assert_eq!(EnvironmentType::Cloud.as_str(), "cloud"); + } + + #[test] + fn test_environment_type_display_name() { + assert_eq!(EnvironmentType::Cluster.display_name(), "Kubernetes"); + assert_eq!(EnvironmentType::Cloud.display_name(), "Cloud Runner"); + } +} diff --git a/src/wizard/environment_selection.rs b/src/wizard/environment_selection.rs new file mode 100644 index 00000000..5d5e5420 --- /dev/null +++ b/src/wizard/environment_selection.rs @@ -0,0 +1,149 @@ +//! Environment selection step for the deployment wizard +//! +//! Prompts user to select an environment or create a new one. 
+ +use crate::platform::api::types::Environment; +use crate::platform::api::PlatformApiClient; +use crate::wizard::render::{display_step_header, wizard_render_config}; +use colored::Colorize; +use inquire::{InquireError, Select}; +use std::fmt; + +/// Result of environment selection step +#[derive(Debug, Clone)] +pub enum EnvironmentSelectionResult { + /// User selected an environment + Selected(Environment), + /// User wants to create a new environment + CreateNew, + /// User cancelled the wizard + Cancelled, + /// An error occurred + Error(String), +} + +/// Wrapper for displaying environment options in the selection menu +struct EnvironmentOption { + environment: Environment, +} + +impl fmt::Display for EnvironmentOption { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{} {}", + self.environment.name.cyan(), + self.environment.environment_type.to_string().dimmed() + ) + } +} + +/// Option to create a new environment +struct CreateNewOption; + +impl fmt::Display for CreateNewOption { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", "+ Create new environment".bright_green()) + } +} + +/// Selection menu item that can be either an environment or create new +enum SelectionItem { + Environment(EnvironmentOption), + CreateNew(CreateNewOption), +} + +impl fmt::Display for SelectionItem { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SelectionItem::Environment(env) => env.fmt(f), + SelectionItem::CreateNew(create) => create.fmt(f), + } + } +} + +/// Prompt user to select an environment for deployment +pub async fn select_environment( + client: &PlatformApiClient, + project_id: &str, +) -> EnvironmentSelectionResult { + display_step_header( + 0, + "Select Environment", + "Choose the environment to deploy to.", + ); + + // Fetch environments + let environments = match client.list_environments(project_id).await { + Ok(envs) => envs, + Err(e) => { + return 
EnvironmentSelectionResult::Error(format!( + "Failed to fetch environments: {}", + e + )); + } + }; + + if environments.is_empty() { + println!( + "\n{} No environments found. Let's create one first.", + "โ„น".cyan() + ); + return EnvironmentSelectionResult::CreateNew; + } + + // Build selection options + let mut options: Vec<SelectionItem> = environments + .into_iter() + .map(|env| SelectionItem::Environment(EnvironmentOption { environment: env })) + .collect(); + + // Add create new option at the end + options.push(SelectionItem::CreateNew(CreateNewOption)); + + let selection = Select::new("Select environment:", options) + .with_render_config(wizard_render_config()) + .with_help_message("Use โ†‘/โ†“ to navigate, Enter to select") + .prompt(); + + match selection { + Ok(SelectionItem::Environment(env_opt)) => { + println!( + "\n{} Selected environment: {}", + "โœ“".green(), + env_opt.environment.name.cyan() + ); + EnvironmentSelectionResult::Selected(env_opt.environment) + } + Ok(SelectionItem::CreateNew(_)) => EnvironmentSelectionResult::CreateNew, + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + EnvironmentSelectionResult::Cancelled + } + Err(_) => EnvironmentSelectionResult::Cancelled, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_environment_selection_result_variants() { + let env = Environment { + id: "test-id".to_string(), + name: "prod".to_string(), + project_id: "proj-1".to_string(), + environment_type: "cloud".to_string(), + cluster_id: None, + namespace: None, + description: None, + is_active: true, + created_at: None, + updated_at: None, + }; + let _ = EnvironmentSelectionResult::Selected(env); + let _ = EnvironmentSelectionResult::CreateNew; + let _ = EnvironmentSelectionResult::Cancelled; + let _ = EnvironmentSelectionResult::Error("test".to_string()); + } +} diff --git a/src/wizard/infrastructure_selection.rs b/src/wizard/infrastructure_selection.rs new file mode 100644 index 00000000..347b9981 ---
/dev/null +++ b/src/wizard/infrastructure_selection.rs @@ -0,0 +1,241 @@ +//! Infrastructure selection step for the deployment wizard +//! +//! Handles region and machine type selection for Cloud Runner deployments. + +use crate::platform::api::types::CloudProvider; +use crate::wizard::cloud_provider_data::{ + get_default_machine_type, get_default_region, get_machine_types_for_provider, + get_regions_for_provider, CloudRegion, MachineType, +}; +use crate::wizard::render::{display_step_header, wizard_render_config}; +use colored::Colorize; +use inquire::{InquireError, Select}; +use std::fmt; + +/// Result of infrastructure selection step +#[derive(Debug, Clone)] +pub enum InfrastructureSelectionResult { + /// User selected region and machine type + Selected { + region: String, + machine_type: String, + }, + /// User wants to go back + Back, + /// User cancelled the wizard + Cancelled, +} + +/// Wrapper for displaying region options in the selection menu +struct RegionOption<'a> { + region: &'a CloudRegion, +} + +impl<'a> fmt::Display for RegionOption<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{} {}", + self.region.id.cyan(), + format!("{} ({})", self.region.name, self.region.location).dimmed() + ) + } +} + +/// Wrapper for displaying machine type options in the selection menu +struct MachineTypeOption<'a> { + machine: &'a MachineType, +} + +impl<'a> fmt::Display for MachineTypeOption<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let specs = format!("{} vCPU ยท {}", self.machine.cpu, self.machine.memory); + let desc = self + .machine + .description + .map(|d| format!(" ยท {}", d)) + .unwrap_or_default(); + write!( + f, + "{} {}{}", + self.machine.name.cyan(), + specs.dimmed(), + desc.dimmed() + ) + } +} + +/// Select region and machine type for Cloud Runner deployment +pub fn select_infrastructure( + provider: &CloudProvider, + step_number: u8, +) -> InfrastructureSelectionResult { + // Select region 
first + let region = match select_region(provider, step_number) { + Some(r) => r, + None => return InfrastructureSelectionResult::Back, + }; + + // Then select machine type + match select_machine_type(provider, &region) { + Some(machine_type) => InfrastructureSelectionResult::Selected { + region, + machine_type, + }, + None => InfrastructureSelectionResult::Back, + } +} + +/// Select region/location for deployment +fn select_region(provider: &CloudProvider, step_number: u8) -> Option<String> { + let provider_name = match provider { + CloudProvider::Hetzner => "Hetzner", + CloudProvider::Gcp => "GCP", + _ => "Cloud", + }; + + display_step_header( + step_number, + &format!("Select {} Region", provider_name), + "Choose the geographic location for your deployment.", + ); + + let regions = get_regions_for_provider(provider); + if regions.is_empty() { + println!( + "\n{} No regions available for this provider.", + "โš ".yellow() + ); + return None; + } + + let default_region = get_default_region(provider); + let default_index = regions + .iter() + .position(|r| r.id == default_region) + .unwrap_or(0); + + let options: Vec<RegionOption> = regions.iter().map(|r| RegionOption { region: r }).collect(); + + let selection = Select::new("Select region:", options) + .with_render_config(wizard_render_config()) + .with_starting_cursor(default_index) + .with_help_message("Use โ†‘/โ†“ to navigate, Enter to select") + .prompt(); + + match selection { + Ok(selected) => { + println!( + "\n{} Selected region: {} ({})", + "โœ“".green(), + selected.region.name.cyan(), + selected.region.id + ); + Some(selected.region.id.to_string()) + } + Err(InquireError::OperationCanceled) => None, + Err(InquireError::OperationInterrupted) => None, + Err(_) => None, + } +} + +/// Select machine/instance type for deployment +fn select_machine_type(provider: &CloudProvider, _region: &str) -> Option<String> { + println!(); + println!( + "{}", + "โ”€โ”€โ”€ Machine Type
โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€".dimmed() + ); + println!( + " {}", + "Select the VM size for your deployment.".dimmed() + ); + + let machine_types = get_machine_types_for_provider(provider); + if machine_types.is_empty() { + println!( + "\n{} No machine types available for this provider.", + "โš ".yellow() + ); + return None; + } + + let default_machine = get_default_machine_type(provider); + let default_index = machine_types + .iter() + .position(|m| m.id == default_machine) + .unwrap_or(0); + + let options: Vec<MachineTypeOption> = machine_types + .iter() + .map(|m| MachineTypeOption { machine: m }) + .collect(); + + let selection = Select::new("Select machine type:", options) + .with_render_config(wizard_render_config()) + .with_starting_cursor(default_index) + .with_help_message("Smaller = cheaper, Larger = more resources") + .prompt(); + + match selection { + Ok(selected) => { + println!( + "\n{} Selected: {} ({} vCPU, {})", + "โœ“".green(), + selected.machine.name.cyan(), + selected.machine.cpu, + selected.machine.memory + ); + Some(selected.machine.id.to_string()) + } + Err(InquireError::OperationCanceled) => None, + Err(InquireError::OperationInterrupted) => None, + Err(_) => None, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_region_option_display() { + let region = CloudRegion { + id: "nbg1", + name: "Nuremberg", + location: "Germany", + }; + let option = RegionOption { region: &region }; + let display = format!("{}", option); + assert!(display.contains("nbg1")); + assert!(display.contains("Nuremberg")); + } + + #[test] + fn test_machine_type_option_display() { + let machine = MachineType { + id: "cx22", + name: "CX22", + cpu: "2", + memory: "4 GB", + description: Some("Shared Intel"), + }; + let option = MachineTypeOption { machine: &machine }; + let display = format!("{}", option); + assert!(display.contains("CX22")); + assert!(display.contains("2 vCPU")); + assert!(display.contains("4 GB"));
} + + #[test] + fn test_infrastructure_selection_result_variants() { + let selected = InfrastructureSelectionResult::Selected { + region: "nbg1".to_string(), + machine_type: "cx22".to_string(), + }; + matches!(selected, InfrastructureSelectionResult::Selected { .. }); + + let _ = InfrastructureSelectionResult::Back; + let _ = InfrastructureSelectionResult::Cancelled; + } +} diff --git a/src/wizard/mod.rs b/src/wizard/mod.rs new file mode 100644 index 00000000..8d2c15f6 --- /dev/null +++ b/src/wizard/mod.rs @@ -0,0 +1,43 @@ +//! Interactive deployment wizard for configuring new services +//! +//! Provides a step-by-step TUI wizard for deploying services to the Syncable platform. + +mod cloud_provider_data; +mod cluster_selection; +mod config_form; +mod dockerfile_selection; +mod environment_creation; +mod environment_selection; +mod infrastructure_selection; +mod orchestrator; +mod provider_selection; +pub mod recommendations; +mod registry_provisioning; +mod registry_selection; +mod render; +mod repository_selection; +mod target_selection; + +pub use cloud_provider_data::{ + get_default_machine_type, get_default_region, get_machine_types_for_provider, + get_regions_for_provider, CloudRegion, MachineType, +}; +pub use cluster_selection::{select_cluster, ClusterSelectionResult}; +pub use config_form::{collect_config, ConfigFormResult}; +pub use dockerfile_selection::{select_dockerfile, DockerfileSelectionResult}; +pub use environment_creation::{create_environment_wizard, EnvironmentCreationResult}; +pub use environment_selection::{select_environment, EnvironmentSelectionResult}; +pub use infrastructure_selection::{select_infrastructure, InfrastructureSelectionResult}; +pub use orchestrator::{run_wizard, WizardResult}; +pub use provider_selection::{ + get_provider_deployment_statuses, select_provider, ProviderSelectionResult, +}; +pub use registry_provisioning::{provision_registry, RegistryProvisioningResult}; +pub use registry_selection::{select_registry, 
RegistrySelectionResult}; +pub use repository_selection::{select_repository, RepositorySelectionResult}; +pub use recommendations::{ + recommend_deployment, DeploymentRecommendation, MachineOption, ProviderOption, + RecommendationAlternatives, RecommendationInput, RegionOption, +}; +pub use render::{count_badge, display_step_header, status_indicator, wizard_render_config}; +pub use target_selection::{select_target, TargetSelectionResult}; diff --git a/src/wizard/orchestrator.rs b/src/wizard/orchestrator.rs new file mode 100644 index 00000000..b6d6aa57 --- /dev/null +++ b/src/wizard/orchestrator.rs @@ -0,0 +1,547 @@ +//! Wizard orchestration - ties all steps together + +use crate::analyzer::discover_dockerfiles_for_deployment; +use crate::platform::api::types::{ + build_cloud_runner_config, ConnectRepositoryRequest, CreateDeploymentConfigRequest, + DeploymentTarget, ProjectRepository, TriggerDeploymentRequest, WizardDeploymentConfig, +}; +use crate::platform::api::PlatformApiClient; +use crate::wizard::{ + collect_config, get_provider_deployment_statuses, provision_registry, select_cluster, + select_dockerfile, select_infrastructure, select_provider, select_registry, select_repository, + select_target, ClusterSelectionResult, ConfigFormResult, DockerfileSelectionResult, + InfrastructureSelectionResult, ProviderSelectionResult, RegistryProvisioningResult, + RegistrySelectionResult, RepositorySelectionResult, TargetSelectionResult, +}; +use colored::Colorize; +use inquire::{Confirm, InquireError}; +use std::path::Path; + +/// Deployment result with task ID for tracking +#[derive(Debug, Clone)] +pub struct DeploymentInfo { + /// The deployment config ID + pub config_id: String, + /// Backstage task ID for tracking progress + pub task_id: String, + /// Service name that was deployed + pub service_name: String, +} + +/// Result of running the wizard +#[derive(Debug)] +pub enum WizardResult { + /// Wizard completed and deployment triggered + Deployed(DeploymentInfo), + 
/// Wizard completed successfully (config created but not deployed) + Success(WizardDeploymentConfig), + /// User wants to start agent to create Dockerfile + StartAgent(String), + /// User cancelled the wizard + Cancelled, + /// An error occurred + Error(String), +} + +/// Run the deployment wizard +pub async fn run_wizard( + client: &PlatformApiClient, + project_id: &str, + environment_id: &str, + project_path: &Path, +) -> WizardResult { + println!(); + println!( + "{}", + "โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•".bright_cyan() + ); + println!( + "{}", + " Deployment Wizard " + .bright_cyan() + .bold() + ); + println!( + "{}", + "โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•".bright_cyan() + ); + + // Step 0: Repository selection (auto-detect or ask) + let repository = match select_repository(client, project_id, project_path).await { + RepositorySelectionResult::Selected(repo) => repo, + RepositorySelectionResult::ConnectNew(available) => { + // Connect the repository first + println!("{} Connecting repository...", "โ†’".cyan()); + + // Extract owner from full_name if not provided + let owner = available + .owner + .clone() + .unwrap_or_else(|| available.full_name.split('/').next().unwrap_or("").to_string()); + + let connect_request = ConnectRepositoryRequest { + project_id: project_id.to_string(), + repository_id: available.id, + repository_name: available.name.clone(), + repository_full_name: available.full_name.clone(), + repository_owner: owner.clone(), + repository_private: available.private, + default_branch: available.default_branch.clone().or(Some("main".to_string())), + connection_type: Some("app".to_string()), + github_installation_id: available.installation_id, + 
repository_type: Some("application".to_string()), + }; + match client.connect_repository(&connect_request).await { + Ok(response) => { + println!("{} Repository connected!", "โœ“".green()); + // Construct ProjectRepository from the response and available info + ProjectRepository { + id: response.id, + project_id: response.project_id, + repository_id: response.repository_id, + repository_name: available.name, + repository_full_name: response.repository_full_name, + repository_owner: owner, + repository_private: available.private, + default_branch: available.default_branch, + is_active: response.is_active, + connection_type: Some("app".to_string()), + repository_type: Some("application".to_string()), + is_primary_git_ops: None, + github_installation_id: available.installation_id, + user_id: None, + created_at: None, + updated_at: None, + } + } + Err(e) => { + return WizardResult::Error(format!("Failed to connect repository: {}", e)); + } + } + } + RepositorySelectionResult::NeedsGitHubApp { installation_url, org_name } => { + println!( + "\n{} Please install the Syncable GitHub App for organization '{}' first.", + "โš ".yellow(), + org_name.cyan() + ); + println!("Installation URL: {}", installation_url); + return WizardResult::Cancelled; + } + RepositorySelectionResult::NoInstallations { installation_url } => { + println!( + "\n{} No GitHub App installations found. Please install the app first.", + "โš ".yellow() + ); + println!("Installation URL: {}", installation_url); + return WizardResult::Cancelled; + } + RepositorySelectionResult::NoRepositories => { + return WizardResult::Error( + "No repositories available. Please install the Syncable GitHub App first." 
+ .to_string(), + ); + } + RepositorySelectionResult::Cancelled => return WizardResult::Cancelled, + RepositorySelectionResult::Error(e) => return WizardResult::Error(e), + }; + + // Step 1: Provider selection + let provider_statuses = match get_provider_deployment_statuses(client, project_id).await { + Ok(s) => s, + Err(e) => { + return WizardResult::Error(format!("Failed to fetch provider status: {}", e)); + } + }; + + let provider = match select_provider(&provider_statuses) { + ProviderSelectionResult::Selected(p) => p, + ProviderSelectionResult::Cancelled => return WizardResult::Cancelled, + }; + + // Get status for selected provider + let provider_status = provider_statuses + .iter() + .find(|s| s.provider == provider) + .expect("Selected provider must exist in statuses"); + + // Step 2: Target selection (with back navigation) + let target = match select_target(provider_status) { + TargetSelectionResult::Selected(t) => t, + TargetSelectionResult::Back => { + // Restart from provider selection + return Box::pin(run_wizard(client, project_id, environment_id, project_path)).await; + } + TargetSelectionResult::Cancelled => return WizardResult::Cancelled, + }; + + // Step 3: Infrastructure selection for Cloud Runner OR Cluster selection for K8s + let (cluster_id, region, machine_type) = if target == DeploymentTarget::CloudRunner { + // Cloud Runner: Select region and machine type + match select_infrastructure(&provider, 3) { + InfrastructureSelectionResult::Selected { + region, + machine_type, + } => (None, Some(region), Some(machine_type)), + InfrastructureSelectionResult::Back => { + // Go back (restart wizard for simplicity) + return Box::pin(run_wizard(client, project_id, environment_id, project_path)).await; + } + InfrastructureSelectionResult::Cancelled => return WizardResult::Cancelled, + } + } else { + // Kubernetes: Select cluster + match select_cluster(&provider_status.clusters) { + ClusterSelectionResult::Selected(c) => (Some(c.id), None, None), + 
ClusterSelectionResult::Back => { + // Go back to target selection (restart wizard for simplicity) + return Box::pin(run_wizard(client, project_id, environment_id, project_path)) + .await; + } + ClusterSelectionResult::Cancelled => return WizardResult::Cancelled, + } + }; + + // Step 4: Registry selection + let registry_id = loop { + match select_registry(&provider_status.registries) { + RegistrySelectionResult::Selected(r) => break Some(r.id), + RegistrySelectionResult::ProvisionNew => { + // Get cluster info for provisioning + let (prov_cluster_id, prov_cluster_name, prov_region) = + if let Some(ref cid) = cluster_id { + // Use selected cluster + let cluster = provider_status + .clusters + .iter() + .find(|c| c.id == *cid) + .expect("Selected cluster must exist"); + (cid.clone(), cluster.name.clone(), cluster.region.clone()) + } else { + // For Cloud Runner, use first available cluster for registry provisioning + if let Some(cluster) = provider_status.clusters.first() { + ( + cluster.id.clone(), + cluster.name.clone(), + cluster.region.clone(), + ) + } else { + return WizardResult::Error( + "No cluster available for registry provisioning".to_string(), + ); + } + }; + + // Provision the registry + match provision_registry( + client, + project_id, + &prov_cluster_id, + &prov_cluster_name, + provider.clone(), + &prov_region, + None, // GCP project ID resolved by backend + ) + .await + { + RegistryProvisioningResult::Success(registry) => { + break Some(registry.id); + } + RegistryProvisioningResult::Cancelled => { + return WizardResult::Cancelled; + } + RegistryProvisioningResult::Error(e) => { + eprintln!("{} {}", "Registry provisioning failed:".red(), e); + // Allow retry - loop back to selection + continue; + } + } + } + RegistrySelectionResult::Back => { + // Go back (restart wizard for simplicity) + return Box::pin(run_wizard(client, project_id, environment_id, project_path)).await; + } + RegistrySelectionResult::Cancelled => return WizardResult::Cancelled, + } 
+ }; + + // Step 5: Dockerfile selection + let dockerfiles = discover_dockerfiles_for_deployment(project_path).unwrap_or_default(); + let (selected_dockerfile, build_context) = match select_dockerfile(&dockerfiles, project_path) { + DockerfileSelectionResult::Selected { + dockerfile, + build_context, + } => (dockerfile, build_context), + DockerfileSelectionResult::StartAgent(prompt) => { + return WizardResult::StartAgent(prompt); + } + DockerfileSelectionResult::Back => { + // Go back (restart wizard for simplicity) + return Box::pin(run_wizard(client, project_id, environment_id, project_path)).await; + } + DockerfileSelectionResult::Cancelled => return WizardResult::Cancelled, + }; + + // Construct dockerfile path from build_context and filename + // This is more robust than strip_prefix which can have path matching edge cases + // Docker's -f flag expects path relative to repo root (where docker is invoked) + let dockerfile_name = selected_dockerfile + .path + .file_name() + .map(|n| n.to_string_lossy().to_string()) + .unwrap_or_else(|| "Dockerfile".to_string()); + + let dockerfile_path = if build_context == "." 
|| build_context.is_empty() { + dockerfile_name.clone() // Dockerfile at repo root + } else { + format!("{}/{}", build_context, dockerfile_name) // e.g., "services/foo/Dockerfile" + }; + + log::debug!( + "Dockerfile path: {}, build_context: {}, dockerfile_name: {}", + dockerfile_path, + build_context, + dockerfile_name + ); + + // Step 6: Config form + let config = match collect_config( + provider.clone(), + target.clone(), + cluster_id.clone(), + registry_id.clone(), + environment_id, + &dockerfile_path, + &build_context, + &selected_dockerfile, + region.clone(), + machine_type.clone(), + 6, + ) { + ConfigFormResult::Completed(config) => config, + ConfigFormResult::Back => { + // Restart wizard + return Box::pin(run_wizard(client, project_id, environment_id, project_path)).await; + } + ConfigFormResult::Cancelled => return WizardResult::Cancelled, + }; + + // Show summary + display_summary(&config); + + // Step 7: Confirm and deploy + println!(); + let should_deploy = match Confirm::new("Deploy now?") + .with_default(true) + .with_help_message("This will create the deployment configuration and start the deployment") + .prompt() + { + Ok(v) => v, + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + return WizardResult::Cancelled; + } + Err(_) => return WizardResult::Cancelled, + }; + + if !should_deploy { + println!("{}", "Deployment skipped. 
Configuration saved.".dimmed()); + return WizardResult::Success(config); + } + + // Create deployment configuration + println!(); + println!("{}", "Creating deployment configuration...".dimmed()); + + let deploy_request = CreateDeploymentConfigRequest { + project_id: project_id.to_string(), + service_name: config.service_name.clone().unwrap_or_default(), + repository_id: repository.repository_id, + repository_full_name: repository.repository_full_name.clone(), + // Send both field name variants for backend compatibility + dockerfile_path: config.dockerfile_path.clone(), + dockerfile: config.dockerfile_path.clone(), // Alias + build_context: config.build_context.clone(), + context: config.build_context.clone(), // Alias + port: config.port.unwrap_or(8080) as i32, + branch: config.branch.clone().unwrap_or_else(|| "main".to_string()), + target_type: target.as_str().to_string(), + cloud_provider: provider.as_str().to_string(), + environment_id: environment_id.to_string(), + cluster_id: cluster_id.clone(), + registry_id: registry_id.clone(), + auto_deploy_enabled: config.auto_deploy, + is_public: Some(config.is_public), + cloud_runner_config: if target == DeploymentTarget::CloudRunner { + Some(build_cloud_runner_config( + &provider, + region.as_deref().unwrap_or(""), + machine_type.as_deref().unwrap_or(""), + config.is_public, + config.health_check_path.as_deref(), + )) + } else { + None + }, + }; + + // Debug output - show key fields being sent + log::debug!("CreateDeploymentConfigRequest fields:"); + log::debug!(" projectId: {}", deploy_request.project_id); + log::debug!(" serviceName: {}", deploy_request.service_name); + log::debug!(" environmentId: {}", deploy_request.environment_id); + log::debug!(" repositoryId: {}", deploy_request.repository_id); + log::debug!(" repositoryFullName: {}", deploy_request.repository_full_name); + log::debug!(" dockerfilePath: {:?}", deploy_request.dockerfile_path); + log::debug!(" buildContext: {:?}", deploy_request.build_context); + 
log::debug!(" targetType: {}", deploy_request.target_type); + log::debug!(" cloudProvider: {}", deploy_request.cloud_provider); + log::debug!(" port: {}", deploy_request.port); + log::debug!(" branch: {}", deploy_request.branch); + if let Some(ref config) = deploy_request.cloud_runner_config { + log::debug!(" cloudRunnerConfig: {}", config); + } + + let deployment_config = match client.create_deployment_config(&deploy_request).await { + Ok(config) => config, + Err(e) => { + return WizardResult::Error(format!("Failed to create deployment config: {}", e)); + } + }; + + println!( + "{} Deployment configuration created: {}", + "โœ“".green(), + deployment_config.id.dimmed() + ); + log::debug!(" Config ID: {}", deployment_config.id); + log::debug!(" Service Name: {}", deployment_config.service_name); + log::debug!(" Environment ID: {}", deployment_config.environment_id); + + // Trigger deployment + println!("{}", "Triggering deployment...".dimmed()); + + let trigger_request = TriggerDeploymentRequest { + project_id: project_id.to_string(), + config_id: deployment_config.id.clone(), + commit_sha: None, // Use latest from branch + }; + + // Debug: Show trigger request + log::debug!( + "Trigger request: projectId={}, configId={}", + trigger_request.project_id, + trigger_request.config_id + ); + + match client.trigger_deployment(&trigger_request).await { + Ok(response) => { + log::info!( + "Deployment triggered successfully: taskId={}, status={}, message={}", + response.backstage_task_id, + response.status, + response.message + ); + + println!(); + println!( + "{}", + "โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•".bright_green() + ); + println!( + "{} Deployment started!", + "โœ“".bright_green().bold() + ); + println!( + "{}", + 
"โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•".bright_green() + ); + println!(); + println!(" Service: {}", config.service_name.as_deref().unwrap_or("").cyan()); + println!(" Task ID: {}", response.backstage_task_id.dimmed()); + println!(" Status: {}", response.status.yellow()); + println!(); + println!( + "{}", + "Track progress: sync-ctl deploy status ".dimmed() + ); + println!(); + + WizardResult::Deployed(DeploymentInfo { + config_id: deployment_config.id, + task_id: response.backstage_task_id, + service_name: config.service_name.unwrap_or_default(), + }) + } + Err(e) => { + log::error!("Failed to trigger deployment: {}", e); + eprintln!( + "\n{} {} {}\n", + "โœ—".red().bold(), + "Deployment trigger failed:".red().bold(), + e + ); + WizardResult::Error(format!("Failed to trigger deployment: {}", e)) + } + } +} + +/// Display a summary of the deployment configuration +fn display_summary(config: &WizardDeploymentConfig) { + println!(); + println!( + "{}", + "โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€".dimmed() + ); + println!("{}", " Deployment Summary ".bright_green().bold()); + println!( + "{}", + "โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€".dimmed() + ); + + if let Some(ref name) = config.service_name { + println!(" Service: {}", name.cyan()); + } + if let Some(ref target) = config.target { + println!(" Target: {}", target.display_name()); + } + if let Some(ref provider) = config.provider { + println!(" Provider: {:?}", provider); + } + if let Some(ref region) = config.region { + println!(" Region: {}", 
region.cyan()); + } + if let Some(ref machine) = config.machine_type { + println!(" Machine: {}", machine.cyan()); + } + if let Some(ref branch) = config.branch { + println!(" Branch: {}", branch); + } + if let Some(port) = config.port { + println!(" Port: {}", port); + } + println!( + " Public: {}", + if config.is_public { + "Yes".green() + } else { + "No".yellow() + } + ); + if let Some(ref health) = config.health_check_path { + println!(" Health check: {}", health.cyan()); + } + println!( + " Auto-deploy: {}", + if config.auto_deploy { + "Yes".green() + } else { + "No".yellow() + } + ); + + println!( + "{}", + "โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€".dimmed() + ); + println!(); +} diff --git a/src/wizard/provider_selection.rs b/src/wizard/provider_selection.rs new file mode 100644 index 00000000..5e2cbbce --- /dev/null +++ b/src/wizard/provider_selection.rs @@ -0,0 +1,316 @@ +//! Provider selection step for deployment wizard + +use crate::platform::api::{ + types::{ + CloudProvider, ClusterStatus, ClusterSummary, ProviderDeploymentStatus, RegistryStatus, + RegistrySummary, + }, + PlatformApiClient, +}; +use crate::wizard::render::{display_step_header, status_indicator, wizard_render_config}; +use colored::Colorize; +use inquire::{InquireError, Select}; +use std::collections::HashMap; + +/// Get deployment status for all providers +/// +/// Queries the platform to determine which providers are connected and what +/// resources (clusters, registries) are available for each. 
+pub async fn get_provider_deployment_statuses( + client: &PlatformApiClient, + project_id: &str, +) -> Result, crate::platform::api::PlatformApiError> { + // Get all cloud credentials for the project (determines connectivity) + let credentials = client + .list_cloud_credentials_for_project(project_id) + .await + .unwrap_or_default(); + + // Build set of connected providers from credentials + let connected_providers: std::collections::HashSet = credentials + .iter() + .map(|c| c.provider.to_lowercase()) + .collect(); + + // Get all clusters and registries for the project + let clusters = client + .list_clusters_for_project(project_id) + .await + .unwrap_or_default(); + let registries = client + .list_registries_for_project(project_id) + .await + .unwrap_or_default(); + + // Group by provider + let mut provider_clusters: HashMap> = HashMap::new(); + let mut provider_registries: HashMap> = HashMap::new(); + + for cluster in clusters { + let summary = ClusterSummary { + id: cluster.id, + name: cluster.name, + region: cluster.region, + is_healthy: cluster.status == ClusterStatus::Running, + }; + provider_clusters + .entry(cluster.provider) + .or_default() + .push(summary); + } + + for registry in registries { + let summary = RegistrySummary { + id: registry.id, + name: registry.name, + region: registry.region, + is_ready: registry.status == RegistryStatus::Ready, + }; + provider_registries + .entry(registry.cloud_provider) + .or_default() + .push(summary); + } + + // Build status for each supported provider + // Available providers first, then coming soon providers + let providers = [ + CloudProvider::Gcp, + CloudProvider::Hetzner, + CloudProvider::Aws, + CloudProvider::Azure, + CloudProvider::Scaleway, + CloudProvider::Cyso, + ]; + let mut statuses = Vec::new(); + + for provider in providers { + let clusters = provider_clusters.remove(&provider).unwrap_or_default(); + let registries = provider_registries.remove(&provider).unwrap_or_default(); + + // Provider is 
connected if it has cloud credentials (NOT just resources) + let is_connected = connected_providers.contains(provider.as_str()); + + // Cloud Runner available for GCP and Hetzner when connected + let cloud_runner_available = + is_connected && matches!(provider, CloudProvider::Gcp | CloudProvider::Hetzner); + + let summary = build_status_summary(&clusters, ®istries, cloud_runner_available); + + statuses.push(ProviderDeploymentStatus { + provider, + is_connected, + clusters, + registries, + cloud_runner_available, + summary, + }); + } + + Ok(statuses) +} + +/// Build a human-readable summary string for a provider +fn build_status_summary( + clusters: &[ClusterSummary], + registries: &[RegistrySummary], + cloud_runner: bool, +) -> String { + let mut parts = Vec::new(); + + if cloud_runner { + parts.push("Cloud Run".to_string()); + } + + let healthy_clusters = clusters.iter().filter(|c| c.is_healthy).count(); + if healthy_clusters > 0 { + parts.push(format!( + "{} cluster{}", + healthy_clusters, + if healthy_clusters == 1 { "" } else { "s" } + )); + } + + let ready_registries = registries.iter().filter(|r| r.is_ready).count(); + if ready_registries > 0 { + parts.push(format!( + "{} registr{}", + ready_registries, + if ready_registries == 1 { "y" } else { "ies" } + )); + } + + if parts.is_empty() { + "Not connected".to_string() + } else { + parts.join(", ") + } +} + +/// Result of provider selection step +#[derive(Debug, Clone)] +pub enum ProviderSelectionResult { + /// User selected a provider + Selected(CloudProvider), + /// User cancelled the wizard + Cancelled, +} + +/// Display provider selection and prompt user to choose +pub fn select_provider(statuses: &[ProviderDeploymentStatus]) -> ProviderSelectionResult { + display_step_header( + 1, + "Select Provider", + "Choose which cloud provider to deploy to. 
You'll need to connect providers in the platform settings first.", + ); + + // Build options with status indicators + let options: Vec = statuses + .iter() + .map(|s| { + let name = format!("{:?}", s.provider); + // Check availability first - unavailable providers show "Coming Soon" + if !s.provider.is_available() { + format!("โ—‹ {} {}", name.dimmed(), "(Coming Soon)".yellow()) + } else { + let indicator = status_indicator(s.is_connected); + if s.is_connected { + format!("{} {} {}", indicator, name, s.summary.dimmed()) + } else { + format!("{} {} {}", indicator, name.dimmed(), "Not connected".dimmed()) + } + } + }) + .collect(); + + // Find available AND connected providers for validation + let available_connected_indices: Vec = statuses + .iter() + .enumerate() + .filter(|(_, s)| s.provider.is_available() && s.is_connected) + .map(|(i, _)| i) + .collect(); + + if available_connected_indices.is_empty() { + println!( + "\n{}", + "No providers connected. Connect a cloud provider in platform settings first.".red() + ); + println!( + " {}", + "Visit: https://app.syncable.dev/integrations".dimmed() + ); + println!( + " {}", + "Note: GCP and Hetzner are currently available. AWS, Azure, Scaleway, and Cyso Cloud are coming soon.".dimmed() + ); + return ProviderSelectionResult::Cancelled; + } + + let selection = Select::new("Select a provider:", options) + .with_render_config(wizard_render_config()) + .with_help_message("โ†‘โ†“ to move, Enter to select, Esc to cancel") + .with_page_size(6) + .prompt(); + + match selection { + Ok(answer) => { + // Find which provider was selected + let selected_idx = statuses + .iter() + .position(|s| { + let display = format!("{:?}", s.provider); + answer.contains(&display) + }) + .unwrap_or(0); + + let selected_status = &statuses[selected_idx]; + + // Check availability first - coming soon providers can't be selected + if !selected_status.provider.is_available() { + println!( + "\n{}", + format!( + "{} is coming soon! 
Currently only GCP and Hetzner are available.", + selected_status.provider.display_name() + ) + .yellow() + ); + return ProviderSelectionResult::Cancelled; + } + + if !selected_status.is_connected { + println!( + "\n{}", + format!( + "{:?} is not connected. Please connect it in platform settings first.", + selected_status.provider + ) + .yellow() + ); + return ProviderSelectionResult::Cancelled; + } + + println!( + "\n{} Selected: {:?}", + "โœ“".green(), + selected_status.provider + ); + ProviderSelectionResult::Selected(selected_status.provider.clone()) + } + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + println!("\n{}", "Wizard cancelled.".dimmed()); + ProviderSelectionResult::Cancelled + } + Err(_) => ProviderSelectionResult::Cancelled, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_build_status_summary_cloud_runner_only() { + let summary = build_status_summary(&[], &[], true); + assert_eq!(summary, "Cloud Run"); + } + + #[test] + fn test_build_status_summary_full() { + let clusters = vec![ + ClusterSummary { + id: "c1".to_string(), + name: "prod".to_string(), + region: "us-central1".to_string(), + is_healthy: true, + }, + ClusterSummary { + id: "c2".to_string(), + name: "staging".to_string(), + region: "us-east1".to_string(), + is_healthy: false, + }, + ]; + let registries = vec![RegistrySummary { + id: "r1".to_string(), + name: "main".to_string(), + region: "us-central1".to_string(), + is_ready: true, + }]; + let summary = build_status_summary(&clusters, ®istries, true); + assert_eq!(summary, "Cloud Run, 1 cluster, 1 registry"); + } + + #[test] + fn test_build_status_summary_not_connected() { + let summary = build_status_summary(&[], &[], false); + assert_eq!(summary, "Not connected"); + } + + #[test] + fn test_provider_selection_result_variants() { + let _ = ProviderSelectionResult::Selected(CloudProvider::Gcp); + let _ = ProviderSelectionResult::Cancelled; + } +} diff --git 
a/src/wizard/recommendations.rs b/src/wizard/recommendations.rs new file mode 100644 index 00000000..d275a1dd --- /dev/null +++ b/src/wizard/recommendations.rs @@ -0,0 +1,769 @@ +//! Deployment recommendation engine +//! +//! Generates intelligent deployment recommendations based on project analysis. +//! Takes analyzer output and produces actionable suggestions with reasoning. + +use crate::analyzer::{PortSource, ProjectAnalysis, TechnologyCategory}; +use crate::platform::api::types::{CloudProvider, DeploymentTarget}; +use crate::wizard::cloud_provider_data::{ + get_default_machine_type, get_default_region, get_machine_types_for_provider, + get_regions_for_provider, +}; +use serde::{Deserialize, Serialize}; + +/// A deployment recommendation with reasoning +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DeploymentRecommendation { + /// Recommended cloud provider + pub provider: CloudProvider, + /// Why this provider was recommended + pub provider_reasoning: String, + + /// Recommended deployment target + pub target: DeploymentTarget, + /// Why this target was recommended + pub target_reasoning: String, + + /// Recommended machine type (provider-specific) + pub machine_type: String, + /// Why this machine type was recommended + pub machine_reasoning: String, + + /// Recommended region + pub region: String, + /// Why this region was recommended + pub region_reasoning: String, + + /// Detected port to expose + pub port: u16, + /// Where the port was detected from + pub port_source: String, + + /// Recommended health check path (if detected) + pub health_check_path: Option, + + /// Overall confidence in recommendation (0.0-1.0) + pub confidence: f32, + + /// Alternative recommendations if user wants to customize + pub alternatives: RecommendationAlternatives, +} + +/// Alternative options for customization +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RecommendationAlternatives { + pub providers: Vec, + pub machine_types: Vec, + pub 
regions: Vec, +} + +/// Provider option with availability info +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ProviderOption { + pub provider: CloudProvider, + pub available: bool, + pub reason_if_unavailable: Option, +} + +/// Machine type option with specs +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MachineOption { + pub machine_type: String, + pub vcpu: String, + pub memory_gb: String, + pub description: String, +} + +/// Region option with display name +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RegionOption { + pub region: String, + pub display_name: String, +} + +/// Input for generating recommendations +#[derive(Debug, Clone)] +pub struct RecommendationInput { + pub analysis: ProjectAnalysis, + pub available_providers: Vec, + pub has_existing_k8s: bool, + pub user_region_hint: Option, +} + +/// Generate deployment recommendation based on project analysis +pub fn recommend_deployment(input: RecommendationInput) -> DeploymentRecommendation { + // 1. Select provider + let (provider, provider_reasoning) = select_provider(&input); + + // 2. Select target (K8s vs Cloud Runner) + let (target, target_reasoning) = select_target(&input); + + // 3. Select machine type based on detected framework + let (machine_type, machine_reasoning) = select_machine_type(&input.analysis, &provider); + + // 4. Select region + let (region, region_reasoning) = select_region(&provider, input.user_region_hint.as_deref()); + + // 5. Select port + let (port, port_source) = select_port(&input.analysis); + + // 6. Select health check path + let health_check_path = select_health_endpoint(&input.analysis); + + // 7. Calculate confidence + let confidence = calculate_confidence(&input.analysis, &port_source, health_check_path.is_some()); + + // 8. 
Build alternatives + let alternatives = build_alternatives(&provider, &input.available_providers); + + DeploymentRecommendation { + provider, + provider_reasoning, + target, + target_reasoning, + machine_type, + machine_reasoning, + region, + region_reasoning, + port, + port_source, + health_check_path, + confidence, + alternatives, + } +} + +/// Select the best provider based on available options and project characteristics +fn select_provider(input: &RecommendationInput) -> (CloudProvider, String) { + // Check if infrastructure suggests a specific provider + if let Some(ref infra) = input.analysis.infrastructure { + // If they have existing K8s clusters, prefer the provider they're already using + if infra.has_kubernetes || input.has_existing_k8s { + // For now, default to Hetzner for K8s unless GCP clusters detected + if input.available_providers.contains(&CloudProvider::Gcp) { + return ( + CloudProvider::Gcp, + "GCP recommended: Existing Kubernetes infrastructure detected".to_string(), + ); + } + } + } + + // Check which providers are available + let has_hetzner = input.available_providers.contains(&CloudProvider::Hetzner); + let has_gcp = input.available_providers.contains(&CloudProvider::Gcp); + + if has_hetzner && has_gcp { + // Both available - prefer Hetzner for cost-effectiveness + ( + CloudProvider::Hetzner, + "Hetzner recommended: Cost-effective for web services, European data centers".to_string(), + ) + } else if has_hetzner { + ( + CloudProvider::Hetzner, + "Hetzner selected: Only available connected provider".to_string(), + ) + } else if has_gcp { + ( + CloudProvider::Gcp, + "GCP selected: Only available connected provider".to_string(), + ) + } else { + // Fallback - shouldn't happen in practice + ( + CloudProvider::Hetzner, + "Hetzner selected: Default provider".to_string(), + ) + } +} + +/// Select deployment target based on existing infrastructure +fn select_target(input: &RecommendationInput) -> (DeploymentTarget, String) { + // Check for 
existing Kubernetes infrastructure + if let Some(ref infra) = input.analysis.infrastructure { + if infra.has_kubernetes && input.has_existing_k8s { + return ( + DeploymentTarget::Kubernetes, + "Kubernetes recommended: Existing K8s manifests detected and clusters available".to_string(), + ); + } + } + + // Default to Cloud Runner for simplicity + ( + DeploymentTarget::CloudRunner, + "Cloud Runner recommended: Simpler deployment, no cluster management required".to_string(), + ) +} + +/// Select machine type based on detected framework characteristics +fn select_machine_type(analysis: &ProjectAnalysis, provider: &CloudProvider) -> (String, String) { + // Detect framework type to determine resource needs + let framework_info = get_framework_resource_hint(analysis); + + let (machine_type, reasoning) = match provider { + CloudProvider::Hetzner => { + match framework_info.memory_requirement { + MemoryRequirement::Low => ( + "cx23".to_string(), + format!("cx23 (2 vCPU, 4GB) recommended: {} services are memory-efficient", framework_info.name), + ), + MemoryRequirement::Medium => ( + "cx33".to_string(), + format!("cx33 (4 vCPU, 8GB) recommended: {} may benefit from more resources", framework_info.name), + ), + MemoryRequirement::High => ( + "cx43".to_string(), + format!("cx43 (8 vCPU, 16GB) recommended: {} requires significant memory (JVM, ML, etc.)", framework_info.name), + ), + } + } + CloudProvider::Gcp => { + match framework_info.memory_requirement { + MemoryRequirement::Low => ( + "e2-small".to_string(), + format!("e2-small (0.5 vCPU, 2GB) recommended: {} services are lightweight", framework_info.name), + ), + MemoryRequirement::Medium => ( + "e2-medium".to_string(), + format!("e2-medium (1 vCPU, 4GB) recommended: {} may need moderate resources", framework_info.name), + ), + MemoryRequirement::High => ( + "e2-standard-2".to_string(), + format!("e2-standard-2 (2 vCPU, 8GB) recommended: {} requires significant memory", framework_info.name), + ), + } + } + _ => { + // 
Fallback for unsupported providers + ( + get_default_machine_type(provider).to_string(), + "Default machine type selected".to_string(), + ) + } + }; + + (machine_type, reasoning) +} + +/// Memory requirement categories +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum MemoryRequirement { + Low, // Node.js, Go, Rust - efficient runtimes + Medium, // Python, Ruby - moderate memory + High, // Java/JVM, ML frameworks - memory intensive +} + +/// Framework resource hint for machine selection +struct FrameworkResourceHint { + name: String, + memory_requirement: MemoryRequirement, +} + +/// Analyze project to determine framework resource requirements +fn get_framework_resource_hint(analysis: &ProjectAnalysis) -> FrameworkResourceHint { + // Check for JVM-based frameworks (high memory) + for tech in &analysis.technologies { + if matches!(tech.category, TechnologyCategory::BackendFramework) { + let name_lower = tech.name.to_lowercase(); + + // JVM frameworks - high memory + if name_lower.contains("spring") || name_lower.contains("quarkus") + || name_lower.contains("micronaut") || name_lower.contains("ktor") { + return FrameworkResourceHint { + name: tech.name.clone(), + memory_requirement: MemoryRequirement::High, + }; + } + + // Go, Rust frameworks - low memory + if name_lower.contains("gin") || name_lower.contains("echo") + || name_lower.contains("fiber") || name_lower.contains("chi") + || name_lower.contains("actix") || name_lower.contains("axum") + || name_lower.contains("rocket") { + return FrameworkResourceHint { + name: tech.name.clone(), + memory_requirement: MemoryRequirement::Low, + }; + } + + // Node.js frameworks - low memory + if name_lower.contains("express") || name_lower.contains("fastify") + || name_lower.contains("koa") || name_lower.contains("hono") + || name_lower.contains("elysia") || name_lower.contains("nest") { + return FrameworkResourceHint { + name: tech.name.clone(), + memory_requirement: MemoryRequirement::Low, + }; + } + + // Python frameworks 
- medium memory + if name_lower.contains("fastapi") || name_lower.contains("flask") + || name_lower.contains("django") { + return FrameworkResourceHint { + name: tech.name.clone(), + memory_requirement: MemoryRequirement::Medium, + }; + } + } + } + + // Check languages if no framework detected + for lang in &analysis.languages { + let name_lower = lang.name.to_lowercase(); + + if name_lower.contains("java") || name_lower.contains("kotlin") || name_lower.contains("scala") { + return FrameworkResourceHint { + name: lang.name.clone(), + memory_requirement: MemoryRequirement::High, + }; + } + + if name_lower.contains("go") || name_lower.contains("rust") { + return FrameworkResourceHint { + name: lang.name.clone(), + memory_requirement: MemoryRequirement::Low, + }; + } + + if name_lower.contains("javascript") || name_lower.contains("typescript") { + return FrameworkResourceHint { + name: lang.name.clone(), + memory_requirement: MemoryRequirement::Low, + }; + } + + if name_lower.contains("python") { + return FrameworkResourceHint { + name: lang.name.clone(), + memory_requirement: MemoryRequirement::Medium, + }; + } + } + + // Default fallback + FrameworkResourceHint { + name: "Unknown".to_string(), + memory_requirement: MemoryRequirement::Medium, + } +} + +/// Select region based on user hint or defaults +fn select_region(provider: &CloudProvider, user_hint: Option<&str>) -> (String, String) { + if let Some(hint) = user_hint { + // Validate hint is a valid region for this provider + let regions = get_regions_for_provider(provider); + if regions.iter().any(|r| r.id == hint) { + return ( + hint.to_string(), + format!("{} selected: User preference", hint), + ); + } + } + + let default_region = get_default_region(provider); + let reasoning = match provider { + CloudProvider::Hetzner => format!("{} (Nuremberg) selected: Default EU region, low latency for European users", default_region), + CloudProvider::Gcp => format!("{} (Iowa) selected: Default US region, good 
general-purpose choice", default_region), + _ => format!("{} selected: Default region for provider", default_region), + }; + + (default_region.to_string(), reasoning) +} + +/// Select the best port from analysis results +fn select_port(analysis: &ProjectAnalysis) -> (u16, String) { + // Priority: SourceCode > PackageJson > ConfigFile > FrameworkDefault > Dockerfile > DockerCompose > EnvVar + let port_priority = |source: &Option| -> u8 { + match source { + Some(PortSource::SourceCode) => 7, + Some(PortSource::PackageJson) => 6, + Some(PortSource::ConfigFile) => 5, + Some(PortSource::FrameworkDefault) => 4, + Some(PortSource::Dockerfile) => 3, + Some(PortSource::DockerCompose) => 2, + Some(PortSource::EnvVar) => 1, + None => 0, + } + }; + + // Find the highest priority port + let best_port = analysis.ports.iter() + .max_by_key(|p| port_priority(&p.source)); + + if let Some(port) = best_port { + let source_desc = match &port.source { + Some(PortSource::SourceCode) => "Detected from source code analysis", + Some(PortSource::PackageJson) => "Detected from package.json scripts", + Some(PortSource::ConfigFile) => "Detected from configuration file", + Some(PortSource::FrameworkDefault) => { + // Try to get framework name + let framework_name = analysis.technologies.iter() + .find(|t| matches!(t.category, TechnologyCategory::BackendFramework | TechnologyCategory::MetaFramework)) + .map(|t| t.name.as_str()) + .unwrap_or("framework"); + return (port.number, format!("Framework default ({}: {})", framework_name, port.number)); + } + Some(PortSource::Dockerfile) => "Detected from Dockerfile EXPOSE", + Some(PortSource::DockerCompose) => "Detected from docker-compose.yml", + Some(PortSource::EnvVar) => "Detected from environment variable reference", + None => "Detected from project analysis", + }; + return (port.number, source_desc.to_string()); + } + + // Fallback to 8080 + (8080, "Default port 8080: No port detected in project".to_string()) +} + +/// Select the best health 
endpoint from analysis +fn select_health_endpoint(analysis: &ProjectAnalysis) -> Option { + // Find highest confidence health endpoint + analysis.health_endpoints.iter() + .max_by(|a, b| a.confidence.partial_cmp(&b.confidence).unwrap_or(std::cmp::Ordering::Equal)) + .map(|e| e.path.clone()) +} + +/// Calculate overall confidence in the recommendation +fn calculate_confidence(analysis: &ProjectAnalysis, port_source: &str, has_health_endpoint: bool) -> f32 { + let mut confidence: f32 = 0.5; // Base confidence + + // Boost for detected port from reliable source + if port_source.contains("source code") || port_source.contains("package.json") { + confidence += 0.2; + } else if port_source.contains("Dockerfile") || port_source.contains("framework") { + confidence += 0.1; + } + + // Boost for detected framework + let has_framework = analysis.technologies.iter() + .any(|t| matches!(t.category, TechnologyCategory::BackendFramework | TechnologyCategory::MetaFramework)); + if has_framework { + confidence += 0.15; + } + + // Boost for health endpoint + if has_health_endpoint { + confidence += 0.1; + } + + // Penalty if using fallback port + if port_source.contains("No port detected") || port_source.contains("Default port") { + confidence -= 0.2; + } + + confidence.clamp(0.0, 1.0) +} + +/// Build alternative options for user customization +fn build_alternatives(selected_provider: &CloudProvider, available_providers: &[CloudProvider]) -> RecommendationAlternatives { + // Build provider options + let providers: Vec = CloudProvider::all() + .iter() + .map(|p| ProviderOption { + provider: p.clone(), + available: available_providers.contains(p) && p.is_available(), + reason_if_unavailable: if !p.is_available() { + Some(format!("{} coming soon", p.display_name())) + } else if !available_providers.contains(p) { + Some("Not connected".to_string()) + } else { + None + }, + }) + .collect(); + + // Build machine type options for selected provider + let machine_types: Vec = 
get_machine_types_for_provider(selected_provider) + .iter() + .map(|m| MachineOption { + machine_type: m.id.to_string(), + vcpu: m.cpu.to_string(), + memory_gb: m.memory.to_string(), + description: m.description.map(String::from).unwrap_or_default(), + }) + .collect(); + + // Build region options for selected provider + let regions: Vec = get_regions_for_provider(selected_provider) + .iter() + .map(|r| RegionOption { + region: r.id.to_string(), + display_name: format!("{} ({})", r.name, r.location), + }) + .collect(); + + RecommendationAlternatives { + providers, + machine_types, + regions, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::analyzer::{ + AnalysisMetadata, ArchitectureType, DetectedLanguage, DetectedTechnology, + HealthEndpoint, InfrastructurePresence, Port, ProjectType, TechnologyCategory, + }; + use std::collections::HashMap; + use std::path::PathBuf; + + fn create_minimal_analysis() -> ProjectAnalysis { + #[allow(deprecated)] + ProjectAnalysis { + project_root: PathBuf::from("/test"), + languages: vec![], + technologies: vec![], + frameworks: vec![], + dependencies: HashMap::new(), + entry_points: vec![], + ports: vec![], + health_endpoints: vec![], + environment_variables: vec![], + project_type: ProjectType::WebApplication, + build_scripts: vec![], + services: vec![], + architecture_type: ArchitectureType::Monolithic, + docker_analysis: None, + infrastructure: None, + analysis_metadata: AnalysisMetadata { + timestamp: "2024-01-01T00:00:00Z".to_string(), + analyzer_version: "0.1.0".to_string(), + analysis_duration_ms: 100, + files_analyzed: 10, + confidence_score: 0.8, + }, + } + } + + #[test] + fn test_nodejs_express_recommendation() { + let mut analysis = create_minimal_analysis(); + analysis.languages.push(DetectedLanguage { + name: "JavaScript".to_string(), + version: Some("18".to_string()), + confidence: 0.9, + files: vec![], + main_dependencies: vec!["express".to_string()], + dev_dependencies: vec![], + package_manager: 
Some("npm".to_string()), + }); + analysis.technologies.push(DetectedTechnology { + name: "Express".to_string(), + version: Some("4.18".to_string()), + category: TechnologyCategory::BackendFramework, + confidence: 0.9, + requires: vec![], + conflicts_with: vec![], + is_primary: true, + file_indicators: vec![], + }); + analysis.ports.push(Port { + number: 3000, + protocol: crate::analyzer::Protocol::Http, + description: Some("Express default".to_string()), + source: Some(PortSource::PackageJson), + }); + + let input = RecommendationInput { + analysis, + available_providers: vec![CloudProvider::Hetzner, CloudProvider::Gcp], + has_existing_k8s: false, + user_region_hint: None, + }; + + let rec = recommend_deployment(input); + + // Express should get a small machine + assert!(rec.machine_type == "cx23" || rec.machine_type == "e2-small"); + assert_eq!(rec.port, 3000); + assert!(rec.machine_reasoning.contains("Express")); + } + + #[test] + fn test_java_spring_recommendation() { + let mut analysis = create_minimal_analysis(); + analysis.languages.push(DetectedLanguage { + name: "Java".to_string(), + version: Some("17".to_string()), + confidence: 0.9, + files: vec![], + main_dependencies: vec!["spring-boot".to_string()], + dev_dependencies: vec![], + package_manager: Some("maven".to_string()), + }); + analysis.technologies.push(DetectedTechnology { + name: "Spring Boot".to_string(), + version: Some("3.0".to_string()), + category: TechnologyCategory::BackendFramework, + confidence: 0.9, + requires: vec![], + conflicts_with: vec![], + is_primary: true, + file_indicators: vec![], + }); + analysis.ports.push(Port { + number: 8080, + protocol: crate::analyzer::Protocol::Http, + description: Some("Spring Boot default".to_string()), + source: Some(PortSource::FrameworkDefault), + }); + + let input = RecommendationInput { + analysis, + available_providers: vec![CloudProvider::Hetzner], + has_existing_k8s: false, + user_region_hint: None, + }; + + let rec = 
recommend_deployment(input); + + // Spring Boot should get a larger machine (JVM needs memory) + assert!(rec.machine_type == "cx43" || rec.machine_reasoning.contains("memory")); + assert_eq!(rec.port, 8080); + } + + #[test] + fn test_existing_k8s_suggests_kubernetes_target() { + let mut analysis = create_minimal_analysis(); + analysis.infrastructure = Some(InfrastructurePresence { + has_kubernetes: true, + kubernetes_paths: vec![PathBuf::from("k8s/")], + has_helm: false, + helm_chart_paths: vec![], + has_docker_compose: false, + has_terraform: false, + terraform_paths: vec![], + has_deployment_config: false, + summary: Some("Kubernetes manifests detected".to_string()), + }); + + let input = RecommendationInput { + analysis, + available_providers: vec![CloudProvider::Gcp], + has_existing_k8s: true, // User has K8s clusters + user_region_hint: None, + }; + + let rec = recommend_deployment(input); + assert_eq!(rec.target, DeploymentTarget::Kubernetes); + assert!(rec.target_reasoning.contains("Kubernetes")); + } + + #[test] + fn test_no_k8s_defaults_to_cloud_runner() { + let analysis = create_minimal_analysis(); + + let input = RecommendationInput { + analysis, + available_providers: vec![CloudProvider::Hetzner], + has_existing_k8s: false, + user_region_hint: None, + }; + + let rec = recommend_deployment(input); + assert_eq!(rec.target, DeploymentTarget::CloudRunner); + assert!(rec.target_reasoning.contains("Cloud Runner")); + } + + #[test] + fn test_port_fallback_to_8080() { + let analysis = create_minimal_analysis(); + + let input = RecommendationInput { + analysis, + available_providers: vec![CloudProvider::Hetzner], + has_existing_k8s: false, + user_region_hint: None, + }; + + let rec = recommend_deployment(input); + assert_eq!(rec.port, 8080); + assert!(rec.port_source.contains("No port detected") || rec.port_source.contains("Default")); + } + + #[test] + fn test_health_endpoint_included_when_detected() { + let mut analysis = create_minimal_analysis(); + 
analysis.health_endpoints.push(HealthEndpoint { + path: "/health".to_string(), + confidence: 0.9, + source: crate::analyzer::HealthEndpointSource::CodePattern, + description: Some("Found in source code".to_string()), + }); + + let input = RecommendationInput { + analysis, + available_providers: vec![CloudProvider::Hetzner], + has_existing_k8s: false, + user_region_hint: None, + }; + + let rec = recommend_deployment(input); + assert_eq!(rec.health_check_path, Some("/health".to_string())); + } + + #[test] + fn test_alternatives_populated() { + let analysis = create_minimal_analysis(); + + let input = RecommendationInput { + analysis, + available_providers: vec![CloudProvider::Hetzner, CloudProvider::Gcp], + has_existing_k8s: false, + user_region_hint: None, + }; + + let rec = recommend_deployment(input); + + assert!(!rec.alternatives.providers.is_empty()); + assert!(!rec.alternatives.machine_types.is_empty()); + assert!(!rec.alternatives.regions.is_empty()); + } + + #[test] + fn test_user_region_hint_respected() { + let analysis = create_minimal_analysis(); + + let input = RecommendationInput { + analysis, + available_providers: vec![CloudProvider::Hetzner], + has_existing_k8s: false, + user_region_hint: Some("fsn1".to_string()), + }; + + let rec = recommend_deployment(input); + assert_eq!(rec.region, "fsn1"); + assert!(rec.region_reasoning.contains("User preference")); + } + + #[test] + fn test_go_service_gets_small_machine() { + let mut analysis = create_minimal_analysis(); + analysis.technologies.push(DetectedTechnology { + name: "Gin".to_string(), + version: Some("1.9".to_string()), + category: TechnologyCategory::BackendFramework, + confidence: 0.9, + requires: vec![], + conflicts_with: vec![], + is_primary: true, + file_indicators: vec![], + }); + + let input = RecommendationInput { + analysis, + available_providers: vec![CloudProvider::Hetzner], + has_existing_k8s: false, + user_region_hint: None, + }; + + let rec = recommend_deployment(input); + // Go 
services should get small machine + assert_eq!(rec.machine_type, "cx23"); + assert!(rec.machine_reasoning.contains("memory-efficient") || rec.machine_reasoning.contains("Gin")); + } +} diff --git a/src/wizard/registry_provisioning.rs b/src/wizard/registry_provisioning.rs new file mode 100644 index 00000000..8313126c --- /dev/null +++ b/src/wizard/registry_provisioning.rs @@ -0,0 +1,191 @@ +//! Registry provisioning step for deployment wizard + +use crate::platform::api::types::{ + CloudProvider, CreateRegistryRequest, RegistrySummary, RegistryTaskState, +}; +use crate::platform::api::PlatformApiClient; +use crate::wizard::render::{display_step_header, wizard_render_config}; +use colored::Colorize; +use inquire::{InquireError, Text}; +use std::io::Write; +use std::time::Duration; +use tokio::time::sleep; + +/// Result of registry provisioning +#[derive(Debug)] +pub enum RegistryProvisioningResult { + /// Successfully provisioned + Success(RegistrySummary), + /// User cancelled + Cancelled, + /// Error during provisioning + Error(String), +} + +/// Provision a new artifact registry +pub async fn provision_registry( + client: &PlatformApiClient, + project_id: &str, + cluster_id: &str, + cluster_name: &str, + provider: CloudProvider, + region: &str, + gcp_project_id: Option<&str>, +) -> RegistryProvisioningResult { + display_step_header( + 4, + "Provision Registry", + "Create a new container registry for storing images.", + ); + + // Get registry name from user + let registry_name = match Text::new("Registry name:") + .with_default("main") + .with_help_message("Lowercase alphanumeric with hyphens (e.g., main, staging)") + .with_render_config(wizard_render_config()) + .prompt() + { + Ok(name) => sanitize_registry_name(&name), + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + return RegistryProvisioningResult::Cancelled; + } + Err(_) => return RegistryProvisioningResult::Cancelled, + }; + + println!( + "\n{} Provisioning registry: 
{}", + "โณ".yellow(), + registry_name.cyan() + ); + + // Build request + let request = CreateRegistryRequest { + project_id: project_id.to_string(), + cluster_id: cluster_id.to_string(), + cluster_name: cluster_name.to_string(), + registry_name: registry_name.clone(), + cloud_provider: provider.as_str().to_string(), + region: region.to_string(), + gcp_project_id: gcp_project_id.map(|s| s.to_string()), + }; + + // Start provisioning + let response = match client.create_registry(project_id, &request).await { + Ok(r) => r, + Err(e) => { + return RegistryProvisioningResult::Error(format!( + "Failed to start registry provisioning: {}", + e + )); + } + }; + + let task_id = response.task_id; + println!(" Task started: {}", task_id.dimmed()); + + // Poll for completion with progress display + let mut last_progress = 0; + loop { + sleep(Duration::from_secs(3)).await; + + let status = match client.get_registry_task_status(&task_id).await { + Ok(s) => s, + Err(e) => { + return RegistryProvisioningResult::Error(format!( + "Failed to get task status: {}", + e + )); + } + }; + + // Show progress + let progress = status.progress.unwrap_or(0); + if progress > last_progress { + let bar = progress_bar(progress); + let message = status + .overall_message + .as_deref() + .unwrap_or("Processing..."); + print!( + "\r {} {} {}", + bar, + format!("{}%", progress).cyan(), + message.dimmed() + ); + std::io::stdout().flush().ok(); + last_progress = progress; + } + + match status.status { + RegistryTaskState::Completed => { + println!("\n{} Registry provisioned successfully!", "โœ“".green()); + + let registry = RegistrySummary { + id: task_id.clone(), // Will be updated when we fetch actual registry + name: status.output.registry_name.unwrap_or(registry_name), + region: region.to_string(), + is_ready: true, + }; + + if let Some(url) = status.output.registry_url { + println!(" URL: {}", url.cyan()); + } + + return RegistryProvisioningResult::Success(registry); + } + RegistryTaskState::Failed 
=> { + println!(); + let error_msg = status + .error + .map(|e| e.message) + .unwrap_or_else(|| "Unknown error".to_string()); + return RegistryProvisioningResult::Error(error_msg); + } + RegistryTaskState::Cancelled => { + println!(); + return RegistryProvisioningResult::Cancelled; + } + RegistryTaskState::Processing | RegistryTaskState::Unknown => { + // Continue polling + } + } + } +} + +/// Create a simple progress bar +fn progress_bar(percent: u8) -> String { + let filled = (percent as usize * 20) / 100; + let empty = 20 - filled; + format!("[{}{}]", "โ–ˆ".repeat(filled), "โ–‘".repeat(empty)) +} + +/// Sanitize registry name (lowercase, alphanumeric, hyphens) +fn sanitize_registry_name(name: &str) -> String { + name.to_lowercase() + .chars() + .map(|c| if c.is_alphanumeric() || c == '-' { c } else { '-' }) + .collect::() + .trim_matches('-') + .to_string() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_sanitize_registry_name() { + assert_eq!(sanitize_registry_name("My Registry"), "my-registry"); + assert_eq!(sanitize_registry_name("test_name"), "test-name"); + assert_eq!(sanitize_registry_name("--test--"), "test"); + assert_eq!(sanitize_registry_name("MAIN"), "main"); + assert_eq!(sanitize_registry_name("prod-123"), "prod-123"); + } + + #[test] + fn test_progress_bar() { + assert_eq!(progress_bar(0), "[โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘]"); + assert_eq!(progress_bar(50), "[โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘โ–‘]"); + assert_eq!(progress_bar(100), "[โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ]"); + } +} diff --git a/src/wizard/registry_selection.rs b/src/wizard/registry_selection.rs new file mode 100644 index 00000000..6bad1a32 --- /dev/null +++ b/src/wizard/registry_selection.rs @@ -0,0 +1,112 @@ +//! 
Registry selection step for deployment wizard + +use crate::platform::api::types::RegistrySummary; +use crate::wizard::render::{display_step_header, status_indicator, wizard_render_config}; +use colored::Colorize; +use inquire::{InquireError, Select}; + +/// Result of registry selection step +#[derive(Debug, Clone)] +pub enum RegistrySelectionResult { + /// User selected an existing registry + Selected(RegistrySummary), + /// User wants to provision a new registry + ProvisionNew, + /// User wants to go back + Back, + /// User cancelled the wizard + Cancelled, +} + +/// Display registry selection for container image storage +pub fn select_registry(registries: &[RegistrySummary]) -> RegistrySelectionResult { + display_step_header( + 4, + "Select Registry", + "Choose where to store container images. You can use an existing registry or provision a new one.", + ); + + // Filter to ready registries + let ready_registries: Vec<&RegistrySummary> = registries.iter().filter(|r| r.is_ready).collect(); + + // Build options + let mut options: Vec = ready_registries + .iter() + .map(|r| { + format!( + "{} {} {}", + status_indicator(r.is_ready), + r.name.cyan(), + r.region.dimmed() + ) + }) + .collect(); + + // Always offer to provision new + options.push(format!("{} Provision new registry", "+".green())); + + // Add back option + options.push("โ† Back".dimmed().to_string()); + + let selection = Select::new("Select registry:", options.clone()) + .with_render_config(wizard_render_config()) + .with_help_message("โ†‘โ†“ to move, Enter to select, Esc to cancel") + .with_page_size(6) + .prompt(); + + match selection { + Ok(answer) => { + if answer.contains("Back") { + return RegistrySelectionResult::Back; + } + + if answer.contains("Provision new") { + println!("\n{} Will provision new registry during deployment", "โ†’".cyan()); + return RegistrySelectionResult::ProvisionNew; + } + + // Find selected registry by name + let selected = ready_registries + .iter() + .find(|r| 
answer.contains(&r.name)) + .copied(); + + match selected { + Some(registry) => { + println!( + "\n{} Selected registry: {} ({})", + "โœ“".green(), + registry.name, + registry.region + ); + RegistrySelectionResult::Selected(registry.clone()) + } + None => RegistrySelectionResult::Cancelled, + } + } + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + println!("\n{}", "Wizard cancelled.".dimmed()); + RegistrySelectionResult::Cancelled + } + Err(_) => RegistrySelectionResult::Cancelled, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_registry_selection_result_variants() { + let registry = RegistrySummary { + id: "r1".to_string(), + name: "main".to_string(), + region: "us-central1".to_string(), + is_ready: true, + }; + let _ = RegistrySelectionResult::Selected(registry); + let _ = RegistrySelectionResult::ProvisionNew; + let _ = RegistrySelectionResult::Back; + let _ = RegistrySelectionResult::Cancelled; + } +} diff --git a/src/wizard/render.rs b/src/wizard/render.rs new file mode 100644 index 00000000..b65fcdba --- /dev/null +++ b/src/wizard/render.rs @@ -0,0 +1,67 @@ +//! 
Shared rendering utilities for wizard prompts + +use colored::Colorize; +use inquire::ui::{Color, IndexPrefix, RenderConfig, StyleSheet, Styled}; + +/// Get the standard render config for wizard prompts +pub fn wizard_render_config() -> RenderConfig<'static> { + RenderConfig::default() + .with_highlighted_option_prefix(Styled::new("โ–ธ ").with_fg(Color::LightCyan)) + .with_option_index_prefix(IndexPrefix::Simple) + .with_selected_option(Some(StyleSheet::new().with_fg(Color::LightCyan))) + .with_scroll_up_prefix(Styled::new("โ–ฒ ")) + .with_scroll_down_prefix(Styled::new("โ–ผ ")) +} + +/// Display a wizard step header box +pub fn display_step_header(step_number: u8, step_name: &str, description: &str) { + let term_width = term_size::dimensions().map(|(w, _)| w).unwrap_or(80); + let box_width = term_width.min(70); + let inner_width = box_width - 4; + + println!(); + // Top border with step indicator + let header = format!("โ”€ Step {} ยท {} ", step_number, step_name); + println!( + "{}{}{}", + "โ”Œ".bright_cyan(), + header.bright_cyan(), + "โ”€".repeat(inner_width.saturating_sub(header.len())).bright_cyan() + ); + + // Description + let desc_lines = textwrap::wrap(description, inner_width - 2); + for line in &desc_lines { + println!( + "{} {}", + "โ”‚".dimmed(), + line.white() + ); + } + + // Bottom border + println!( + "{}{}", + "โ””".dimmed(), + "โ”€".repeat(box_width - 1).dimmed() + ); + println!(); +} + +/// Format a status indicator (checkmark or X) +pub fn status_indicator(connected: bool) -> String { + if connected { + "โœ“".green().to_string() + } else { + "โœ—".red().to_string() + } +} + +/// Format a count badge +pub fn count_badge(count: usize, label: &str) -> String { + if count > 0 { + format!("{} {}", count.to_string().cyan(), label.dimmed()) + } else { + format!("{} {}", "0".dimmed(), label.dimmed()) + } +} diff --git a/src/wizard/repository_selection.rs b/src/wizard/repository_selection.rs new file mode 100644 index 00000000..f1dfc199 --- /dev/null 
+++ b/src/wizard/repository_selection.rs @@ -0,0 +1,582 @@ +//! Repository selection step for the deployment wizard +//! +//! Detects the repository from local git remote or asks user to select. + +use crate::platform::api::types::{AvailableRepository, ProjectRepository}; +use crate::platform::api::PlatformApiClient; +use crate::wizard::render::{display_step_header, wizard_render_config}; +use colored::Colorize; +use inquire::{Confirm, InquireError, Select}; +use std::fmt; +use std::path::Path; +use std::process::Command; + +/// Result of repository selection step +#[derive(Debug, Clone)] +pub enum RepositorySelectionResult { + /// User selected a repository (already connected) + Selected(ProjectRepository), + /// User chose to connect a new repository + ConnectNew(AvailableRepository), + /// Need GitHub App installation for this org + NeedsGitHubApp { + installation_url: String, + org_name: String, + }, + /// No GitHub App installations found + NoInstallations { installation_url: String }, + /// No repositories connected to project + NoRepositories, + /// User cancelled the wizard + Cancelled, + /// An error occurred + Error(String), +} + +/// Wrapper for displaying repository options in the selection menu +struct RepositoryOption { + repository: ProjectRepository, + is_detected: bool, +} + +impl fmt::Display for RepositoryOption { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let marker = if self.is_detected { " (detected)" } else { "" }; + write!( + f, + "{}{} {}", + self.repository.repository_full_name.cyan(), + marker.green(), + self.repository + .default_branch + .as_deref() + .unwrap_or("main") + .dimmed() + ) + } +} + +/// Detect the git remote URL from the current directory +fn detect_git_remote(project_path: &Path) -> Option { + let output = Command::new("git") + .args(["remote", "get-url", "origin"]) + .current_dir(project_path) + .output() + .ok()?; + + if output.status.success() { + let url = String::from_utf8(output.stdout).ok()?; + 
Some(url.trim().to_string()) + } else { + None + } +} + +/// Parse repository full name from git remote URL +/// Handles both SSH (git@github.com:owner/repo.git) and HTTPS (https://github.com/owner/repo.git) +fn parse_repo_from_url(url: &str) -> Option { + let url = url.trim(); + + // SSH format: git@github.com:owner/repo.git + if url.starts_with("git@") { + let parts: Vec<&str> = url.split(':').collect(); + if parts.len() == 2 { + let path = parts[1].trim_end_matches(".git"); + return Some(path.to_string()); + } + } + + // HTTPS format: https://github.com/owner/repo.git + if url.starts_with("https://") || url.starts_with("http://") { + if let Some(path) = url.split('/').skip(3).collect::>().join("/").strip_suffix(".git") { + return Some(path.to_string()); + } + // Without .git suffix + let path: String = url.split('/').skip(3).collect::>().join("/"); + if !path.is_empty() { + return Some(path); + } + } + + None +} + +/// Find a repository in the available repositories list by full name +fn find_in_available<'a>( + repo_full_name: &str, + available: &'a [AvailableRepository], +) -> Option<&'a AvailableRepository> { + available + .iter() + .find(|r| r.full_name.eq_ignore_ascii_case(repo_full_name)) +} + +/// Check if a repository ID is in the connected list +fn is_repo_connected(repo_id: i64, connected_ids: &[i64]) -> bool { + connected_ids.contains(&repo_id) +} + +/// Extract organization/owner name from a repo full name +fn extract_org_name(repo_full_name: &str) -> String { + repo_full_name + .split('/') + .next() + .unwrap_or(repo_full_name) + .to_string() +} + +/// Prompt user to connect a detected repository +fn prompt_connect_repository( + available: &AvailableRepository, + connected: &[ProjectRepository], +) -> RepositorySelectionResult { + println!( + "\n{} Detected repository: {}", + "โ†’".cyan(), + available.full_name.cyan() + ); + println!( + "{}", + "This repository is not connected to the project.".dimmed() + ); + + // Build options + let connect_option 
= format!("Connect {} (detected)", available.full_name); + let mut options = vec![connect_option]; + + // Add connected repos as alternatives + for repo in connected { + options.push(format!( + "Use {} (already connected)", + repo.repository_full_name + )); + } + + let selection = Select::new("What would you like to do?", options) + .with_render_config(wizard_render_config()) + .with_help_message("Use โ†‘/โ†“ to navigate, Enter to select") + .prompt(); + + match selection { + Ok(choice) if choice.starts_with("Connect") => { + RepositorySelectionResult::ConnectNew(available.clone()) + } + Ok(choice) => { + // Find which connected repo was selected + let repo_name = choice + .split(" (already connected)") + .next() + .unwrap_or("") + .trim() + .trim_start_matches("Use "); + if let Some(repo) = connected + .iter() + .find(|r| r.repository_full_name == repo_name) + { + RepositorySelectionResult::Selected(repo.clone()) + } else { + RepositorySelectionResult::Cancelled + } + } + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + RepositorySelectionResult::Cancelled + } + Err(_) => RepositorySelectionResult::Cancelled, + } +} + +/// Prompt user to install GitHub App +async fn prompt_github_app_install( + client: &PlatformApiClient, + org_name: &str, +) -> RepositorySelectionResult { + println!( + "\n{} GitHub App not installed for: {}", + "โš ".yellow(), + org_name.cyan() + ); + println!( + "{}", + "The Syncable GitHub App needs to be installed to connect this repository.".dimmed() + ); + + match client.get_github_installation_url().await { + Ok(response) => { + let install = Confirm::new("Open browser to install GitHub App?") + .with_default(true) + .prompt(); + + if let Ok(true) = install { + if webbrowser::open(&response.installation_url).is_ok() { + println!( + "{} Opened browser. 
Complete the installation, then run this command again.", + "โ†’".cyan() + ); + } else { + println!("Visit: {}", response.installation_url); + } + } + RepositorySelectionResult::NeedsGitHubApp { + installation_url: response.installation_url, + org_name: org_name.to_string(), + } + } + Err(e) => RepositorySelectionResult::Error(format!("Failed to get installation URL: {}", e)), + } +} + +/// Select repository for deployment +/// +/// Smart repository selection with connection flow: +/// 1. Check for GitHub App installations +/// 2. Fetch connected and available repositories +/// 3. Detect local git remote and match against repos +/// 4. Offer to connect if local repo available but not connected +/// 5. Fall back to manual selection from available repos +pub async fn select_repository( + client: &PlatformApiClient, + project_id: &str, + project_path: &Path, +) -> RepositorySelectionResult { + // Check for GitHub App installations first + let installations = match client.list_github_installations().await { + Ok(response) => response.installations, + Err(e) => { + return RepositorySelectionResult::Error(format!( + "Failed to fetch GitHub installations: {}", + e + )); + } + }; + + // If no installations, prompt to install GitHub App + if installations.is_empty() { + println!( + "\n{} No GitHub App installations found.", + "โš ".yellow() + ); + match client.get_github_installation_url().await { + Ok(response) => { + println!("Install the Syncable GitHub App to connect repositories."); + let install = Confirm::new("Open browser to install GitHub App?") + .with_default(true) + .prompt(); + + if let Ok(true) = install { + if webbrowser::open(&response.installation_url).is_ok() { + println!( + "{} Opened browser. 
Complete the installation, then run this command again.", + "โ†’".cyan() + ); + } else { + println!("Visit: {}", response.installation_url); + } + } + return RepositorySelectionResult::NoInstallations { + installation_url: response.installation_url, + }; + } + Err(e) => { + return RepositorySelectionResult::Error(format!( + "Failed to get installation URL: {}", + e + )); + } + } + } + + // Fetch connected repositories + let repos_response = match client.list_project_repositories(project_id).await { + Ok(response) => response, + Err(e) => { + return RepositorySelectionResult::Error(format!( + "Failed to fetch repositories: {}", + e + )); + } + }; + let connected_repos = repos_response.repositories; + + // Fetch available repositories (from all GitHub installations) + let available_response = match client + .list_available_repositories(Some(project_id), None, None) + .await + { + Ok(response) => response, + Err(e) => { + return RepositorySelectionResult::Error(format!( + "Failed to fetch available repositories: {}", + e + )); + } + }; + let available_repos = available_response.repositories; + let connected_ids = available_response.connected_repositories; + + // Try to auto-detect from git remote + let detected_repo_name = detect_git_remote(project_path).and_then(|url| parse_repo_from_url(&url)); + + if let Some(ref local_repo_name) = detected_repo_name { + // Check if already connected to this project + if let Some(connected) = connected_repos + .iter() + .find(|r| r.repository_full_name.eq_ignore_ascii_case(local_repo_name)) + { + // Auto-select connected repo + println!( + "\n{} Using detected repository: {}", + "โœ“".green(), + connected.repository_full_name.cyan() + ); + return RepositorySelectionResult::Selected(connected.clone()); + } + + // Check if available but not connected + if let Some(available) = find_in_available(local_repo_name, &available_repos) { + if !is_repo_connected(available.id, &connected_ids) { + // Offer to connect this repository + return 
prompt_connect_repository(available, &connected_repos);
+            }
+        }
+
+        // Local repo not in available list - might need GitHub App for this org
+        let org_name = extract_org_name(local_repo_name);
+        let org_has_installation = installations
+            .iter()
+            .any(|i| i.account_login.eq_ignore_ascii_case(&org_name)); // GitHub logins are case-insensitive
+
+        if !org_has_installation {
+            // Need to install GitHub App for this organization
+            return prompt_github_app_install(client, &org_name).await;
+        }
+
+        // Org has installation but repo not available - might be private or restricted
+        println!(
+            "\n{} Repository {} not accessible.",
+            "⚠".yellow(),
+            local_repo_name.cyan()
+        );
+        println!(
+            "{}",
+            "Check that the Syncable GitHub App has access to this repository.".dimmed()
+        );
+    }
+
+    // No local repo detected or couldn't match - show selection UI
+    if connected_repos.is_empty() && available_repos.is_empty() {
+        println!(
+            "\n{} No repositories available.",
+            "⚠".yellow()
+        );
+        println!(
+            "{}",
+            "Connect a repository using the GitHub App installation.".dimmed()
+        );
+        return RepositorySelectionResult::NoRepositories;
+    }
+
+    display_step_header(
+        0,
+        "Select Repository",
+        "Choose which repository to deploy from.",
+    );
+
+    // Build options: connected repos first, then available (unconnected) repos
+    let mut options: Vec<RepositoryOption> = connected_repos
+        .iter()
+        .map(|repo| {
+            let is_detected = detected_repo_name
+                .as_ref()
+                .map(|name| repo.repository_full_name.eq_ignore_ascii_case(name))
+                .unwrap_or(false);
+            RepositoryOption {
+                repository: repo.clone(),
+                is_detected,
+            }
+        })
+        .collect();
+
+    // Put detected repo first if found
+    options.sort_by(|a, b| b.is_detected.cmp(&a.is_detected)); // descending on bool: detected entry sorts to the front
+
+    if options.is_empty() {
+        // No connected repos - offer available repos to connect
+        println!(
+            "{}",
+            "No repositories connected yet. Select one to connect:".dimmed()
+        );
+
+        let available_options: Vec<String> = available_repos
+            .iter()
+            .filter(|r| !is_repo_connected(r.id, &connected_ids))
+            .map(|r| r.full_name.clone())
+            .collect();
+
+        if available_options.is_empty() {
+            return RepositorySelectionResult::NoRepositories;
+        }
+
+        let selection = Select::new("Select repository to connect:", available_options)
+            .with_render_config(wizard_render_config())
+            .with_help_message("Use ↑/↓ to navigate, Enter to select")
+            .prompt();
+
+        match selection {
+            Ok(selected_name) => {
+                if let Some(available) = available_repos.iter().find(|r| r.full_name == selected_name)
+                {
+                    return RepositorySelectionResult::ConnectNew(available.clone());
+                }
+                RepositorySelectionResult::Cancelled
+            }
+            Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => {
+                RepositorySelectionResult::Cancelled
+            }
+            Err(_) => RepositorySelectionResult::Cancelled,
+        }
+    } else {
+        // Show connected repos for selection
+        let selection = Select::new("Select repository:", options)
+            .with_render_config(wizard_render_config())
+            .with_help_message("Use ↑/↓ to navigate, Enter to select")
+            .prompt();
+
+        match selection {
+            Ok(selected) => {
+                println!(
+                    "\n{} Selected repository: {}",
+                    "✓".green(),
+                    selected.repository.repository_full_name.cyan()
+                );
+                RepositorySelectionResult::Selected(selected.repository)
+            }
+            Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => {
+                RepositorySelectionResult::Cancelled
+            }
+            Err(_) => RepositorySelectionResult::Cancelled,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parse_repo_from_ssh_url() {
+        let url = "git@github.com:owner/my-repo.git";
+        assert_eq!(parse_repo_from_url(url), Some("owner/my-repo".to_string()));
+    }
+
+    #[test]
+    fn test_parse_repo_from_https_url() {
+        let url = "https://github.com/owner/my-repo.git";
+        assert_eq!(parse_repo_from_url(url), Some("owner/my-repo".to_string()));
+    }
+
+    #[test]
+    fn test_parse_repo_from_https_url_no_git() {
+        let url = "https://github.com/owner/my-repo";
+        assert_eq!(parse_repo_from_url(url), Some("owner/my-repo".to_string()));
+    }
+
+    #[test]
+    fn test_repository_selection_result_variants() {
+        let repo = ProjectRepository {
+            id: "test".to_string(),
+            project_id: "proj".to_string(),
+            repository_id: 123,
+            repository_name: "test".to_string(),
+            repository_full_name: "owner/test".to_string(),
+            repository_owner: "owner".to_string(),
+            repository_private: false,
+            default_branch: Some("main".to_string()),
+            is_active: true,
+            connection_type: None,
+            repository_type: None,
+            is_primary_git_ops: None,
+            github_installation_id: None,
+            user_id: None,
+            created_at: None,
+            updated_at: None,
+        };
+        let available = AvailableRepository {
+            id: 456,
+            name: "test-repo".to_string(),
+            full_name: "owner/test-repo".to_string(),
+            owner: Some("owner".to_string()),
+            private: false,
+            default_branch: Some("main".to_string()),
+            description: None,
+            html_url: None,
+            installation_id: Some(789),
+        };
+        let _ = RepositorySelectionResult::Selected(repo);
+        let _ = RepositorySelectionResult::ConnectNew(available);
+        let _ = RepositorySelectionResult::NeedsGitHubApp {
+            installation_url: "https://github.com/apps/syncable".to_string(),
+            org_name: "my-org".to_string(),
+        };
+        let _ = RepositorySelectionResult::NoInstallations {
+            installation_url: "https://github.com/apps/syncable".to_string(),
+        };
+        let _ = RepositorySelectionResult::NoRepositories;
+        let _ = RepositorySelectionResult::Cancelled;
+        let _ = RepositorySelectionResult::Error("test".to_string());
+    }
+
+    #[test]
+    fn test_extract_org_name() {
+        assert_eq!(extract_org_name("owner/repo"), "owner");
+        assert_eq!(extract_org_name("my-org/my-app"), "my-org");
+        assert_eq!(extract_org_name("repo-only"), "repo-only");
+    }
+
+    #[test]
+    fn test_is_repo_connected() {
+        let connected = vec![1, 2, 3, 5];
+        assert!(is_repo_connected(1, &connected));
+        assert!(is_repo_connected(3, &connected));
+        assert!(!is_repo_connected(4, &connected));
+        assert!(!is_repo_connected(100, &connected));
+    }
+
+    #[test]
+    fn test_find_in_available() {
+        let available = vec![
+            AvailableRepository {
+                id: 1,
+                name: "repo-a".to_string(),
+                full_name: "owner/repo-a".to_string(),
+                owner: Some("owner".to_string()),
+                private: false,
+                default_branch: Some("main".to_string()),
+                description: None,
+                html_url: None,
+                installation_id: Some(100),
+            },
+            AvailableRepository {
+                id: 2,
+                name: "repo-b".to_string(),
+                full_name: "other/repo-b".to_string(),
+                owner: Some("other".to_string()),
+                private: true,
+                default_branch: Some("main".to_string()),
+                description: None,
+                html_url: None,
+                installation_id: Some(200),
+            },
+        ];
+
+        let found = find_in_available("owner/repo-a", &available);
+        assert!(found.is_some());
+        assert_eq!(found.unwrap().id, 1);
+
+        // Case insensitive
+        let found_case = find_in_available("OWNER/REPO-A", &available);
+        assert!(found_case.is_some());
+
+        let not_found = find_in_available("nonexistent/repo", &available);
+        assert!(not_found.is_none());
+    }
+}
diff --git a/src/wizard/target_selection.rs b/src/wizard/target_selection.rs
new file mode 100644
index 00000000..8bbc9c1a
--- /dev/null
+++ b/src/wizard/target_selection.rs
@@ -0,0 +1,105 @@
+//!
Target selection step for deployment wizard + +use crate::platform::api::types::{DeploymentTarget, ProviderDeploymentStatus}; +use crate::wizard::render::{display_step_header, wizard_render_config}; +use colored::Colorize; +use inquire::{InquireError, Select}; + +/// Result of target selection step +#[derive(Debug, Clone)] +pub enum TargetSelectionResult { + /// User selected a deployment target + Selected(DeploymentTarget), + /// User wants to go back to provider selection + Back, + /// User cancelled the wizard + Cancelled, +} + +/// Display target selection based on provider capabilities +pub fn select_target(provider_status: &ProviderDeploymentStatus) -> TargetSelectionResult { + display_step_header( + 2, + "Select Target", + "Choose how to deploy your service. Cloud Runner is fully managed. Kubernetes gives you more control.", + ); + + let available_targets = provider_status.available_targets(); + + if available_targets.is_empty() { + println!( + "\n{}", + "No deployment targets available for this provider.".red() + ); + return TargetSelectionResult::Cancelled; + } + + // Build options with descriptions + let mut options: Vec = available_targets + .iter() + .map(|t| { + match t { + DeploymentTarget::CloudRunner => { + format!( + "{} {}", + "Cloud Runner".cyan(), + "Fully managed, auto-scaling containers".dimmed() + ) + } + DeploymentTarget::Kubernetes => { + let cluster_count = provider_status.clusters.iter().filter(|c| c.is_healthy).count(); + format!( + "{} {} cluster{} available", + "Kubernetes".cyan(), + cluster_count, + if cluster_count == 1 { "" } else { "s" } + ) + } + } + }) + .collect(); + + // Add back option + options.push("โ† Back to provider selection".dimmed().to_string()); + + let selection = Select::new("Select deployment target:", options.clone()) + .with_render_config(wizard_render_config()) + .with_help_message("โ†‘โ†“ to move, Enter to select, Esc to cancel") + .with_page_size(4) + .prompt(); + + match selection { + Ok(answer) => { + if 
answer.contains("Back") { + return TargetSelectionResult::Back; + } + + let target = if answer.contains("Cloud Runner") { + DeploymentTarget::CloudRunner + } else { + DeploymentTarget::Kubernetes + }; + + println!("\n{} Selected: {}", "โœ“".green(), target.display_name()); + TargetSelectionResult::Selected(target) + } + Err(InquireError::OperationCanceled) | Err(InquireError::OperationInterrupted) => { + println!("\n{}", "Wizard cancelled.".dimmed()); + TargetSelectionResult::Cancelled + } + Err(_) => TargetSelectionResult::Cancelled, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_target_selection_result_variants() { + let _ = TargetSelectionResult::Selected(DeploymentTarget::CloudRunner); + let _ = TargetSelectionResult::Selected(DeploymentTarget::Kubernetes); + let _ = TargetSelectionResult::Back; + let _ = TargetSelectionResult::Cancelled; + } +}