diff --git a/.cargo/config.toml b/.cargo/config.toml
index edac6981..9fb13e97 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -9,5 +9,4 @@ t = "test"
tr = "test --release"
r = "run"
rr = "run --release"
-fmt = "fmt --all"
-clippy = "clippy --all-targets --all-features"
\ No newline at end of file
+# Note: fmt and clippy aliases removed - an alias named after the subcommand it invokes expands into itself (recursive alias error), which broke CI
\ No newline at end of file
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000..e1ef3a9c
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,72 @@
+name: CI
+
+on:
+ push:
+ branches: [main, develop]
+ pull_request:
+ branches: [main, develop]
+
+env:
+ CARGO_TERM_COLOR: always
+ RUST_BACKTRACE: 1
+ # Override target-cpu=native from .cargo/config.toml (breaks CI runners)
+ RUSTFLAGS: ""
+
+jobs:
+ build:
+ name: Build & Test
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest, macos-latest, windows-latest]
+ rust: [stable]
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install Rust toolchain
+ uses: dtolnay/rust-toolchain@master
+ with:
+ toolchain: ${{ matrix.rust }}
+ components: clippy, rustfmt
+
+ - name: Cache cargo registry
+ uses: actions/cache@v4
+ with:
+ path: |
+ ~/.cargo/registry
+ ~/.cargo/git
+ target
+ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
+ restore-keys: |
+ ${{ runner.os }}-cargo-
+
+ - name: Check formatting
+ if: matrix.os == 'ubuntu-latest'
+ run: cargo fmt --all -- --check
+
+ - name: Build
+ run: cargo build --verbose
+
+ - name: Run tests
+ run: cargo test --verbose
+
+ - name: Clippy
+ if: matrix.os == 'ubuntu-latest'
+ run: cargo clippy -- -D warnings
+
+ # Security audit
+ security:
+ name: Security Audit
+ runs-on: ubuntu-latest
+ permissions:
+ checks: write
+ contents: read
+ steps:
+ - uses: actions/checkout@v4
+ - uses: rustsec/audit-check@v2
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ # Only fail on actual vulnerabilities, not unmaintained warnings
+ ignore: RUSTSEC-2020-0163,RUSTSEC-2024-0320,RUSTSEC-2025-0057,RUSTSEC-2025-0074,RUSTSEC-2025-0075,RUSTSEC-2025-0080,RUSTSEC-2025-0081,RUSTSEC-2025-0098,RUSTSEC-2025-0104,RUSTSEC-2025-0134
diff --git a/.gitignore b/.gitignore
index 8b5b8d33..c5175f34 100644
--- a/.gitignore
+++ b/.gitignore
@@ -38,5 +38,4 @@ syncable-ide-companion/*.vsix
syncable-ide-companion/node_modules/
syncable-ide-companion/dist/
-syncable-cli.tape
-syncable-cli-demo.gif
\ No newline at end of file
+syncable-cli.tape
\ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock
index 12c617aa..c8417f99 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -101,6 +101,12 @@ version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
+[[package]]
+name = "arraydeque"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236"
+
[[package]]
name = "arrayref"
version = "0.3.9"
@@ -859,7 +865,7 @@ version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fde0e0ec90c9dfb3b4b1a0891a7dcd0e2bffde2f7efed5fe7c9bb00e5bfb915e"
dependencies = [
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -1370,7 +1376,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18"
dependencies = [
"libc",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -1414,7 +1420,7 @@ checksum = "0ce92ff622d6dadf7349484f42c93271a0d49b7cc4d466a936405bacbe10aa78"
dependencies = [
"cfg-if",
"rustix",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -2523,6 +2529,15 @@ version = "0.15.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3"
+[[package]]
+name = "hashlink"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af"
+dependencies = [
+ "hashbrown 0.14.5",
+]
+
[[package]]
name = "heapless"
version = "0.8.0"
@@ -3007,7 +3022,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
dependencies = [
"hermit-abi",
"libc",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -3034,7 +3049,7 @@ dependencies = [
"portable-atomic",
"portable-atomic-util",
"serde",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -3864,7 +3879,7 @@ dependencies = [
"once_cell",
"socket2 0.5.10",
"tracing",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -4225,7 +4240,7 @@ dependencies = [
"errno",
"libc",
"linux-raw-sys",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -4888,6 +4903,7 @@ dependencies = [
"toml 0.9.6",
"uuid",
"walkdir",
+ "yaml-rust2",
]
[[package]]
@@ -4979,7 +4995,7 @@ dependencies = [
"getrandom 0.3.3",
"once_cell",
"rustix",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -5831,7 +5847,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -6194,6 +6210,17 @@ dependencies = [
"linked-hash-map",
]
+[[package]]
+name = "yaml-rust2"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2a1a1c0bc9823338a3bdf8c61f994f23ac004c6fa32c08cd152984499b445e8d"
+dependencies = [
+ "arraydeque",
+ "encoding_rs",
+ "hashlink",
+]
+
[[package]]
name = "yoke"
version = "0.8.0"
diff --git a/Cargo.toml b/Cargo.toml
index 7a85c2de..c7846a3b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -25,6 +25,7 @@ clap = { version = "4", features = ["derive", "env", "cargo"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
serde_yaml = "0.9"
+yaml-rust2 = "0.9" # YAML parsing with position tracking for dclint
toml = "0.9"
log = "0.4"
env_logger = "0.11"
diff --git a/README.md b/README.md
index ecec30c2..97b8a818 100644
--- a/README.md
+++ b/README.md
@@ -9,10 +9,20 @@
+
+
+
+
+
+
+
+
+
-
+
+
@@ -33,25 +43,9 @@
**Stop copy-pasting Dockerfiles from Stack Overflow.** Syncable CLI is an AI-powered assistant that understands your codebase and generates production-ready infrastructure ā Dockerfiles, Kubernetes manifests, Terraform configs, and CI/CD pipelines ā tailored specifically to your project.
-```bash
-$ sync-ctl chat
-š¤ Syncable Agent powered by Claude
-
-You: Create a production Dockerfile for this project
-
-Agent: I've analyzed your Express.js + TypeScript project. Here's an optimized
-multi-stage Dockerfile with:
- ā Non-root user for security
- ā Layer caching for faster builds
- ā Health checks configured
- ā Production dependencies only
-
-[Creates Dockerfile with VS Code diff view]
-
-You: Now add Redis caching and create a docker-compose
-
-Agent: I'll add Redis to your stack and create a compose file...
-```
+
+
+
## ā” Quick Start
@@ -249,8 +243,13 @@ See [LICENSE](LICENSE) for the full license text.
The Dockerfile linting functionality (`src/analyzer/hadolint/`) is a Rust translation
of [Hadolint](https://github.com/hadolint/hadolint), originally written in Haskell by
-Lukas Martinelli and contributors. See [THIRD_PARTY_NOTICES.md](THIRD_PARTY_NOTICES.md)
-for full attribution details.
+Lukas Martinelli and contributors.
+
+The Docker Compose linting functionality (`src/analyzer/dclint/`) is a Rust implementation
+inspired by [docker-compose-linter](https://github.com/zavoloklom/docker-compose-linter)
+by Sergey Suspended.
+
+See [THIRD_PARTY_NOTICES.md](THIRD_PARTY_NOTICES.md) for full attribution details.
---
diff --git a/THIRD_PARTY_NOTICES.md b/THIRD_PARTY_NOTICES.md
index bdb1f680..b8648718 100644
--- a/THIRD_PARTY_NOTICES.md
+++ b/THIRD_PARTY_NOTICES.md
@@ -47,6 +47,42 @@ https://www.gnu.org/licenses/gpl-3.0.en.html
---
+## Docker Compose Linter
+
+The Docker Compose linting functionality in `src/analyzer/dclint/` is a Rust
+implementation inspired by the docker-compose-linter project.
+
+**Original Project:** [docker-compose-linter](https://github.com/zavoloklom/docker-compose-linter)
+
+**Original Author:** Sergey Suspended (zavoloklom)
+
+**Original License:** MIT License
+
+**Original Copyright:**
+```
+Copyright (c) 2024 Sergey Suspended
+```
+
+**What was implemented:**
+- Docker Compose YAML validation logic
+- Lint rule concepts (DCL001-DCL015 series)
+- Service configuration validation patterns
+- Best practices enforcement
+
+**Modifications made:**
+- Complete implementation in Rust (original was TypeScript)
+- Integration with Syncable-CLI's agent and tool system
+- Native async support for streaming output
+- Adaptation to Rust error handling patterns
+- Additional rules and improvements specific to Syncable's use cases
+
+**License Notice:**
+The original docker-compose-linter is licensed under MIT. Our Rust implementation
+is independently written code, inspired by the rule concepts and validation patterns
+of that project.
+
+---
+
## ShellCheck (Rule Concepts)
Some shell-related lint rules are inspired by ShellCheck.
@@ -65,10 +101,11 @@ concepts and documentation.
## Acknowledgments
-We are grateful to the open source community and the authors of Hadolint for
-creating and maintaining excellent Dockerfile linting tools. This translation
-to Rust allows native integration with Syncable-CLI while preserving the
-valuable rule definitions and linting logic developed by the original authors.
+We are grateful to the open source community and the authors of Hadolint and
+docker-compose-linter for creating and maintaining excellent container configuration
+linting tools. These Rust implementations allow native integration with Syncable-CLI
+while preserving the valuable rule definitions and linting logic developed by the
+original authors.
If you are the author of any software mentioned here and believe the attribution
is incorrect or incomplete, please open an issue at:
diff --git a/examples/check_vulnerabilities.rs b/examples/check_vulnerabilities.rs
index bab25f6b..aeeda5d0 100644
--- a/examples/check_vulnerabilities.rs
+++ b/examples/check_vulnerabilities.rs
@@ -1,23 +1,23 @@
-use syncable_cli::analyzer::dependency_parser::{DependencyParser};
-use syncable_cli::analyzer::vulnerability::VulnerabilityChecker;
use std::path::Path;
+use syncable_cli::analyzer::dependency_parser::DependencyParser;
+use syncable_cli::analyzer::vulnerability::VulnerabilityChecker;
#[tokio::main]
async fn main() -> Result<(), Box> {
env_logger::init();
-
+
let project_path = Path::new(".");
println!("š Checking vulnerabilities in: {}", project_path.display());
-
+
// Parse dependencies
let parser = DependencyParser::new();
let dependencies = parser.parse_all_dependencies(project_path)?;
-
+
if dependencies.is_empty() {
println!("No dependencies found.");
return Ok(());
}
-
+
// Print found dependencies
for (lang, deps) in &dependencies {
println!("\n{:?} dependencies: {}", lang, deps.len());
@@ -28,16 +28,21 @@ async fn main() -> Result<(), Box> {
println!(" ... and {} more", deps.len() - 5);
}
}
-
+
// Check vulnerabilities
println!("\nš”ļø Checking for vulnerabilities...");
let checker = VulnerabilityChecker::new();
- let report = checker.check_all_dependencies(&dependencies, project_path).await?;
-
+ let report = checker
+ .check_all_dependencies(&dependencies, project_path)
+ .await?;
+
println!("\nš Vulnerability Report");
- println!("Checked at: {}", report.checked_at.format("%Y-%m-%d %H:%M:%S UTC"));
+ println!(
+ "Checked at: {}",
+ report.checked_at.format("%Y-%m-%d %H:%M:%S UTC")
+ );
println!("Total vulnerabilities: {}", report.total_vulnerabilities);
-
+
if report.total_vulnerabilities > 0 {
println!("\nSeverity breakdown:");
if report.critical_count > 0 {
@@ -52,10 +57,13 @@ async fn main() -> Result<(), Box> {
if report.low_count > 0 {
println!(" LOW: {}", report.low_count);
}
-
+
println!("\nVulnerable dependencies:");
for vuln_dep in &report.vulnerable_dependencies {
- println!("\n š¦ {} v{} ({:?})", vuln_dep.name, vuln_dep.version, vuln_dep.language);
+ println!(
+ "\n š¦ {} v{} ({:?})",
+ vuln_dep.name, vuln_dep.version, vuln_dep.language
+ );
for vuln in &vuln_dep.vulnerabilities {
println!(" ā ļø {} [{:?}] - {}", vuln.id, vuln.severity, vuln.title);
if let Some(ref cve) = vuln.cve {
@@ -69,6 +77,6 @@ async fn main() -> Result<(), Box> {
} else {
println!("\nā
No known vulnerabilities found!");
}
-
+
Ok(())
-}
\ No newline at end of file
+}
diff --git a/examples/debug_java_vulnerabilities.rs b/examples/debug_java_vulnerabilities.rs
index 8c34a9d2..1ebecd55 100644
--- a/examples/debug_java_vulnerabilities.rs
+++ b/examples/debug_java_vulnerabilities.rs
@@ -1,9 +1,9 @@
use env_logger;
-use log::{info, error};
+use log::{error, info};
+use std::env;
+use std::path::Path;
use syncable_cli::analyzer::dependency_parser::{DependencyParser, Language};
use syncable_cli::analyzer::vulnerability::VulnerabilityChecker;
-use std::path::Path;
-use std::env;
#[tokio::main]
async fn main() -> Result<(), Box> {
@@ -11,7 +11,7 @@ async fn main() -> Result<(), Box> {
env_logger::Builder::from_default_env()
.filter_level(log::LevelFilter::Debug)
.init();
-
+
// Get project path from command line args or use current directory
let args: Vec = env::args().collect();
let project_path = if args.len() > 1 {
@@ -19,14 +19,17 @@ async fn main() -> Result<(), Box> {
} else {
Path::new(".")
};
-
- info!("š Debug Java vulnerability scanning in: {}", project_path.display());
-
+
+ info!(
+ "š Debug Java vulnerability scanning in: {}",
+ project_path.display()
+ );
+
// Parse dependencies
let parser = DependencyParser::new();
info!("š¦ Parsing dependencies...");
let dependencies = parser.parse_all_dependencies(project_path)?;
-
+
if dependencies.is_empty() {
error!("ā No dependencies found!");
info!("Make sure you're in a Java project directory with:");
@@ -34,7 +37,7 @@ async fn main() -> Result<(), Box> {
info!(" - build.gradle or build.gradle.kts (Gradle project)");
return Ok(());
}
-
+
// Show detailed dependency information
info!("š Found dependencies in {} languages:", dependencies.len());
for (lang, deps) in &dependencies {
@@ -42,14 +45,17 @@ async fn main() -> Result<(), Box> {
if *lang == Language::Java {
info!(" Java dependencies details:");
for dep in deps.iter().take(10) {
- info!(" - {} v{} (source: {:?})", dep.name, dep.version, dep.source);
+ info!(
+ " - {} v{} (source: {:?})",
+ dep.name, dep.version, dep.source
+ );
}
if deps.len() > 10 {
info!(" ... and {} more", deps.len() - 10);
}
}
}
-
+
// Check if Java dependencies were found
if !dependencies.contains_key(&Language::Java) {
error!("ā No Java dependencies detected!");
@@ -57,15 +63,20 @@ async fn main() -> Result<(), Box> {
info!("1. Make sure you're in a Java project directory");
info!("2. For Maven projects: ensure pom.xml exists and has section");
info!("3. For Gradle projects: ensure build.gradle exists with dependency declarations");
- info!("4. Run 'mvn dependency:resolve' or 'gradle build' to ensure dependencies are resolved");
+ info!(
+ "4. Run 'mvn dependency:resolve' or 'gradle build' to ensure dependencies are resolved"
+ );
return Ok(());
}
-
+
// Check vulnerabilities
info!("š”ļø Checking for vulnerabilities...");
let checker = VulnerabilityChecker::new();
-
- match checker.check_all_dependencies(&dependencies, project_path).await {
+
+ match checker
+ .check_all_dependencies(&dependencies, project_path)
+ .await
+ {
Ok(report) => {
info!("ā
Vulnerability scan completed successfully!");
info!("š Results:");
@@ -74,12 +85,16 @@ async fn main() -> Result<(), Box> {
info!(" High: {}", report.high_count);
info!(" Medium: {}", report.medium_count);
info!(" Low: {}", report.low_count);
-
+
if report.total_vulnerabilities > 0 {
info!("šØ Vulnerable dependencies:");
for vuln_dep in &report.vulnerable_dependencies {
- info!(" - {} v{} ({} vulnerabilities)",
- vuln_dep.name, vuln_dep.version, vuln_dep.vulnerabilities.len());
+ info!(
+ " - {} v{} ({} vulnerabilities)",
+ vuln_dep.name,
+ vuln_dep.version,
+ vuln_dep.vulnerabilities.len()
+ );
for vuln in &vuln_dep.vulnerabilities {
info!(" ⢠{} [{:?}] - {}", vuln.id, vuln.severity, vuln.title);
}
@@ -89,7 +104,9 @@ async fn main() -> Result<(), Box> {
info!("This could mean:");
info!(" - Your dependencies are up to date and secure");
info!(" - The vulnerability scanner (grype) didn't find any issues");
- info!(" - The dependency versions couldn't be matched with vulnerability databases");
+ info!(
+ " - The dependency versions couldn't be matched with vulnerability databases"
+ );
}
}
Err(e) => {
@@ -100,6 +117,6 @@ async fn main() -> Result<(), Box> {
info!(" - Dependencies not resolved: run 'mvn dependency:resolve'");
}
}
-
+
Ok(())
-}
\ No newline at end of file
+}
diff --git a/examples/security_analysis.rs b/examples/security_analysis.rs
index aa49ceb1..57e02e74 100644
--- a/examples/security_analysis.rs
+++ b/examples/security_analysis.rs
@@ -1,25 +1,40 @@
use std::path::Path;
-use syncable_cli::analyzer::{analyze_project, SecurityAnalyzer, SecurityAnalysisConfig};
+use syncable_cli::analyzer::{SecurityAnalysisConfig, SecurityAnalyzer, analyze_project};
fn main() -> Result<(), Box> {
// Initialize logging
env_logger::init();
-
+
// Get project path from command line arguments or use current directory
- let project_path = std::env::args()
- .nth(1)
- .unwrap_or_else(|| ".".to_string());
-
+ let project_path = std::env::args().nth(1).unwrap_or_else(|| ".".to_string());
+
println!("š Analyzing security for project: {}", project_path);
-
+
// First perform a general project analysis
let project_analysis = analyze_project(Path::new(&project_path))?;
-
+
println!("š Project Analysis Summary:");
- println!(" Languages: {:?}", project_analysis.languages.iter().map(|l| &l.name).collect::>());
- println!(" Technologies: {:?}", project_analysis.technologies.iter().map(|t| &t.name).collect::>());
- println!(" Environment Variables: {}", project_analysis.environment_variables.len());
-
+ println!(
+ " Languages: {:?}",
+ project_analysis
+ .languages
+ .iter()
+ .map(|l| &l.name)
+ .collect::>()
+ );
+ println!(
+ " Technologies: {:?}",
+ project_analysis
+ .technologies
+ .iter()
+ .map(|t| &t.name)
+ .collect::>()
+ );
+ println!(
+ " Environment Variables: {}",
+ project_analysis.environment_variables.len()
+ );
+
// Create security analyzer with default configuration
let security_config = SecurityAnalysisConfig {
include_low_severity: true, // Include low severity findings for demonstration
@@ -27,11 +42,7 @@ fn main() -> Result<(), Box> {
check_code_patterns: true,
check_infrastructure: true,
check_compliance: true,
- frameworks_to_check: vec![
- "SOC2".to_string(),
- "GDPR".to_string(),
- "OWASP".to_string(),
- ],
+ frameworks_to_check: vec!["SOC2".to_string(), "GDPR".to_string(), "OWASP".to_string()],
ignore_patterns: vec![
"node_modules".to_string(),
".git".to_string(),
@@ -40,20 +51,23 @@ fn main() -> Result<(), Box> {
skip_gitignored_files: true,
downgrade_gitignored_severity: false,
};
-
+
let mut security_analyzer = SecurityAnalyzer::with_config(security_config)?;
-
+
// Perform security analysis
println!("\nš”ļø Running comprehensive security analysis...");
let security_report = security_analyzer.analyze_security(&project_analysis)?;
-
+
// Display results
println!("\nš Security Analysis Report");
println!("āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā");
- println!("š Overall Security Score: {:.1}/100", security_report.overall_score);
+ println!(
+ "š Overall Security Score: {:.1}/100",
+ security_report.overall_score
+ );
println!("ā ļø Risk Level: {:?}", security_report.risk_level);
println!("š Total Findings: {}", security_report.total_findings);
-
+
if !security_report.findings_by_severity.is_empty() {
println!("\nš Findings by Severity:");
for (severity, count) in &security_report.findings_by_severity {
@@ -67,7 +81,7 @@ fn main() -> Result<(), Box> {
println!(" {} {:?}: {}", emoji, severity, count);
}
}
-
+
if !security_report.findings_by_category.is_empty() {
println!("\nšļø Findings by Category:");
for (category, count) in &security_report.findings_by_category {
@@ -84,12 +98,12 @@ fn main() -> Result<(), Box> {
println!(" {} {:?}: {}", emoji, category, count);
}
}
-
+
// Display detailed findings
if !security_report.findings.is_empty() {
println!("\nš Detailed Security Findings:");
println!("āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā");
-
+
for (i, finding) in security_report.findings.iter().enumerate() {
let severity_emoji = match finding.severity {
syncable_cli::analyzer::SecuritySeverity::Critical => "šØ",
@@ -98,10 +112,16 @@ fn main() -> Result<(), Box> {
syncable_cli::analyzer::SecuritySeverity::Low => "ā¹ļø ",
syncable_cli::analyzer::SecuritySeverity::Info => "š”",
};
-
- println!("\n{}. {} [{}] {}", i + 1, severity_emoji, finding.id, finding.title);
+
+ println!(
+ "\n{}. {} [{}] {}",
+ i + 1,
+ severity_emoji,
+ finding.id,
+ finding.title
+ );
println!(" š {}", finding.description);
-
+
if let Some(file) = &finding.file_path {
print!(" š File: {}", file.display());
if let Some(line) = finding.line_number {
@@ -109,24 +129,24 @@ fn main() -> Result<(), Box> {
}
println!();
}
-
+
if let Some(evidence) = &finding.evidence {
println!(" š Evidence: {}", evidence);
}
-
+
if !finding.remediation.is_empty() {
println!(" š§ Remediation:");
for remediation in &finding.remediation {
println!(" ⢠{}", remediation);
}
}
-
+
if let Some(cwe) = &finding.cwe_id {
println!(" š·ļø CWE: {}", cwe);
}
}
}
-
+
// Display recommendations
if !security_report.recommendations.is_empty() {
println!("\nš” Security Recommendations:");
@@ -135,7 +155,7 @@ fn main() -> Result<(), Box> {
println!("{}. {}", i + 1, recommendation);
}
}
-
+
// Display compliance status
if !security_report.compliance_status.is_empty() {
println!("\nš Compliance Status:");
@@ -143,21 +163,30 @@ fn main() -> Result<(), Box> {
for (framework, status) in &security_report.compliance_status {
println!("šļø {}: {:.1}% coverage", framework, status.coverage);
if !status.missing_controls.is_empty() {
- println!(" Missing controls: {}", status.missing_controls.join(", "));
+ println!(
+ " Missing controls: {}",
+ status.missing_controls.join(", ")
+ );
}
}
}
-
+
println!("\nā
Security analysis completed!");
-
+
// Exit with appropriate code based on findings
- if security_report.findings_by_severity.contains_key(&syncable_cli::analyzer::SecuritySeverity::Critical) {
+ if security_report
+ .findings_by_severity
+ .contains_key(&syncable_cli::analyzer::SecuritySeverity::Critical)
+ {
println!("ā Critical security issues found. Please address immediately.");
std::process::exit(1);
- } else if security_report.findings_by_severity.contains_key(&syncable_cli::analyzer::SecuritySeverity::High) {
+ } else if security_report
+ .findings_by_severity
+ .contains_key(&syncable_cli::analyzer::SecuritySeverity::High)
+ {
println!("ā ļø High severity security issues found. Review recommended.");
std::process::exit(2);
}
-
+
Ok(())
-}
\ No newline at end of file
+}
diff --git a/examples/test_project_context.rs b/examples/test_project_context.rs
index b76e07be..c77359ed 100644
--- a/examples/test_project_context.rs
+++ b/examples/test_project_context.rs
@@ -1,33 +1,31 @@
//! Example: Test Project Context Analyzer
-//!
+//!
//! This example demonstrates the Project Context Analyzer functionality
//! by analyzing the current project.
-use syncable_cli::analyzer::{analyze_project, ProjectType};
use std::env;
use std::path::Path;
+use syncable_cli::analyzer::{ProjectType, analyze_project};
fn main() -> Result<(), Box> {
// Initialize logger
env_logger::init();
-
+
// Get the project path from command line or use current directory
- let path = env::args()
- .nth(1)
- .unwrap_or_else(|| ".".to_string());
-
+ let path = env::args().nth(1).unwrap_or_else(|| ".".to_string());
+
let project_path = Path::new(&path);
-
+
println!("š Analyzing project at: {}", project_path.display());
println!("{}", "=".repeat(60));
-
+
// Run the analysis
let analysis = analyze_project(project_path)?;
-
+
// Display Project Context Analysis Results
println!("\nš PROJECT CONTEXT ANALYSIS RESULTS");
println!("{}", "=".repeat(60));
-
+
// Project Type (Roadmap Requirement #5)
println!("\nšÆ Project Type: {:?}", analysis.project_type);
match analysis.project_type {
@@ -39,7 +37,7 @@ fn main() -> Result<(), Box> {
ProjectType::StaticSite => println!(" This is a static website"),
_ => println!(" Project type details not available"),
}
-
+
// Entry Points (Roadmap Requirement #1)
println!("\nš Entry Points ({}):", analysis.entry_points.len());
for (i, entry) in analysis.entry_points.iter().enumerate() {
@@ -51,7 +49,7 @@ fn main() -> Result<(), Box> {
println!(" Command: {}", cmd);
}
}
-
+
// Ports (Roadmap Requirement #2)
println!("\nš Exposed Ports ({}):", analysis.ports.len());
for port in &analysis.ports {
@@ -60,79 +58,122 @@ fn main() -> Result<(), Box> {
println!(" {}", desc);
}
}
-
+
// Environment Variables (Roadmap Requirement #3)
- println!("\nš Environment Variables ({}):", analysis.environment_variables.len());
- let required_vars: Vec<_> = analysis.environment_variables.iter()
+ println!(
+ "\nš Environment Variables ({}):",
+ analysis.environment_variables.len()
+ );
+ let required_vars: Vec<_> = analysis
+ .environment_variables
+ .iter()
.filter(|ev| ev.required)
.collect();
- let optional_vars: Vec<_> = analysis.environment_variables.iter()
+ let optional_vars: Vec<_> = analysis
+ .environment_variables
+ .iter()
.filter(|ev| !ev.required)
.collect();
-
+
if !required_vars.is_empty() {
println!(" Required:");
for var in required_vars {
- println!(" - {} {}",
+ println!(
+ " - {} {}",
var.name,
- if let Some(desc) = &var.description {
- format!("({})", desc)
- } else {
- String::new()
+ if let Some(desc) = &var.description {
+ format!("({})", desc)
+ } else {
+ String::new()
}
);
}
}
-
+
if !optional_vars.is_empty() {
println!(" Optional:");
for var in optional_vars {
- println!(" - {} = {:?}",
- var.name,
+ println!(
+ " - {} = {:?}",
+ var.name,
var.default_value.as_deref().unwrap_or("no default")
);
}
}
-
+
// Build Scripts (Roadmap Requirement #4)
println!("\nšØ Build Scripts ({}):", analysis.build_scripts.len());
- let default_scripts: Vec<_> = analysis.build_scripts.iter()
+ let default_scripts: Vec<_> = analysis
+ .build_scripts
+ .iter()
.filter(|bs| bs.is_default)
.collect();
- let other_scripts: Vec<_> = analysis.build_scripts.iter()
+ let other_scripts: Vec<_> = analysis
+ .build_scripts
+ .iter()
.filter(|bs| !bs.is_default)
.collect();
-
+
if !default_scripts.is_empty() {
println!(" Default scripts:");
for script in default_scripts {
println!(" - {}: {}", script.name, script.command);
}
}
-
+
if !other_scripts.is_empty() {
println!(" Other scripts:");
for script in other_scripts {
println!(" - {}: {}", script.name, script.command);
}
}
-
+
// Summary
println!("\nš SUMMARY");
println!("{}", "=".repeat(60));
println!("ā
All 5 Project Context Analyzer requirements verified:");
- println!(" 1. Entry points detected: {}",
- if analysis.entry_points.is_empty() { "ā None" } else { "ā
Yes" });
- println!(" 2. Ports identified: {}",
- if analysis.ports.is_empty() { "ā None" } else { "ā
Yes" });
- println!(" 3. Environment variables extracted: {}",
- if analysis.environment_variables.is_empty() { "ā None" } else { "ā
Yes" });
- println!(" 4. Build scripts analyzed: {}",
- if analysis.build_scripts.is_empty() { "ā None" } else { "ā
Yes" });
- println!(" 5. Project type determined: {}",
- if matches!(analysis.project_type, ProjectType::Unknown) { "ā Unknown" } else { "ā
Yes" });
-
+ println!(
+ " 1. Entry points detected: {}",
+ if analysis.entry_points.is_empty() {
+ "ā None"
+ } else {
+ "ā
Yes"
+ }
+ );
+ println!(
+ " 2. Ports identified: {}",
+ if analysis.ports.is_empty() {
+ "ā None"
+ } else {
+ "ā
Yes"
+ }
+ );
+ println!(
+ " 3. Environment variables extracted: {}",
+ if analysis.environment_variables.is_empty() {
+ "ā None"
+ } else {
+ "ā
Yes"
+ }
+ );
+ println!(
+ " 4. Build scripts analyzed: {}",
+ if analysis.build_scripts.is_empty() {
+ "ā None"
+ } else {
+ "ā
Yes"
+ }
+ );
+ println!(
+ " 5. Project type determined: {}",
+ if matches!(analysis.project_type, ProjectType::Unknown) {
+ "ā Unknown"
+ } else {
+ "ā
Yes"
+ }
+ );
+
println!("\n⨠Project Context Analysis Complete!");
-
+
Ok(())
-}
\ No newline at end of file
+}
diff --git a/src/agent/commands.rs b/src/agent/commands.rs
index 9ebb19de..dd381e75 100644
--- a/src/agent/commands.rs
+++ b/src/agent/commands.rs
@@ -8,7 +8,7 @@
use crate::agent::ui::colors::ansi;
use crossterm::{
- cursor::{self, MoveUp, MoveToColumn},
+ cursor::{self, MoveToColumn, MoveUp},
event::{self, Event, KeyCode},
execute,
terminal::{self, Clear, ClearType},
@@ -244,7 +244,7 @@ impl TokenUsage {
let input_cost = (self.prompt_tokens as f64 / 1_000_000.0) * input_per_m;
let output_cost = (self.completion_tokens as f64 / 1_000_000.0) * output_per_m;
-
+
(input_cost, output_cost, input_cost + output_cost)
}
@@ -260,20 +260,41 @@ impl TokenUsage {
};
println!();
- println!(" {}āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā{}", ansi::PURPLE, ansi::RESET);
+ println!(
+ " {}āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā{}",
+ ansi::PURPLE,
+ ansi::RESET
+ );
println!(" {}š° Session Cost & Usage{}", ansi::PURPLE, ansi::RESET);
- println!(" {}āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā{}", ansi::PURPLE, ansi::RESET);
+ println!(
+ " {}āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā{}",
+ ansi::PURPLE,
+ ansi::RESET
+ );
println!();
println!(" {}Model:{} {}", ansi::DIM, ansi::RESET, model);
- println!(" {}Duration:{} {:02}:{:02}:{:02}",
- ansi::DIM, ansi::RESET,
+ println!(
+ " {}Duration:{} {:02}:{:02}:{:02}",
+ ansi::DIM,
+ ansi::RESET,
duration.as_secs() / 3600,
(duration.as_secs() % 3600) / 60,
duration.as_secs() % 60
);
- println!(" {}Requests:{} {}", ansi::DIM, ansi::RESET, self.request_count);
+ println!(
+ " {}Requests:{} {}",
+ ansi::DIM,
+ ansi::RESET,
+ self.request_count
+ );
println!();
- println!(" {}Tokens{} ({}){}:", ansi::CYAN, ansi::RESET, accuracy_note, ansi::RESET);
+ println!(
+ " {}Tokens{} ({}){}:",
+ ansi::CYAN,
+ ansi::RESET,
+ accuracy_note,
+ ansi::RESET
+ );
println!(" Input: {:>10} tokens", self.prompt_tokens);
println!(" Output: {:>10} tokens", self.completion_tokens);
@@ -282,7 +303,12 @@ impl TokenUsage {
println!();
println!(" {}Cache:{}", ansi::CYAN, ansi::RESET);
if self.cache_read_tokens > 0 {
- println!(" Read: {:>10} tokens {}(saved){}", self.cache_read_tokens, ansi::SUCCESS, ansi::RESET);
+ println!(
+ " Read: {:>10} tokens {}(saved){}",
+ self.cache_read_tokens,
+ ansi::SUCCESS,
+ ansi::RESET
+ );
}
if self.cache_creation_tokens > 0 {
println!(" Created: {:>10} tokens", self.cache_creation_tokens);
@@ -297,12 +323,22 @@ impl TokenUsage {
}
println!();
- println!(" {}Total: {:>10} tokens{}", ansi::BOLD, self.format_total(), ansi::RESET);
+ println!(
+ " {}Total: {:>10} tokens{}",
+ ansi::BOLD,
+ self.format_total(),
+ ansi::RESET
+ );
println!();
println!(" {}Estimated Cost:{}", ansi::SUCCESS, ansi::RESET);
println!(" Input: ${:.4}", input_cost);
println!(" Output: ${:.4}", output_cost);
- println!(" {}Total: ${:.4}{}", ansi::BOLD, total_cost, ansi::RESET);
+ println!(
+ " {}Total: ${:.4}{}",
+ ansi::BOLD,
+ total_cost,
+ ansi::RESET
+ );
println!();
// Show note about accuracy
@@ -311,7 +347,11 @@ impl TokenUsage {
println!(" {}(Based on actual API usage){}", ansi::DIM, ansi::RESET);
}
TokenCountType::Approximate => {
- println!(" {}(Estimates based on ~4 chars/token){}", ansi::DIM, ansi::RESET);
+ println!(
+ " {}(Estimates based on ~4 chars/token){}",
+ ansi::DIM,
+ ansi::RESET
+ );
}
}
println!();
@@ -343,11 +383,14 @@ impl CommandPicker {
self.filtered_commands = SLASH_COMMANDS
.iter()
.filter(|cmd| {
- cmd.name.starts_with(&self.filter) ||
- cmd.alias.map(|a| a.starts_with(&self.filter)).unwrap_or(false)
+ cmd.name.starts_with(&self.filter)
+ || cmd
+ .alias
+ .map(|a| a.starts_with(&self.filter))
+ .unwrap_or(false)
})
.collect();
-
+
// Reset selection if out of bounds
if self.selected_index >= self.filtered_commands.len() {
self.selected_index = 0;
@@ -363,7 +406,9 @@ impl CommandPicker {
/// Move selection down
pub fn move_down(&mut self) {
- if !self.filtered_commands.is_empty() && self.selected_index < self.filtered_commands.len() - 1 {
+ if !self.filtered_commands.is_empty()
+ && self.selected_index < self.filtered_commands.len() - 1
+ {
self.selected_index += 1;
}
}
@@ -376,7 +421,7 @@ impl CommandPicker {
/// Render the picker suggestions below current line
pub fn render_suggestions(&self) -> usize {
let mut stdout = io::stdout();
-
+
if self.filtered_commands.is_empty() {
println!("\n {}No matching commands{}", ansi::DIM, ansi::RESET);
let _ = stdout.flush();
@@ -385,19 +430,30 @@ impl CommandPicker {
for (i, cmd) in self.filtered_commands.iter().enumerate() {
let is_selected = i == self.selected_index;
-
+
if is_selected {
// Selected item - highlighted with arrow
- println!(" {}āø /{:<15}{} {}{}{}",
- ansi::PURPLE, cmd.name, ansi::RESET,
- ansi::PURPLE, cmd.description, ansi::RESET);
+ println!(
+ " {}āø /{:<15}{} {}{}{}",
+ ansi::PURPLE,
+ cmd.name,
+ ansi::RESET,
+ ansi::PURPLE,
+ cmd.description,
+ ansi::RESET
+ );
} else {
// Normal item - dimmed
- println!(" {} /{:<15} {}{}",
- ansi::DIM, cmd.name, cmd.description, ansi::RESET);
+ println!(
+ " {} /{:<15} {}{}",
+ ansi::DIM,
+ cmd.name,
+ cmd.description,
+ ansi::RESET
+ );
}
}
-
+
let _ = stdout.flush();
self.filtered_commands.len()
}
@@ -418,29 +474,33 @@ impl CommandPicker {
pub fn show_command_picker(initial_filter: &str) -> Option {
let mut picker = CommandPicker::new();
picker.set_filter(initial_filter);
-
+
// Enable raw mode for real-time key handling
if terminal::enable_raw_mode().is_err() {
// Fallback to simple mode if raw mode fails
return show_simple_picker(&picker);
}
-
+
let mut stdout = io::stdout();
let mut input_buffer = format!("/{}", initial_filter);
let mut last_rendered_lines = 0;
-
+
// Initial render
println!(); // Move to new line for suggestions
last_rendered_lines = picker.render_suggestions();
-
+
// Move back up to input line and position cursor
- let _ = execute!(stdout, MoveUp(last_rendered_lines as u16 + 1), MoveToColumn(0));
+ let _ = execute!(
+ stdout,
+ MoveUp(last_rendered_lines as u16 + 1),
+ MoveToColumn(0)
+ );
print!("{}You: {}{}", ansi::SUCCESS, ansi::RESET, input_buffer);
let _ = stdout.flush();
-
+
// Move down to after suggestions
let _ = execute!(stdout, cursor::MoveDown(last_rendered_lines as u16 + 1));
-
+
let result = loop {
// Wait for key event
if let Ok(Event::Key(key_event)) = event::read() {
@@ -477,7 +537,7 @@ pub fn show_command_picker(initial_filter: &str) -> Option {
input_buffer.push(c);
let filter = input_buffer.trim_start_matches('/');
picker.set_filter(filter);
-
+
// If there's an exact match and user typed enough, auto-select
if picker.filtered_commands.len() == 1 {
// Perfect match - could auto-complete
@@ -491,37 +551,37 @@ pub fn show_command_picker(initial_filter: &str) -> Option {
}
_ => {}
}
-
+
// Clear old suggestions and re-render
picker.clear_lines(last_rendered_lines);
-
+
// Re-render input line
let _ = execute!(stdout, Clear(ClearType::CurrentLine), MoveToColumn(0));
print!("{}You: {}{}", ansi::SUCCESS, ansi::RESET, input_buffer);
let _ = stdout.flush();
-
+
// Render suggestions below
println!();
last_rendered_lines = picker.render_suggestions();
-
+
// Move back to input line position
let _ = execute!(stdout, MoveUp(last_rendered_lines as u16 + 1));
let _ = execute!(stdout, MoveToColumn((5 + input_buffer.len()) as u16));
let _ = stdout.flush();
-
+
// Move down to after suggestions for next iteration
let _ = execute!(stdout, cursor::MoveDown(last_rendered_lines as u16 + 1));
}
};
-
+
// Disable raw mode
let _ = terminal::disable_raw_mode();
-
+
// Clean up display
picker.clear_lines(last_rendered_lines);
let _ = execute!(stdout, Clear(ClearType::CurrentLine), MoveToColumn(0));
let _ = stdout.flush();
-
+
result
}
@@ -530,19 +590,33 @@ fn show_simple_picker(picker: &CommandPicker) -> Option {
println!();
println!(" {}š Available Commands:{}", ansi::CYAN, ansi::RESET);
println!();
-
+
for (i, cmd) in picker.filtered_commands.iter().enumerate() {
- print!(" {} {}/{:<12}", format!("[{}]", i + 1), ansi::PURPLE, cmd.name);
+ print!(
+ " {} {}/{:<12}",
+ format!("[{}]", i + 1),
+ ansi::PURPLE,
+ cmd.name
+ );
if let Some(alias) = cmd.alias {
print!(" ({})", alias);
}
- println!("{} - {}{}{}", ansi::RESET, ansi::DIM, cmd.description, ansi::RESET);
+ println!(
+ "{} - {}{}{}",
+ ansi::RESET,
+ ansi::DIM,
+ cmd.description,
+ ansi::RESET
+ );
}
-
+
println!();
- print!(" Select (1-{}) or press Enter to cancel: ", picker.filtered_commands.len());
+ print!(
+ " Select (1-{}) or press Enter to cancel: ",
+ picker.filtered_commands.len()
+ );
let _ = io::stdout().flush();
-
+
let mut input = String::new();
if io::stdin().read_line(&mut input).is_ok() {
let input = input.trim();
@@ -552,15 +626,15 @@ fn show_simple_picker(picker: &CommandPicker) -> Option {
}
}
}
-
+
None
}
/// Check if a command matches a query (name or alias)
pub fn match_command(query: &str) -> Option<&'static SlashCommand> {
let query = query.trim_start_matches('/').to_lowercase();
-
- SLASH_COMMANDS.iter().find(|cmd| {
- cmd.name == query || cmd.alias.map(|a| a == query).unwrap_or(false)
- })
+
+ SLASH_COMMANDS
+ .iter()
+ .find(|cmd| cmd.name == query || cmd.alias.map(|a| a == query).unwrap_or(false))
}
diff --git a/src/agent/compact/config.rs b/src/agent/compact/config.rs
index a21374be..cf18ef06 100644
--- a/src/agent/compact/config.rs
+++ b/src/agent/compact/config.rs
@@ -162,10 +162,14 @@ impl CompactConfig {
if let Some(true) = self.thresholds.on_turn_end {
if last_is_user {
// Only trigger if we're also close to other thresholds
- let near_token = self.thresholds.token_threshold
+ let near_token = self
+ .thresholds
+ .token_threshold
.map(|t| token_count >= t / 2)
.unwrap_or(false);
- let near_turn = self.thresholds.turn_threshold
+ let near_turn = self
+ .thresholds
+ .turn_threshold
.map(|t| turn_count >= t / 2)
.unwrap_or(false);
@@ -187,19 +191,28 @@ impl CompactConfig {
) -> Option {
if let Some(threshold) = self.thresholds.token_threshold {
if token_count >= threshold {
- return Some(format!("token count ({}) >= threshold ({})", token_count, threshold));
+ return Some(format!(
+ "token count ({}) >= threshold ({})",
+ token_count, threshold
+ ));
}
}
if let Some(threshold) = self.thresholds.turn_threshold {
if turn_count >= threshold {
- return Some(format!("turn count ({}) >= threshold ({})", turn_count, threshold));
+ return Some(format!(
+ "turn count ({}) >= threshold ({})",
+ turn_count, threshold
+ ));
}
}
if let Some(threshold) = self.thresholds.message_threshold {
if message_count >= threshold {
- return Some(format!("message count ({}) >= threshold ({})", message_count, threshold));
+ return Some(format!(
+ "message count ({}) >= threshold ({})",
+ message_count, threshold
+ ));
}
}
diff --git a/src/agent/compact/strategy.rs b/src/agent/compact/strategy.rs
index 69d9d790..42a6b0b1 100644
--- a/src/agent/compact/strategy.rs
+++ b/src/agent/compact/strategy.rs
@@ -74,10 +74,7 @@ pub enum CompactionStrategy {
impl Default for CompactionStrategy {
fn default() -> Self {
// Default: evict 60% or retain last 10, whichever is more conservative
- Self::Min(
- Box::new(Self::Evict(0.6)),
- Box::new(Self::Retain(10)),
- )
+ Self::Min(Box::new(Self::Evict(0.6)), Box::new(Self::Retain(10)))
}
}
@@ -125,9 +122,7 @@ impl CompactionStrategy {
let evict_count = (total as f64 * fraction).floor() as usize;
total.saturating_sub(retention_window).min(evict_count)
}
- Self::Retain(keep) => {
- total.saturating_sub(*keep.max(&retention_window))
- }
+ Self::Retain(keep) => total.saturating_sub(*keep.max(&retention_window)),
Self::Min(a, b) => {
let end_a = a.calculate_raw_end(total, retention_window);
let end_b = b.calculate_raw_end(total, retention_window);
@@ -175,9 +170,7 @@ impl CompactionStrategy {
// Find the tool result with matching ID
if let Some(tool_id) = &last_evicted.tool_id {
for i in end..messages.len().min(end + 5) {
- if messages[i].is_tool_result
- && messages[i].tool_id.as_ref() == Some(tool_id)
- {
+ if messages[i].is_tool_result && messages[i].tool_id.as_ref() == Some(tool_id) {
// Found matching result - extend eviction to include it
end = i + 1;
break;
@@ -287,7 +280,7 @@ mod tests {
(MessageRole::System, false, false),
(MessageRole::User, false, false),
(MessageRole::Assistant, true, false), // has tool call
- (MessageRole::Tool, false, true), // tool result
+ (MessageRole::Tool, false, true), // tool result
(MessageRole::Assistant, false, false),
(MessageRole::User, false, false),
(MessageRole::Assistant, false, false),
@@ -315,7 +308,7 @@ mod tests {
(MessageRole::System, false, false),
(MessageRole::User, false, false),
(MessageRole::Assistant, false, false),
- (MessageRole::User, false, false), // droppable
+ (MessageRole::User, false, false), // droppable
(MessageRole::Assistant, false, false),
]);
messages[3].droppable = true;
@@ -339,9 +332,7 @@ mod tests {
// Retain(5) would evict 5, keeping 5
// Min should be more conservative = evict less = end at 5
- let messages = make_messages(&vec![
- (MessageRole::Assistant, false, false); 10
- ]);
+ let messages = make_messages(&vec![(MessageRole::Assistant, false, false); 10]);
let range = strategy.calculate_eviction_range(&messages, 3);
assert!(range.is_some());
diff --git a/src/agent/compact/summary.rs b/src/agent/compact/summary.rs
index d813e3bf..49c474f9 100644
--- a/src/agent/compact/summary.rs
+++ b/src/agent/compact/summary.rs
@@ -139,7 +139,11 @@ impl SummaryFrame {
content.push_str(&format!(
"This summary covers {} conversation turn{}.\n",
summary.turns_compacted,
- if summary.turns_compacted == 1 { "" } else { "s" }
+ if summary.turns_compacted == 1 {
+ ""
+ } else {
+ "s"
+ }
));
// Tool usage summary
@@ -274,7 +278,10 @@ impl SummaryFrame {
content.push_str("");
let token_count = content.len() / 4;
- Self { content, token_count }
+ Self {
+ content,
+ token_count,
+ }
}
}
diff --git a/src/agent/history.rs b/src/agent/history.rs
index 365f3f55..9415aaf8 100644
--- a/src/agent/history.rs
+++ b/src/agent/history.rs
@@ -204,8 +204,11 @@ impl ConversationHistory {
/// Get the reason for compaction (for logging)
pub fn compaction_reason(&self) -> Option {
- self.compact_config
- .compaction_reason(self.total_tokens, self.user_turn_count, self.turns.len())
+ self.compact_config.compaction_reason(
+ self.total_tokens,
+ self.user_turn_count,
+ self.turns.len(),
+ )
}
/// Get current token count
@@ -237,7 +240,7 @@ impl ConversationHistory {
pub fn compact(&mut self) -> Option {
use super::compact::strategy::{MessageMeta, MessageRole};
use super::compact::summary::{
- extract_assistant_action, extract_user_intent, ToolCallSummary, TurnSummary,
+ ToolCallSummary, TurnSummary, extract_assistant_action, extract_user_intent,
};
if self.turns.len() < 2 {
@@ -285,7 +288,8 @@ impl ConversationHistory {
let strategy = CompactionStrategy::default();
// Calculate eviction range with tool-call safety
- let range = strategy.calculate_eviction_range(&messages, self.compact_config.retention_window)?;
+ let range =
+ strategy.calculate_eviction_range(&messages, self.compact_config.retention_window)?;
if range.is_empty() {
return None;
@@ -383,8 +387,8 @@ impl ConversationHistory {
/// Convert history to Rig Message format for the agent
/// Uses structured summary frames to preserve context
pub fn to_messages(&self) -> Vec {
- use rig::completion::message::{AssistantContent, Text, UserContent};
use rig::OneOrMany;
+ use rig::completion::message::{AssistantContent, Text, UserContent};
let mut messages = Vec::new();
@@ -399,8 +403,9 @@ impl ConversationHistory {
messages.push(Message::Assistant {
id: None,
content: OneOrMany::one(AssistantContent::Text(Text {
- text: "I understand the previous context. I'll continue from where we left off."
- .to_string(),
+ text:
+ "I understand the previous context. I'll continue from where we left off."
+ .to_string(),
})),
});
}
@@ -436,7 +441,9 @@ impl ConversationHistory {
messages.push(Message::Assistant {
id: None,
- content: OneOrMany::one(AssistantContent::Text(Text { text: response_text })),
+ content: OneOrMany::one(AssistantContent::Text(Text {
+ text: response_text,
+ })),
});
}
@@ -451,10 +458,7 @@ impl ConversationHistory {
/// Get a brief status string for display
pub fn status(&self) -> String {
let compressed_info = if self.summary_frame.is_some() {
- format!(
- " (+{} compacted)",
- self.context_summary.turns_compacted
- )
+ format!(" (+{} compacted)", self.context_summary.turns_compacted)
} else {
String::new()
};
@@ -611,11 +615,7 @@ mod tests {
// Add turns to exceed threshold
for i in 0..5 {
- history.add_turn(
- format!("Question {}", i),
- format!("Answer {}", i),
- vec![],
- );
+ history.add_turn(format!("Question {}", i), format!("Answer {}", i), vec![]);
}
assert!(history.needs_compaction());
diff --git a/src/agent/ide/client.rs b/src/agent/ide/client.rs
index a3af43b4..12c54b53 100644
--- a/src/agent/ide/client.rs
+++ b/src/agent/ide/client.rs
@@ -3,7 +3,7 @@
//! Connects to the IDE's MCP server via HTTP SSE and provides methods
//! for opening diffs and receiving notifications.
-use super::detect::{detect_ide, get_ide_process_info, IdeInfo, IdeProcessInfo};
+use super::detect::{IdeInfo, IdeProcessInfo, detect_ide, get_ide_process_info};
use super::types::*;
use std::collections::HashMap;
use std::env;
@@ -161,16 +161,24 @@ impl IdeClient {
// Debug: show where we're looking
if cfg!(debug_assertions) || env::var("SYNCABLE_DEBUG").is_ok() {
- eprintln!("[IDE Debug] Looking for port files in temp_dir: {:?}", temp_dir);
+ eprintln!(
+ "[IDE Debug] Looking for port files in temp_dir: {:?}",
+ temp_dir
+ );
}
// Try Syncable extension first - scan all port files, match by workspace
let syncable_port_dir = temp_dir.join("syncable").join("ide");
if cfg!(debug_assertions) || env::var("SYNCABLE_DEBUG").is_ok() {
- eprintln!("[IDE Debug] Checking Syncable dir: {:?} (exists: {})",
- syncable_port_dir, syncable_port_dir.exists());
+ eprintln!(
+ "[IDE Debug] Checking Syncable dir: {:?} (exists: {})",
+ syncable_port_dir,
+ syncable_port_dir.exists()
+ );
}
- if let Some(config) = self.find_port_file_by_workspace(&syncable_port_dir, "syncable-ide-server") {
+ if let Some(config) =
+ self.find_port_file_by_workspace(&syncable_port_dir, "syncable-ide-server")
+ {
if cfg!(debug_assertions) || env::var("SYNCABLE_DEBUG").is_ok() {
eprintln!("[IDE Debug] Found Syncable config: port={}", config.port);
}
@@ -180,10 +188,15 @@ impl IdeClient {
// Try Gemini CLI extension (for compatibility)
let gemini_port_dir = temp_dir.join("gemini").join("ide");
if cfg!(debug_assertions) || env::var("SYNCABLE_DEBUG").is_ok() {
- eprintln!("[IDE Debug] Checking Gemini dir: {:?} (exists: {})",
- gemini_port_dir, gemini_port_dir.exists());
+ eprintln!(
+ "[IDE Debug] Checking Gemini dir: {:?} (exists: {})",
+ gemini_port_dir,
+ gemini_port_dir.exists()
+ );
}
- if let Some(config) = self.find_port_file_by_workspace(&gemini_port_dir, "gemini-ide-server") {
+ if let Some(config) =
+ self.find_port_file_by_workspace(&gemini_port_dir, "gemini-ide-server")
+ {
if cfg!(debug_assertions) || env::var("SYNCABLE_DEBUG").is_ok() {
eprintln!("[IDE Debug] Found Gemini config: port={}", config.port);
}
@@ -212,7 +225,10 @@ impl IdeClient {
if let Ok(content) = fs::read_to_string(entry.path()) {
if let Ok(config) = serde_json::from_str::(&content) {
if debug {
- eprintln!("[IDE Debug] Config workspace_path: {:?}", config.workspace_path);
+ eprintln!(
+ "[IDE Debug] Config workspace_path: {:?}",
+ config.workspace_path
+ );
}
if self.validate_workspace_path(&config.workspace_path) {
return Some(config);
@@ -255,7 +271,9 @@ impl IdeClient {
/// Establish HTTP connection and initialize MCP session
async fn establish_connection(&mut self) -> Result<(), IdeError> {
- let port = self.port.ok_or(IdeError::ConnectionFailed("No port".to_string()))?;
+ let port = self
+ .port
+ .ok_or(IdeError::ConnectionFailed("No port".to_string()))?;
let url = format!("http://127.0.0.1:{}/mcp", port);
// Build initialize request
@@ -274,7 +292,8 @@ impl IdeClient {
);
// Send initialize request
- let mut request = self.http_client
+ let mut request = self
+ .http_client
.post(&url)
.header("Accept", "application/json, text/event-stream")
.json(&init_request);
@@ -301,15 +320,12 @@ impl IdeClient {
.await
.map_err(|e| IdeError::ConnectionFailed(e.to_string()))?;
- let response_data: JsonRpcResponse = Self::parse_sse_response(&response_text)
- .map_err(IdeError::ConnectionFailed)?;
+ let response_data: JsonRpcResponse =
+ Self::parse_sse_response(&response_text).map_err(IdeError::ConnectionFailed)?;
if response_data.error.is_some() {
return Err(IdeError::ConnectionFailed(
- response_data
- .error
- .map(|e| e.message)
- .unwrap_or_default(),
+ response_data.error.map(|e| e.message).unwrap_or_default(),
));
}
@@ -326,8 +342,7 @@ impl IdeClient {
}
}
// Fallback: try parsing entire response as JSON (for non-SSE responses)
- serde_json::from_str(text)
- .map_err(|e| format!("Failed to parse response: {}", e))
+ serde_json::from_str(text).map_err(|e| format!("Failed to parse response: {}", e))
}
/// Get next request ID
@@ -343,12 +358,15 @@ impl IdeClient {
method: &str,
params: serde_json::Value,
) -> Result {
- let port = self.port.ok_or(IdeError::ConnectionFailed("Not connected".to_string()))?;
+ let port = self
+ .port
+ .ok_or(IdeError::ConnectionFailed("Not connected".to_string()))?;
let url = format!("http://127.0.0.1:{}/mcp", port);
let request = JsonRpcRequest::new(self.next_request_id(), method, params);
- let mut http_request = self.http_client
+ let mut http_request = self
+ .http_client
.post(&url)
.header("Accept", "application/json, text/event-stream")
.json(&request);
@@ -371,17 +389,22 @@ impl IdeClient {
.await
.map_err(|e| IdeError::RequestFailed(e.to_string()))?;
- Self::parse_sse_response(&response_text)
- .map_err(IdeError::RequestFailed)
+ Self::parse_sse_response(&response_text).map_err(IdeError::RequestFailed)
}
/// Open a diff view in the IDE
///
/// This sends the file path and new content to the IDE, which will show
/// a diff view. The method returns when the user accepts or rejects the diff.
- pub async fn open_diff(&self, file_path: &str, new_content: &str) -> Result {
+ pub async fn open_diff(
+ &self,
+ file_path: &str,
+ new_content: &str,
+ ) -> Result {
if !self.is_connected() {
- return Err(IdeError::ConnectionFailed("Not connected to IDE".to_string()));
+ return Err(IdeError::ConnectionFailed(
+ "Not connected to IDE".to_string(),
+ ));
}
let params = serde_json::to_value(ToolCallParams {
@@ -427,7 +450,9 @@ impl IdeClient {
/// Close a diff view in the IDE
pub async fn close_diff(&self, file_path: &str) -> Result, IdeError> {
if !self.is_connected() {
- return Err(IdeError::ConnectionFailed("Not connected to IDE".to_string()));
+ return Err(IdeError::ConnectionFailed(
+ "Not connected to IDE".to_string(),
+ ));
}
let params = serde_json::to_value(ToolCallParams {
@@ -449,7 +474,8 @@ impl IdeClient {
if content.content_type == "text" {
if let Some(text) = content.text {
if let Ok(parsed) = serde_json::from_str::(&text) {
- if let Some(content) = parsed.get("content").and_then(|c| c.as_str())
+ if let Some(content) =
+ parsed.get("content").and_then(|c| c.as_str())
{
return Ok(Some(content.to_string()));
}
@@ -505,9 +531,14 @@ impl IdeClient {
///
/// If `file_path` is provided, returns diagnostics only for that file.
/// Otherwise returns all diagnostics across the workspace.
- pub async fn get_diagnostics(&self, file_path: Option<&str>) -> Result {
+ pub async fn get_diagnostics(
+ &self,
+ file_path: Option<&str>,
+ ) -> Result {
if !self.is_connected() {
- return Err(IdeError::ConnectionFailed("Not connected to IDE".to_string()));
+ return Err(IdeError::ConnectionFailed(
+ "Not connected to IDE".to_string(),
+ ));
}
let params = serde_json::to_value(ToolCallParams {
@@ -529,13 +560,24 @@ impl IdeClient {
if content.content_type == "text" {
if let Some(text) = content.text {
// Try to parse as DiagnosticsResponse
- if let Ok(diag_response) = serde_json::from_str::(&text) {
+ if let Ok(diag_response) =
+ serde_json::from_str::(&text)
+ {
return Ok(diag_response);
}
// Try parsing as raw array of diagnostics
- if let Ok(diagnostics) = serde_json::from_str::>(&text) {
- let total_errors = diagnostics.iter().filter(|d| d.severity == DiagnosticSeverity::Error).count() as u32;
- let total_warnings = diagnostics.iter().filter(|d| d.severity == DiagnosticSeverity::Warning).count() as u32;
+ if let Ok(diagnostics) = serde_json::from_str::>(&text)
+ {
+ let total_errors = diagnostics
+ .iter()
+ .filter(|d| d.severity == DiagnosticSeverity::Error)
+ .count()
+ as u32;
+ let total_warnings = diagnostics
+ .iter()
+ .filter(|d| d.severity == DiagnosticSeverity::Warning)
+ .count()
+ as u32;
return Ok(DiagnosticsResponse {
diagnostics,
total_errors,
diff --git a/src/agent/ide/mod.rs b/src/agent/ide/mod.rs
index ff6a76f8..f444b357 100644
--- a/src/agent/ide/mod.rs
+++ b/src/agent/ide/mod.rs
@@ -3,10 +3,10 @@
//! Provides integration with IDEs (VS Code, Cursor, etc.) via MCP (Model Context Protocol).
//! This enables showing file diffs in the IDE's native diff viewer instead of terminal.
+pub mod client;
pub mod detect;
pub mod types;
-pub mod client;
-pub use client::{IdeClient, DiffResult, IdeError};
+pub use client::{DiffResult, IdeClient, IdeError};
pub use detect::{IdeInfo, detect_ide, get_ide_process_info};
pub use types::{Diagnostic, DiagnosticSeverity, DiagnosticsResponse};
diff --git a/src/agent/ide/types.rs b/src/agent/ide/types.rs
index 39da6dac..9356e7f8 100644
--- a/src/agent/ide/types.rs
+++ b/src/agent/ide/types.rs
@@ -170,7 +170,10 @@ pub struct OpenDiffArgs {
pub struct CloseDiffArgs {
#[serde(rename = "filePath")]
pub file_path: String,
- #[serde(rename = "suppressNotification", skip_serializing_if = "Option::is_none")]
+ #[serde(
+ rename = "suppressNotification",
+ skip_serializing_if = "Option::is_none"
+ )]
pub suppress_notification: Option,
}
diff --git a/src/agent/mod.rs b/src/agent/mod.rs
index 168f870d..4c2c65f8 100644
--- a/src/agent/mod.rs
+++ b/src/agent/mod.rs
@@ -39,6 +39,7 @@ pub mod session;
pub mod tools;
pub mod ui;
use colored::Colorize;
+use commands::TokenUsage;
use history::{ConversationHistory, ToolCallRecord};
use ide::IdeClient;
use rig::{
@@ -47,7 +48,6 @@ use rig::{
providers::{anthropic, openai},
};
use session::{ChatSession, PlanMode};
-use commands::TokenUsage;
use std::path::Path;
use std::sync::Arc;
use tokio::sync::Mutex as TokioMutex;
@@ -80,7 +80,10 @@ impl std::str::FromStr for ProviderType {
"openai" => Ok(ProviderType::OpenAI),
"anthropic" => Ok(ProviderType::Anthropic),
"bedrock" | "aws" | "aws-bedrock" => Ok(ProviderType::Bedrock),
- _ => Err(format!("Unknown provider: {}. Use: openai, anthropic, or bedrock", s)),
+ _ => Err(format!(
+ "Unknown provider: {}. Use: openai, anthropic, or bedrock",
+ s
+ )),
}
}
}
@@ -149,16 +152,16 @@ pub async fn run_interactive(
}
Err(e) => {
// IDE detected but companion not running or connection failed
- println!(
- "{} IDE companion not connected: {}",
- "!".yellow(),
- e
- );
+ println!("{} IDE companion not connected: {}", "!".yellow(), e);
None
}
}
} else {
- println!("{} No IDE detected (TERM_PROGRAM={})", "Ā·".dimmed(), std::env::var("TERM_PROGRAM").unwrap_or_default());
+ println!(
+ "{} No IDE detected (TERM_PROGRAM={})",
+ "Ā·".dimmed(),
+ std::env::var("TERM_PROGRAM").unwrap_or_default()
+ );
None
}
};
@@ -185,7 +188,10 @@ pub async fn run_interactive(
loop {
// Show conversation status if we have history
if !conversation_history.is_empty() {
- println!("{}", format!(" š¬ Context: {}", conversation_history.status()).dimmed());
+ println!(
+ "{}",
+ format!(" š¬ Context: {}", conversation_history.status()).dimmed()
+ );
}
// Check for pending input (from plan menu selection)
@@ -243,7 +249,10 @@ pub async fn run_interactive(
// Check API key before making request (in case provider changed)
if !ChatSession::has_api_key(session.provider) {
- eprintln!("{}", "No API key configured. Use /provider to set one.".yellow());
+ eprintln!(
+ "{}",
+ "No API key configured. Use /provider to set one.".yellow()
+ );
continue;
}
@@ -251,7 +260,10 @@ pub async fn run_interactive(
if conversation_history.needs_compaction() {
println!("{}", " š¦ Compacting conversation history...".dimmed());
if let Some(summary) = conversation_history.compact() {
- println!("{}", format!(" ā Compressed {} turns", summary.matches("Turn").count()).dimmed());
+ println!(
+ "{}",
+ format!(" ā Compressed {} turns", summary.matches("Turn").count()).dimmed()
+ );
}
}
@@ -263,7 +275,10 @@ pub async fn run_interactive(
+ 5000; // System prompt overhead estimate
if estimated_input_tokens > 150_000 {
- println!("{}", " ā Large context detected. Pre-truncating...".yellow());
+ println!(
+ "{}",
+ " ā Large context detected. Pre-truncating...".yellow()
+ );
let old_count = raw_chat_history.len();
// Keep last 20 messages when approaching limit
@@ -271,7 +286,15 @@ pub async fn run_interactive(
let drain_count = raw_chat_history.len() - 20;
raw_chat_history.drain(0..drain_count);
conversation_history.clear(); // Stay in sync
- println!("{}", format!(" ā Truncated {} ā {} messages", old_count, raw_chat_history.len()).dimmed());
+ println!(
+ "{}",
+ format!(
+ " ā Truncated {} ā {} messages",
+ old_count,
+ raw_chat_history.len()
+ )
+ .dimmed()
+ );
}
}
@@ -292,10 +315,12 @@ pub async fn run_interactive(
let mut succeeded = false;
while retry_attempt < MAX_RETRIES && continuation_count < MAX_CONTINUATIONS && !succeeded {
-
// Log if this is a continuation attempt
if continuation_count > 0 {
- eprintln!("{}", format!(" š” Sending continuation request...").dimmed());
+ eprintln!(
+ "{}",
+ format!(" š” Sending continuation request...").dimmed()
+ );
}
// Create hook for Claude Code style tool display
@@ -303,7 +328,11 @@ pub async fn run_interactive(
let project_path_buf = session.project_path.clone();
// Select prompt based on query type (analysis vs generation) and plan mode
- let preamble = get_system_prompt(&session.project_path, Some(¤t_input), session.plan_mode);
+ let preamble = get_system_prompt(
+ &session.project_path,
+ Some(¤t_input),
+ session.plan_mode,
+ );
let is_generation = prompts::is_generation_query(¤t_input);
let is_planning = session.plan_mode.is_planning();
@@ -315,16 +344,17 @@ pub async fn run_interactive(
let client = openai::Client::from_env();
// For GPT-5.x reasoning models, enable reasoning with summary output
// so we can see the model's thinking process
- let reasoning_params = if session.model.starts_with("gpt-5") || session.model.starts_with("o1") {
- Some(serde_json::json!({
- "reasoning": {
- "effort": "medium",
- "summary": "detailed"
- }
- }))
- } else {
- None
- };
+ let reasoning_params =
+ if session.model.starts_with("gpt-5") || session.model.starts_with("o1") {
+ Some(serde_json::json!({
+ "reasoning": {
+ "effort": "medium",
+ "summary": "detailed"
+ }
+ }))
+ } else {
+ None
+ };
let mut builder = client
.agent(&session.model)
@@ -334,6 +364,7 @@ pub async fn run_interactive(
.tool(SecurityScanTool::new(project_path_buf.clone()))
.tool(VulnerabilitiesTool::new(project_path_buf.clone()))
.tool(HadolintTool::new(project_path_buf.clone()))
+ .tool(DclintTool::new(project_path_buf.clone()))
.tool(TerraformFmtTool::new(project_path_buf.clone()))
.tool(TerraformValidateTool::new(project_path_buf.clone()))
.tool(TerraformInstallTool::new())
@@ -349,19 +380,20 @@ pub async fn run_interactive(
.tool(PlanListTool::new(project_path_buf.clone()));
} else if is_generation {
// Standard mode + generation query: all tools including file writes and plan execution
- let (mut write_file_tool, mut write_files_tool) = if let Some(ref client) = ide_client {
- (
- WriteFileTool::new(project_path_buf.clone())
- .with_ide_client(client.clone()),
- WriteFilesTool::new(project_path_buf.clone())
- .with_ide_client(client.clone()),
- )
- } else {
- (
- WriteFileTool::new(project_path_buf.clone()),
- WriteFilesTool::new(project_path_buf.clone()),
- )
- };
+ let (mut write_file_tool, mut write_files_tool) =
+ if let Some(ref client) = ide_client {
+ (
+ WriteFileTool::new(project_path_buf.clone())
+ .with_ide_client(client.clone()),
+ WriteFilesTool::new(project_path_buf.clone())
+ .with_ide_client(client.clone()),
+ )
+ } else {
+ (
+ WriteFileTool::new(project_path_buf.clone()),
+ WriteFilesTool::new(project_path_buf.clone()),
+ )
+ };
// Disable confirmations if auto-accept mode is enabled (from plan menu)
if auto_accept_writes {
write_file_tool = write_file_tool.without_confirmation();
@@ -384,7 +416,8 @@ pub async fn run_interactive(
// Allow up to 50 tool call turns for complex generation tasks
// Use hook to display tool calls as they happen
// Pass conversation history for context continuity
- agent.prompt(¤t_input)
+ agent
+ .prompt(¤t_input)
.with_history(&mut raw_chat_history)
.with_hook(hook.clone())
.multi_turn(50)
@@ -407,6 +440,7 @@ pub async fn run_interactive(
.tool(SecurityScanTool::new(project_path_buf.clone()))
.tool(VulnerabilitiesTool::new(project_path_buf.clone()))
.tool(HadolintTool::new(project_path_buf.clone()))
+ .tool(DclintTool::new(project_path_buf.clone()))
.tool(TerraformFmtTool::new(project_path_buf.clone()))
.tool(TerraformValidateTool::new(project_path_buf.clone()))
.tool(TerraformInstallTool::new())
@@ -422,19 +456,20 @@ pub async fn run_interactive(
.tool(PlanListTool::new(project_path_buf.clone()));
} else if is_generation {
// Standard mode + generation query: all tools including file writes and plan execution
- let (mut write_file_tool, mut write_files_tool) = if let Some(ref client) = ide_client {
- (
- WriteFileTool::new(project_path_buf.clone())
- .with_ide_client(client.clone()),
- WriteFilesTool::new(project_path_buf.clone())
- .with_ide_client(client.clone()),
- )
- } else {
- (
- WriteFileTool::new(project_path_buf.clone()),
- WriteFilesTool::new(project_path_buf.clone()),
- )
- };
+ let (mut write_file_tool, mut write_files_tool) =
+ if let Some(ref client) = ide_client {
+ (
+ WriteFileTool::new(project_path_buf.clone())
+ .with_ide_client(client.clone()),
+ WriteFilesTool::new(project_path_buf.clone())
+ .with_ide_client(client.clone()),
+ )
+ } else {
+ (
+ WriteFileTool::new(project_path_buf.clone()),
+ WriteFilesTool::new(project_path_buf.clone()),
+ )
+ };
// Disable confirmations if auto-accept mode is enabled (from plan menu)
if auto_accept_writes {
write_file_tool = write_file_tool.without_confirmation();
@@ -454,7 +489,8 @@ pub async fn run_interactive(
// Allow up to 50 tool call turns for complex generation tasks
// Use hook to display tool calls as they happen
// Pass conversation history for context continuity
- agent.prompt(¤t_input)
+ agent
+ .prompt(¤t_input)
.with_history(&mut raw_chat_history)
.with_hook(hook.clone())
.multi_turn(50)
@@ -484,6 +520,7 @@ pub async fn run_interactive(
.tool(SecurityScanTool::new(project_path_buf.clone()))
.tool(VulnerabilitiesTool::new(project_path_buf.clone()))
.tool(HadolintTool::new(project_path_buf.clone()))
+ .tool(DclintTool::new(project_path_buf.clone()))
.tool(TerraformFmtTool::new(project_path_buf.clone()))
.tool(TerraformValidateTool::new(project_path_buf.clone()))
.tool(TerraformInstallTool::new())
@@ -499,19 +536,20 @@ pub async fn run_interactive(
.tool(PlanListTool::new(project_path_buf.clone()));
} else if is_generation {
// Standard mode + generation query: all tools including file writes and plan execution
- let (mut write_file_tool, mut write_files_tool) = if let Some(ref client) = ide_client {
- (
- WriteFileTool::new(project_path_buf.clone())
- .with_ide_client(client.clone()),
- WriteFilesTool::new(project_path_buf.clone())
- .with_ide_client(client.clone()),
- )
- } else {
- (
- WriteFileTool::new(project_path_buf.clone()),
- WriteFilesTool::new(project_path_buf.clone()),
- )
- };
+ let (mut write_file_tool, mut write_files_tool) =
+ if let Some(ref client) = ide_client {
+ (
+ WriteFileTool::new(project_path_buf.clone())
+ .with_ide_client(client.clone()),
+ WriteFilesTool::new(project_path_buf.clone())
+ .with_ide_client(client.clone()),
+ )
+ } else {
+ (
+ WriteFileTool::new(project_path_buf.clone()),
+ WriteFilesTool::new(project_path_buf.clone()),
+ )
+ };
// Disable confirmations if auto-accept mode is enabled (from plan menu)
if auto_accept_writes {
write_file_tool = write_file_tool.without_confirmation();
@@ -532,7 +570,8 @@ pub async fn run_interactive(
let agent = builder.build();
// Use same multi-turn pattern as OpenAI/Anthropic
- agent.prompt(&current_input)
+ agent
+ .prompt(&current_input)
.with_history(&mut raw_chat_history)
.with_hook(hook.clone())
.multi_turn(50)
@@ -550,20 +589,28 @@ pub async fn run_interactive(
let hook_usage = hook.get_usage().await;
if hook_usage.has_data() {
// Use actual token counts from API response
- session.token_usage.add_actual(hook_usage.input_tokens, hook_usage.output_tokens);
+ session
+ .token_usage
+ .add_actual(hook_usage.input_tokens, hook_usage.output_tokens);
} else {
// Fall back to estimation when API doesn't provide usage
let prompt_tokens = TokenUsage::estimate_tokens(&input);
let completion_tokens = TokenUsage::estimate_tokens(&text);
- session.token_usage.add_estimated(prompt_tokens, completion_tokens);
+ session
+ .token_usage
+ .add_estimated(prompt_tokens, completion_tokens);
}
// Reset hook usage for next request batch
hook.reset_usage().await;
// Show context indicator like Forge: [model/~tokens]
- let model_short = session.model.split('/').last()
+ let model_short = session
+ .model
+ .split('/')
+ .last()
.unwrap_or(&session.model)
- .split(':').next()
+ .split(':')
+ .next()
.unwrap_or(&session.model);
println!();
println!(
@@ -581,7 +628,14 @@ pub async fn run_interactive(
// Show tool call summary if significant
if batch_tool_count > 10 {
- println!("{}", format!(" ā Completed with {} tool calls ({} total this session)", batch_tool_count, total_tool_calls).dimmed());
+ println!(
+ "{}",
+ format!(
+ " ā Completed with {} tool calls ({} total this session)",
+ batch_tool_count, total_tool_calls
+ )
+ .dimmed()
+ );
}
// Add to conversation history with tool call records
@@ -592,13 +646,19 @@ pub async fn run_interactive(
if conversation_history.needs_compaction() {
println!("{}", " š¦ Compacting conversation history...".dimmed());
if let Some(summary) = conversation_history.compact() {
- println!("{}", format!(" ā Compressed {} turns", summary.matches("Turn").count()).dimmed());
+ println!(
+ "{}",
+ format!(" ā Compressed {} turns", summary.matches("Turn").count())
+ .dimmed()
+ );
}
}
// Also update legacy session history for compatibility
session.history.push(("user".to_string(), input.clone()));
- session.history.push(("assistant".to_string(), text.clone()));
+ session
+ .history
+ .push(("assistant".to_string(), text.clone()));
// Check if plan_create was called - show interactive menu
if let Some(plan_info) = find_plan_create_call(&tool_calls) {
@@ -652,7 +712,10 @@ pub async fn run_interactive(
println!();
// Check if this is a max depth error - handle as checkpoint
- if err_str.contains("MaxDepth") || err_str.contains("max_depth") || err_str.contains("reached limit") {
+ if err_str.contains("MaxDepth")
+ || err_str.contains("max_depth")
+ || err_str.contains("reached limit")
+ {
// Extract what was done before hitting the limit
let completed_tools = extract_tool_calls_from_hook(&hook).await;
let agent_thinking = extract_agent_messages_from_hook(&hook).await;
@@ -666,18 +729,35 @@ pub async fn run_interactive(
// Check if we've hit the absolute maximum
if total_tool_calls >= MAX_TOOL_CALLS {
- eprintln!("{}", format!("Maximum tool call limit ({}) reached.", MAX_TOOL_CALLS).red());
- eprintln!("{}", "The task is too complex. Try breaking it into smaller parts.".dimmed());
+ eprintln!(
+ "{}",
+ format!("Maximum tool call limit ({}) reached.", MAX_TOOL_CALLS)
+ .red()
+ );
+ eprintln!(
+ "{}",
+ "The task is too complex. Try breaking it into smaller parts."
+ .dimmed()
+ );
break;
}
// Ask user if they want to continue (unless auto-continue is enabled)
let should_continue = if auto_continue_tools {
- eprintln!("{}", " Auto-continuing (you selected 'always')...".dimmed());
+ eprintln!(
+ "{}",
+ " Auto-continuing (you selected 'always')...".dimmed()
+ );
true
} else {
- eprintln!("{}", "Excessive tool calls used. Want to continue?".yellow());
- eprintln!("{}", " [y] Yes, continue [n] No, stop [a] Always continue".dimmed());
+ eprintln!(
+ "{}",
+ "Excessive tool calls used. Want to continue?".yellow()
+ );
+ eprintln!(
+ "{}",
+ " [y] Yes, continue [n] No, stop [a] Always continue".dimmed()
+ );
print!(" > ");
let _ = std::io::Write::flush(&mut std::io::stdout());
@@ -698,51 +778,84 @@ pub async fn run_interactive(
};
if !should_continue {
- eprintln!("{}", "Stopped by user. Type 'continue' to resume later.".dimmed());
+ eprintln!(
+ "{}",
+ "Stopped by user. Type 'continue' to resume later.".dimmed()
+ );
// Add partial progress to history
if !completed_tools.is_empty() {
conversation_history.add_turn(
current_input.clone(),
- format!("[Stopped at checkpoint - {} tools completed]", batch_tool_count),
- vec![]
+ format!(
+ "[Stopped at checkpoint - {} tools completed]",
+ batch_tool_count
+ ),
+ vec![],
);
}
break;
}
// Continue from checkpoint
- eprintln!("{}", format!(
- " ā Continuing... {} remaining tool calls available",
- MAX_TOOL_CALLS - total_tool_calls
- ).dimmed());
+ eprintln!(
+ "{}",
+ format!(
+ " ā Continuing... {} remaining tool calls available",
+ MAX_TOOL_CALLS - total_tool_calls
+ )
+ .dimmed()
+ );
// Add partial progress to history (without duplicating tool calls)
conversation_history.add_turn(
current_input.clone(),
- format!("[Checkpoint - {} tools completed, continuing...]", batch_tool_count),
- vec![]
+ format!(
+ "[Checkpoint - {} tools completed, continuing...]",
+ batch_tool_count
+ ),
+ vec![],
);
// Build continuation prompt
- current_input = build_continuation_prompt(&input, &completed_tools, &agent_thinking);
+ current_input =
+ build_continuation_prompt(&input, &completed_tools, &agent_thinking);
// Brief delay before continuation
tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
continue; // Continue the loop without incrementing retry_attempt
- } else if err_str.contains("rate") || err_str.contains("Rate") || err_str.contains("429")
- || err_str.contains("Too many tokens") || err_str.contains("please wait")
- || err_str.contains("throttl") || err_str.contains("Throttl") {
+ } else if err_str.contains("rate")
+ || err_str.contains("Rate")
+ || err_str.contains("429")
+ || err_str.contains("Too many tokens")
+ || err_str.contains("please wait")
+ || err_str.contains("throttl")
+ || err_str.contains("Throttl")
+ {
eprintln!("{}", "ā Rate limited by API provider.".yellow());
// Wait before retry for rate limits (longer wait for "too many tokens")
retry_attempt += 1;
- let wait_secs = if err_str.contains("Too many tokens") { 30 } else { 5 };
- eprintln!("{}", format!(" Waiting {} seconds before retry ({}/{})...", wait_secs, retry_attempt, MAX_RETRIES).dimmed());
+ let wait_secs = if err_str.contains("Too many tokens") {
+ 30
+ } else {
+ 5
+ };
+ eprintln!(
+ "{}",
+ format!(
+ " Waiting {} seconds before retry ({}/{})...",
+ wait_secs, retry_attempt, MAX_RETRIES
+ )
+ .dimmed()
+ );
tokio::time::sleep(tokio::time::Duration::from_secs(wait_secs)).await;
} else if is_input_too_long_error(&err_str) {
// Context too large - truncate raw_chat_history directly
// NOTE: We truncate raw_chat_history (actual messages) not conversation_history
// because conversation_history may be empty/stale during errors
- eprintln!("{}", "ā Context too large for model. Truncating history...".yellow());
+ eprintln!(
+ "{}",
+ "ā Context too large for model. Truncating history...".yellow()
+ );
let old_token_count = estimate_raw_history_tokens(&raw_chat_history);
let old_msg_count = raw_chat_history.len();
@@ -773,10 +886,21 @@ pub async fn run_interactive(
// Retry with truncated context
retry_attempt += 1;
if retry_attempt < MAX_RETRIES {
- eprintln!("{}", format!(" ā Retrying with truncated context ({}/{})...", retry_attempt, MAX_RETRIES).dimmed());
+ eprintln!(
+ "{}",
+ format!(
+ " ā Retrying with truncated context ({}/{})...",
+ retry_attempt, MAX_RETRIES
+ )
+ .dimmed()
+ );
tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
} else {
- eprintln!("{}", "Context still too large after truncation. Try /clear to reset.".red());
+ eprintln!(
+ "{}",
+ "Context still too large after truncation. Try /clear to reset."
+ .red()
+ );
break;
}
} else if is_truncation_error(&err_str) {
@@ -785,7 +909,8 @@ pub async fn run_interactive(
let agent_thinking = extract_agent_messages_from_hook(&hook).await;
// Count actually completed tools (not in-progress)
- let completed_count = completed_tools.iter()
+ let completed_count = completed_tools
+ .iter()
.filter(|t| !t.result_summary.contains("IN PROGRESS"))
.count();
let in_progress_count = completed_tools.len() - completed_count;
@@ -796,7 +921,10 @@ pub async fn run_interactive(
let status_msg = if in_progress_count > 0 {
format!(
"ā Response truncated. {} completed, {} in-progress. Auto-continuing ({}/{})...",
- completed_count, in_progress_count, continuation_count, MAX_CONTINUATIONS
+ completed_count,
+ in_progress_count,
+ continuation_count,
+ MAX_CONTINUATIONS
)
} else {
format!(
@@ -820,14 +948,28 @@ pub async fn run_interactive(
// Check if we need compaction after adding this heavy turn
// This is important for long multi-turn sessions with many tool calls
if conversation_history.needs_compaction() {
- eprintln!("{}", " š¦ Compacting history before continuation...".dimmed());
+ eprintln!(
+ "{}",
+ " š¦ Compacting history before continuation...".dimmed()
+ );
if let Some(summary) = conversation_history.compact() {
- eprintln!("{}", format!(" ā Compressed {} turns", summary.matches("Turn").count()).dimmed());
+ eprintln!(
+ "{}",
+ format!(
+ " ā Compressed {} turns",
+ summary.matches("Turn").count()
+ )
+ .dimmed()
+ );
}
}
// Build continuation prompt with context
- current_input = build_continuation_prompt(&input, &completed_tools, &agent_thinking);
+ current_input = build_continuation_prompt(
+ &input,
+ &completed_tools,
+ &agent_thinking,
+ );
// Log continuation details for debugging
eprintln!("{}", format!(
@@ -843,7 +985,14 @@ pub async fn run_interactive(
} else if retry_attempt < MAX_RETRIES {
// No tool calls completed - simple retry
retry_attempt += 1;
- eprintln!("{}", format!("ā Response error (attempt {}/{}). Retrying...", retry_attempt, MAX_RETRIES).yellow());
+ eprintln!(
+ "{}",
+ format!(
+ "ā Response error (attempt {}/{}). Retrying...",
+ retry_attempt, MAX_RETRIES
+ )
+ .yellow()
+ );
tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
} else {
// Max retries/continuations reached
@@ -851,16 +1000,30 @@ pub async fn run_interactive(
if continuation_count >= MAX_CONTINUATIONS {
eprintln!("{}", format!("Max continuations ({}) reached. The task is too complex for one request.", MAX_CONTINUATIONS).dimmed());
} else {
- eprintln!("{}", "Max retries reached. The response may be too complex.".dimmed());
+ eprintln!(
+ "{}",
+ "Max retries reached. The response may be too complex."
+ .dimmed()
+ );
}
- eprintln!("{}", "Try breaking your request into smaller parts.".dimmed());
+ eprintln!(
+ "{}",
+ "Try breaking your request into smaller parts.".dimmed()
+ );
break;
}
} else if err_str.contains("timeout") || err_str.contains("Timeout") {
// Timeout - simple retry
retry_attempt += 1;
if retry_attempt < MAX_RETRIES {
- eprintln!("{}", format!("ā Request timed out (attempt {}/{}). Retrying...", retry_attempt, MAX_RETRIES).yellow());
+ eprintln!(
+ "{}",
+ format!(
+ "ā Request timed out (attempt {}/{}). Retrying...",
+ retry_attempt, MAX_RETRIES
+ )
+ .yellow()
+ );
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
} else {
eprintln!("{}", "Request timed out. Please try again.".red());
@@ -870,11 +1033,29 @@ pub async fn run_interactive(
// Unknown error - show details and break
eprintln!("{}", format!("Error: {}", e).red());
if continuation_count > 0 {
- eprintln!("{}", format!(" (occurred during continuation attempt {})", continuation_count).dimmed());
+ eprintln!(
+ "{}",
+ format!(
+ " (occurred during continuation attempt {})",
+ continuation_count
+ )
+ .dimmed()
+ );
}
eprintln!("{}", "Error details for debugging:".dimmed());
- eprintln!("{}", format!(" - retry_attempt: {}/{}", retry_attempt, MAX_RETRIES).dimmed());
- eprintln!("{}", format!(" - continuation_count: {}/{}", continuation_count, MAX_CONTINUATIONS).dimmed());
+ eprintln!(
+ "{}",
+ format!(" - retry_attempt: {}/{}", retry_attempt, MAX_RETRIES)
+ .dimmed()
+ );
+ eprintln!(
+ "{}",
+ format!(
+ " - continuation_count: {}/{}",
+ continuation_count, MAX_CONTINUATIONS
+ )
+ .dimmed()
+ );
break;
}
}
@@ -891,29 +1072,34 @@ async fn extract_tool_calls_from_hook(hook: &ToolDisplayHook) -> Vec String {
fn estimate_raw_history_tokens(messages: &[rig::completion::Message]) -> usize {
use rig::completion::message::{AssistantContent, UserContent};
- messages.iter().map(|msg| -> usize {
- match msg {
- rig::completion::Message::User { content } => {
- content.iter().map(|c| -> usize {
- match c {
- UserContent::Text(t) => t.text.len() / 4,
- _ => 100, // Estimate for images/documents
- }
- }).sum::<usize>()
- }
- rig::completion::Message::Assistant { content, .. } => {
- content.iter().map(|c| -> usize {
- match c {
- AssistantContent::Text(t) => t.text.len() / 4,
- AssistantContent::ToolCall(tc) => {
- // arguments is serde_json::Value, convert to string for length estimate
- let args_len = tc.function.arguments.to_string().len();
- (tc.function.name.len() + args_len) / 4
- }
- _ => 100,
- }
- }).sum::<usize>()
+ messages
+ .iter()
+ .map(|msg| -> usize {
+ match msg {
+ rig::completion::Message::User { content } => {
+ content
+ .iter()
+ .map(|c| -> usize {
+ match c {
+ UserContent::Text(t) => t.text.len() / 4,
+ _ => 100, // Estimate for images/documents
+ }
+ })
+ .sum::<usize>()
+ }
+ rig::completion::Message::Assistant { content, .. } => {
+ content
+ .iter()
+ .map(|c| -> usize {
+ match c {
+ AssistantContent::Text(t) => t.text.len() / 4,
+ AssistantContent::ToolCall(tc) => {
+ // arguments is serde_json::Value, convert to string for length estimate
+ let args_len = tc.function.arguments.to_string().len();
+ (tc.function.name.len() + args_len) / 4
+ }
+ _ => 100,
+ }
+ })
+ .sum::<usize>()
+ }
}
- }
- }).sum()
+ })
+ .sum()
}
/// Find a plan_create tool call in the list and extract plan info
@@ -972,13 +1167,15 @@ fn find_plan_create_call(tool_calls: &[ToolCallRecord]) -> Option<(String, usize)> {
if tc.tool_name == "plan_create" {
// Try to parse the result_summary as JSON to extract plan_path
// Note: result_summary may be truncated, so we have multiple fallbacks
- let plan_path = if let Ok(result) = serde_json::from_str::<serde_json::Value>(&tc.result_summary) {
- result.get("plan_path")
- .and_then(|v| v.as_str())
- .map(|s| s.to_string())
- } else {
- None
- };
+ let plan_path =
+ if let Ok(result) = serde_json::from_str::<serde_json::Value>(&tc.result_summary) {
+ result
+ .get("plan_path")
+ .and_then(|v| v.as_str())
+ .map(|s| s.to_string())
+ } else {
+ None
+ };
// If JSON parsing failed, find the most recently created plan file
// This is more reliable than trying to reconstruct the path from truncated args
@@ -1040,7 +1237,8 @@ fn count_tasks_in_plan_file(plan_path: &str) -> Option<usize> {
// Count task checkboxes: - [ ], - [x], - [~], - [!]
let task_regex = Regex::new(r"^\s*-\s*\[[ x~!]\]").ok()?;
- let count = content.lines()
+ let count = content
+ .lines()
.filter(|line| task_regex.is_match(line))
.count();
@@ -1103,7 +1301,11 @@ fn build_continuation_prompt(
dirs_listed.insert(tool.args_summary.clone());
}
_ => {
- other_tools.push(format!("{}({})", tool.tool_name, truncate_string(&tool.args_summary, 40)));
+ other_tools.push(format!(
+ "{}({})",
+ tool.tool_name,
+ truncate_string(&tool.args_summary, 40)
+ ));
}
}
}
@@ -1194,16 +1396,17 @@ pub async fn run_query(
let model_name = model.as_deref().unwrap_or("gpt-5.2");
// For GPT-5.x reasoning models, enable reasoning with summary output
- let reasoning_params = if model_name.starts_with("gpt-5") || model_name.starts_with("o1") {
- Some(serde_json::json!({
- "reasoning": {
- "effort": "medium",
- "summary": "detailed"
- }
- }))
- } else {
- None
- };
+ let reasoning_params =
+ if model_name.starts_with("gpt-5") || model_name.starts_with("o1") {
+ Some(serde_json::json!({
+ "reasoning": {
+ "effort": "medium",
+ "summary": "detailed"
+ }
+ }))
+ } else {
+ None
+ };
let mut builder = client
.agent(model_name)
@@ -1213,6 +1416,7 @@ pub async fn run_query(
.tool(SecurityScanTool::new(project_path_buf.clone()))
.tool(VulnerabilitiesTool::new(project_path_buf.clone()))
.tool(HadolintTool::new(project_path_buf.clone()))
+ .tool(DclintTool::new(project_path_buf.clone()))
.tool(TerraformFmtTool::new(project_path_buf.clone()))
.tool(TerraformValidateTool::new(project_path_buf.clone()))
.tool(TerraformInstallTool::new())
@@ -1255,6 +1459,7 @@ pub async fn run_query(
.tool(SecurityScanTool::new(project_path_buf.clone()))
.tool(VulnerabilitiesTool::new(project_path_buf.clone()))
.tool(HadolintTool::new(project_path_buf.clone()))
+ .tool(DclintTool::new(project_path_buf.clone()))
.tool(TerraformFmtTool::new(project_path_buf.clone()))
.tool(TerraformValidateTool::new(project_path_buf.clone()))
.tool(TerraformInstallTool::new())
@@ -1280,7 +1485,9 @@ pub async fn run_query(
ProviderType::Bedrock => {
// Bedrock provider via rig-bedrock - same pattern as Anthropic
let client = rig_bedrock::client::Client::from_env();
- let model_name = model.as_deref().unwrap_or("global.anthropic.claude-sonnet-4-5-20250929-v1:0");
+ let model_name = model
+ .as_deref()
+ .unwrap_or("global.anthropic.claude-sonnet-4-5-20250929-v1:0");
// Extended thinking for Claude via Bedrock
let thinking_params = serde_json::json!({
@@ -1298,6 +1505,7 @@ pub async fn run_query(
.tool(SecurityScanTool::new(project_path_buf.clone()))
.tool(VulnerabilitiesTool::new(project_path_buf.clone()))
.tool(HadolintTool::new(project_path_buf.clone()))
+ .tool(DclintTool::new(project_path_buf.clone()))
.tool(TerraformFmtTool::new(project_path_buf.clone()))
.tool(TerraformValidateTool::new(project_path_buf.clone()))
.tool(TerraformInstallTool::new())
@@ -1312,9 +1520,7 @@ pub async fn run_query(
.tool(ShellTool::new(project_path_buf.clone()));
}
- let agent = builder
- .additional_params(thinking_params)
- .build();
+ let agent = builder.additional_params(thinking_params).build();
agent
.prompt(query)
diff --git a/src/agent/prompts/mod.rs b/src/agent/prompts/mod.rs
index f35c2999..1fd28727 100644
--- a/src/agent/prompts/mod.rs
+++ b/src/agent/prompts/mod.rs
@@ -447,17 +447,42 @@ chart/
pub fn is_generation_query(query: &str) -> bool {
let query_lower = query.to_lowercase();
let generation_keywords = [
- "create", "generate", "write", "make", "build",
- "dockerfile", "docker-compose", "docker compose",
- "terraform", "helm", "kubernetes", "k8s",
- "manifest", "chart", "module", "infrastructure",
- "containerize", "containerise", "deploy", "ci/cd", "pipeline",
+ "create",
+ "generate",
+ "write",
+ "make",
+ "build",
+ "dockerfile",
+ "docker-compose",
+ "docker compose",
+ "terraform",
+ "helm",
+ "kubernetes",
+ "k8s",
+ "manifest",
+ "chart",
+ "module",
+ "infrastructure",
+ "containerize",
+ "containerise",
+ "deploy",
+ "ci/cd",
+ "pipeline",
// Code development keywords
- "implement", "translate", "port", "convert", "refactor",
- "add feature", "new feature", "develop", "code",
+ "implement",
+ "translate",
+ "port",
+ "convert",
+ "refactor",
+ "add feature",
+ "new feature",
+ "develop",
+ "code",
];
- generation_keywords.iter().any(|kw| query_lower.contains(kw))
+ generation_keywords
+ .iter()
+ .any(|kw| query_lower.contains(kw))
}
/// Get the planning mode prompt (read-only exploration)
@@ -540,16 +565,26 @@ Task status markers:
pub fn is_plan_continuation_query(query: &str) -> bool {
let query_lower = query.to_lowercase();
let continuation_keywords = [
- "continue", "resume", "pick up", "carry on",
- "where we left off", "where i left off", "where it left off",
- "finish the plan", "complete the plan",
- "continue the plan", "resume the plan",
+ "continue",
+ "resume",
+ "pick up",
+ "carry on",
+ "where we left off",
+ "where i left off",
+ "where it left off",
+ "finish the plan",
+ "complete the plan",
+ "continue the plan",
+ "resume the plan",
];
let plan_keywords = ["plan", "task", "tasks"];
// Direct continuation phrases
- if continuation_keywords.iter().any(|kw| query_lower.contains(kw)) {
+ if continuation_keywords
+ .iter()
+ .any(|kw| query_lower.contains(kw))
+ {
return true;
}
@@ -567,10 +602,21 @@ pub fn is_code_development_query(query: &str) -> bool {
// DevOps-specific terms - if these appear, it's DevOps not code dev
let devops_keywords = [
- "dockerfile", "docker-compose", "docker compose",
- "terraform", "helm", "kubernetes", "k8s",
- "manifest", "chart", "infrastructure",
- "containerize", "containerise", "deploy", "ci/cd", "pipeline",
+ "dockerfile",
+ "docker-compose",
+ "docker compose",
+ "terraform",
+ "helm",
+ "kubernetes",
+ "k8s",
+ "manifest",
+ "chart",
+ "infrastructure",
+ "containerize",
+ "containerise",
+ "deploy",
+ "ci/cd",
+ "pipeline",
];
// If it's clearly DevOps, return false
@@ -580,11 +626,30 @@ pub fn is_code_development_query(query: &str) -> bool {
// Code development keywords
let code_keywords = [
- "implement", "translate", "port", "convert", "refactor",
- "add feature", "new feature", "develop", "module", "library",
- "crate", "function", "class", "struct", "trait",
- "rust", "python", "javascript", "typescript", "haskell",
- "code", "rewrite", "build a", "create a",
+ "implement",
+ "translate",
+ "port",
+ "convert",
+ "refactor",
+ "add feature",
+ "new feature",
+ "develop",
+ "module",
+ "library",
+ "crate",
+ "function",
+ "class",
+ "struct",
+ "trait",
+ "rust",
+ "python",
+ "javascript",
+ "typescript",
+ "haskell",
+ "code",
+ "rewrite",
+ "build a",
+ "create a",
];
code_keywords.iter().any(|kw| query_lower.contains(kw))
diff --git a/src/agent/session.rs b/src/agent/session.rs
index f036af1a..52576e82 100644
--- a/src/agent/session.rs
+++ b/src/agent/session.rs
@@ -8,9 +8,9 @@
//! - `/clear` - Clear conversation history
//! - `/exit` or `/quit` - Exit the session
-use crate::agent::commands::{TokenUsage, SLASH_COMMANDS};
-use crate::agent::{AgentError, AgentResult, ProviderType};
+use crate::agent::commands::{SLASH_COMMANDS, TokenUsage};
use crate::agent::ui::ansi;
+use crate::agent::{AgentError, AgentResult, ProviderType};
use crate::config::{load_agent_config, save_agent_config};
use colored::Colorize;
use std::io::{self, Write};
@@ -63,13 +63,15 @@ pub fn find_incomplete_plans(project_path: &std::path::Path) -> Vec 0 && (pending > 0 || in_progress > 0) {
- let rel_path = path.strip_prefix(project_path)
+ let rel_path = path
+ .strip_prefix(project_path)
.map(|p| p.display().to_string())
.unwrap_or_else(|_| path.display().to_string());
incomplete.push(IncompletePlan {
path: rel_path,
- filename: path.file_name()
+ filename: path
+ .file_name()
.map(|n| n.to_string_lossy().to_string())
.unwrap_or_default(),
done,
@@ -130,17 +132,38 @@ pub fn get_available_models(provider: ProviderType) -> Vec<(&'static str, &'static str)> {
("o1-preview", "o1-preview - Advanced reasoning"),
],
ProviderType::Anthropic => vec![
- ("claude-opus-4-5-20251101", "Claude Opus 4.5 - Most capable (Nov 2025)"),
- ("claude-sonnet-4-5-20250929", "Claude Sonnet 4.5 - Balanced (Sep 2025)"),
- ("claude-haiku-4-5-20251001", "Claude Haiku 4.5 - Fast (Oct 2025)"),
+ (
+ "claude-opus-4-5-20251101",
+ "Claude Opus 4.5 - Most capable (Nov 2025)",
+ ),
+ (
+ "claude-sonnet-4-5-20250929",
+ "Claude Sonnet 4.5 - Balanced (Sep 2025)",
+ ),
+ (
+ "claude-haiku-4-5-20251001",
+ "Claude Haiku 4.5 - Fast (Oct 2025)",
+ ),
("claude-sonnet-4-20250514", "Claude Sonnet 4 - Previous gen"),
],
// Bedrock models - use cross-region inference profile format (global. prefix)
ProviderType::Bedrock => vec![
- ("global.anthropic.claude-opus-4-5-20251101-v1:0", "Claude Opus 4.5 - Most capable (Nov 2025)"),
- ("global.anthropic.claude-sonnet-4-5-20250929-v1:0", "Claude Sonnet 4.5 - Balanced (Sep 2025)"),
- ("global.anthropic.claude-haiku-4-5-20251001-v1:0", "Claude Haiku 4.5 - Fast (Oct 2025)"),
- ("global.anthropic.claude-sonnet-4-20250514-v1:0", "Claude Sonnet 4 - Previous gen"),
+ (
+ "global.anthropic.claude-opus-4-5-20251101-v1:0",
+ "Claude Opus 4.5 - Most capable (Nov 2025)",
+ ),
+ (
+ "global.anthropic.claude-sonnet-4-5-20250929-v1:0",
+ "Claude Sonnet 4.5 - Balanced (Sep 2025)",
+ ),
+ (
+ "global.anthropic.claude-haiku-4-5-20251001-v1:0",
+ "Claude Haiku 4.5 - Fast (Oct 2025)",
+ ),
+ (
+ "global.anthropic.claude-sonnet-4-20250514-v1:0",
+ "Claude Sonnet 4 - Previous gen",
+ ),
],
}
}
@@ -193,7 +216,9 @@ impl ChatSession {
ProviderType::Anthropic => std::env::var("ANTHROPIC_API_KEY").ok(),
ProviderType::Bedrock => {
// Check for AWS credentials from env vars
- if std::env::var("AWS_ACCESS_KEY_ID").is_ok() && std::env::var("AWS_SECRET_ACCESS_KEY").is_ok() {
+ if std::env::var("AWS_ACCESS_KEY_ID").is_ok()
+ && std::env::var("AWS_SECRET_ACCESS_KEY").is_ok()
+ {
return true;
}
if std::env::var("AWS_PROFILE").is_ok() {
@@ -215,19 +240,31 @@ impl ChatSession {
if let Some(profile) = agent_config.profiles.get(profile_name) {
match provider {
ProviderType::OpenAI => {
- if profile.openai.as_ref().map(|o| !o.api_key.is_empty()).unwrap_or(false) {
+ if profile
+ .openai
+ .as_ref()
+ .map(|o| !o.api_key.is_empty())
+ .unwrap_or(false)
+ {
return true;
}
}
ProviderType::Anthropic => {
- if profile.anthropic.as_ref().map(|a| !a.api_key.is_empty()).unwrap_or(false) {
+ if profile
+ .anthropic
+ .as_ref()
+ .map(|a| !a.api_key.is_empty())
+ .unwrap_or(false)
+ {
return true;
}
}
ProviderType::Bedrock => {
if let Some(bedrock) = &profile.bedrock {
- if bedrock.profile.is_some() ||
- (bedrock.access_key_id.is_some() && bedrock.secret_access_key.is_some()) {
+ if bedrock.profile.is_some()
+ || (bedrock.access_key_id.is_some()
+ && bedrock.secret_access_key.is_some())
+ {
return true;
}
}
@@ -240,19 +277,31 @@ impl ChatSession {
for profile in agent_config.profiles.values() {
match provider {
ProviderType::OpenAI => {
- if profile.openai.as_ref().map(|o| !o.api_key.is_empty()).unwrap_or(false) {
+ if profile
+ .openai
+ .as_ref()
+ .map(|o| !o.api_key.is_empty())
+ .unwrap_or(false)
+ {
return true;
}
}
ProviderType::Anthropic => {
- if profile.anthropic.as_ref().map(|a| !a.api_key.is_empty()).unwrap_or(false) {
+ if profile
+ .anthropic
+ .as_ref()
+ .map(|a| !a.api_key.is_empty())
+ .unwrap_or(false)
+ {
return true;
}
}
ProviderType::Bedrock => {
if let Some(bedrock) = &profile.bedrock {
- if bedrock.profile.is_some() ||
- (bedrock.access_key_id.is_some() && bedrock.secret_access_key.is_some()) {
+ if bedrock.profile.is_some()
+ || (bedrock.access_key_id.is_some()
+ && bedrock.secret_access_key.is_some())
+ {
return true;
}
}
@@ -266,21 +315,23 @@ impl ChatSession {
ProviderType::Anthropic => agent_config.anthropic_api_key.is_some(),
ProviderType::Bedrock => {
if let Some(bedrock) = &agent_config.bedrock {
- bedrock.profile.is_some() ||
- (bedrock.access_key_id.is_some() && bedrock.secret_access_key.is_some())
+ bedrock.profile.is_some()
+ || (bedrock.access_key_id.is_some() && bedrock.secret_access_key.is_some())
} else {
agent_config.bedrock_configured.unwrap_or(false)
}
}
}
}
-
+
/// Load API key from config if not in env, and set it in env for use
pub fn load_api_key_to_env(provider: ProviderType) {
let agent_config = load_agent_config();
// Try to get credentials from active global profile first
- let active_profile = agent_config.active_profile.as_ref()
+ let active_profile = agent_config
+ .active_profile
+ .as_ref()
.and_then(|name| agent_config.profiles.get(name));
match provider {
@@ -294,12 +345,16 @@ impl ChatSession {
.map(|o| o.api_key.clone())
.filter(|k| !k.is_empty())
{
- unsafe { std::env::set_var("OPENAI_API_KEY", &key); }
+ unsafe {
+ std::env::set_var("OPENAI_API_KEY", &key);
+ }
return;
}
// Fall back to legacy key
if let Some(key) = &agent_config.openai_api_key {
- unsafe { std::env::set_var("OPENAI_API_KEY", key); }
+ unsafe {
+ std::env::set_var("OPENAI_API_KEY", key);
+ }
}
}
ProviderType::Anthropic => {
@@ -312,12 +367,16 @@ impl ChatSession {
.map(|a| a.api_key.clone())
.filter(|k| !k.is_empty())
{
- unsafe { std::env::set_var("ANTHROPIC_API_KEY", &key); }
+ unsafe {
+ std::env::set_var("ANTHROPIC_API_KEY", &key);
+ }
return;
}
// Fall back to legacy key
if let Some(key) = &agent_config.anthropic_api_key {
- unsafe { std::env::set_var("ANTHROPIC_API_KEY", key); }
+ unsafe {
+ std::env::set_var("ANTHROPIC_API_KEY", key);
+ }
}
}
ProviderType::Bedrock => {
@@ -330,20 +389,30 @@ impl ChatSession {
// Load region
if std::env::var("AWS_REGION").is_err() {
if let Some(region) = &bedrock.region {
- unsafe { std::env::set_var("AWS_REGION", region); }
+ unsafe {
+ std::env::set_var("AWS_REGION", region);
+ }
}
}
// Load profile OR access keys (profile takes precedence)
if let Some(profile) = &bedrock.profile {
if std::env::var("AWS_PROFILE").is_err() {
- unsafe { std::env::set_var("AWS_PROFILE", profile); }
+ unsafe {
+ std::env::set_var("AWS_PROFILE", profile);
+ }
}
- } else if let (Some(key_id), Some(secret)) = (&bedrock.access_key_id, &bedrock.secret_access_key) {
+ } else if let (Some(key_id), Some(secret)) =
+ (&bedrock.access_key_id, &bedrock.secret_access_key)
+ {
if std::env::var("AWS_ACCESS_KEY_ID").is_err() {
- unsafe { std::env::set_var("AWS_ACCESS_KEY_ID", key_id); }
+ unsafe {
+ std::env::set_var("AWS_ACCESS_KEY_ID", key_id);
+ }
}
if std::env::var("AWS_SECRET_ACCESS_KEY").is_err() {
- unsafe { std::env::set_var("AWS_SECRET_ACCESS_KEY", secret); }
+ unsafe {
+ std::env::set_var("AWS_SECRET_ACCESS_KEY", secret);
+ }
}
}
}
@@ -368,9 +437,15 @@ impl ChatSession {
use crate::config::types::BedrockConfig as BedrockConfigType;
println!();
- println!("{}", "āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā".cyan());
+ println!(
+ "{}",
+ "āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā".cyan()
+ );
println!("{}", " š§ AWS Bedrock Setup Wizard".cyan().bold());
- println!("{}", "āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā".cyan());
+ println!(
+ "{}",
+ "āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā".cyan()
+ );
println!();
println!("AWS Bedrock provides access to Claude models via AWS.");
println!("You'll need an AWS account with Bedrock access enabled.");
@@ -379,20 +454,34 @@ impl ChatSession {
// Step 1: Choose authentication method
println!("{}", "Step 1: Choose authentication method".white().bold());
println!();
- println!(" {} Use AWS Profile (from ~/.aws/credentials)", "[1]".cyan());
- println!(" {}", "Best for: AWS CLI users, SSO, multiple accounts".dimmed());
+ println!(
+ " {} Use AWS Profile (from ~/.aws/credentials)",
+ "[1]".cyan()
+ );
+ println!(
+ " {}",
+ "Best for: AWS CLI users, SSO, multiple accounts".dimmed()
+ );
println!();
println!(" {} Enter Access Keys directly", "[2]".cyan());
- println!(" {}", "Best for: Quick setup, CI/CD environments".dimmed());
+ println!(
+ " {}",
+ "Best for: Quick setup, CI/CD environments".dimmed()
+ );
println!();
println!(" {} Use existing environment variables", "[3]".cyan());
- println!(" {}", "Best for: Already configured AWS_* env vars".dimmed());
+ println!(
+ " {}",
+ "Best for: Already configured AWS_* env vars".dimmed()
+ );
println!();
print!("Enter choice [1-3]: ");
io::stdout().flush().unwrap();
let mut choice = String::new();
- io::stdin().read_line(&mut choice).map_err(|e| AgentError::ToolError(e.to_string()))?;
+ io::stdin()
+ .read_line(&mut choice)
+ .map_err(|e| AgentError::ToolError(e.to_string()))?;
let choice = choice.trim();
let mut bedrock_config = BedrockConfigType::default();
@@ -407,27 +496,40 @@ impl ChatSession {
io::stdout().flush().unwrap();
let mut profile = String::new();
- io::stdin().read_line(&mut profile).map_err(|e| AgentError::ToolError(e.to_string()))?;
+ io::stdin()
+ .read_line(&mut profile)
+ .map_err(|e| AgentError::ToolError(e.to_string()))?;
let profile = profile.trim();
- let profile = if profile.is_empty() { "default" } else { profile };
+ let profile = if profile.is_empty() {
+ "default"
+ } else {
+ profile
+ };
bedrock_config.profile = Some(profile.to_string());
// Set in env for current session
- unsafe { std::env::set_var("AWS_PROFILE", profile); }
+ unsafe {
+ std::env::set_var("AWS_PROFILE", profile);
+ }
println!("{}", format!("ā Using profile: {}", profile).green());
}
"2" => {
// Access Keys
println!();
println!("{}", "Step 2: Enter AWS Access Keys".white().bold());
- println!("{}", "Get these from AWS Console ā IAM ā Security credentials".dimmed());
+ println!(
+ "{}",
+ "Get these from AWS Console ā IAM ā Security credentials".dimmed()
+ );
println!();
print!("AWS Access Key ID: ");
io::stdout().flush().unwrap();
let mut access_key = String::new();
- io::stdin().read_line(&mut access_key).map_err(|e| AgentError::ToolError(e.to_string()))?;
+ io::stdin()
+ .read_line(&mut access_key)
+ .map_err(|e| AgentError::ToolError(e.to_string()))?;
let access_key = access_key.trim().to_string();
if access_key.is_empty() {
@@ -437,11 +539,15 @@ impl ChatSession {
print!("AWS Secret Access Key: ");
io::stdout().flush().unwrap();
let mut secret_key = String::new();
- io::stdin().read_line(&mut secret_key).map_err(|e| AgentError::ToolError(e.to_string()))?;
+ io::stdin()
+ .read_line(&mut secret_key)
+ .map_err(|e| AgentError::ToolError(e.to_string()))?;
let secret_key = secret_key.trim().to_string();
if secret_key.is_empty() {
- return Err(AgentError::MissingApiKey("AWS_SECRET_ACCESS_KEY".to_string()));
+ return Err(AgentError::MissingApiKey(
+ "AWS_SECRET_ACCESS_KEY".to_string(),
+ ));
}
bedrock_config.access_key_id = Some(access_key.clone());
@@ -474,9 +580,15 @@ impl ChatSession {
if bedrock_config.region.is_none() {
println!();
println!("{}", "Step 2: Select AWS Region".white().bold());
- println!("{}", "Bedrock is available in select regions. Common choices:".dimmed());
+ println!(
+ "{}",
+ "Bedrock is available in select regions. Common choices:".dimmed()
+ );
println!();
- println!(" {} us-east-1 (N. Virginia) - Most models", "[1]".cyan());
+ println!(
+ " {} us-east-1 (N. Virginia) - Most models",
+ "[1]".cyan()
+ );
println!(" {} us-west-2 (Oregon)", "[2]".cyan());
println!(" {} eu-west-1 (Ireland)", "[3]".cyan());
println!(" {} ap-northeast-1 (Tokyo)", "[4]".cyan());
@@ -485,7 +597,9 @@ impl ChatSession {
io::stdout().flush().unwrap();
let mut region_choice = String::new();
- io::stdin().read_line(&mut region_choice).map_err(|e| AgentError::ToolError(e.to_string()))?;
+ io::stdin()
+ .read_line(&mut region_choice)
+ .map_err(|e| AgentError::ToolError(e.to_string()))?;
let region = match region_choice.trim() {
"1" | "" => "us-east-1",
"2" => "us-west-2",
@@ -495,7 +609,9 @@ impl ChatSession {
};
bedrock_config.region = Some(region.to_string());
- unsafe { std::env::set_var("AWS_REGION", region); }
+ unsafe {
+ std::env::set_var("AWS_REGION", region);
+ }
println!("{}", format!("ā Region: {}", region).green());
}
@@ -514,13 +630,26 @@ impl ChatSession {
io::stdout().flush().unwrap();
let mut model_choice = String::new();
- io::stdin().read_line(&mut model_choice).map_err(|e| AgentError::ToolError(e.to_string()))?;
+ io::stdin()
+ .read_line(&mut model_choice)
+ .map_err(|e| AgentError::ToolError(e.to_string()))?;
let model_idx: usize = model_choice.trim().parse().unwrap_or(1);
let model_idx = model_idx.saturating_sub(1).min(models.len() - 1);
let selected_model = models[model_idx].0.to_string();
bedrock_config.default_model = Some(selected_model.clone());
- println!("{}", format!("ā Default model: {}", models[model_idx].1.split(" - ").next().unwrap_or(&selected_model)).green());
+ println!(
+ "{}",
+ format!(
+ "ā Default model: {}",
+ models[model_idx]
+ .1
+ .split(" - ")
+ .next()
+ .unwrap_or(&selected_model)
+ )
+ .green()
+ );
// Save configuration
let mut agent_config = load_agent_config();
@@ -528,16 +657,25 @@ impl ChatSession {
agent_config.bedrock_configured = Some(true);
if let Err(e) = save_agent_config(&agent_config) {
- eprintln!("{}", format!("Warning: Could not save config: {}", e).yellow());
+ eprintln!(
+ "{}",
+ format!("Warning: Could not save config: {}", e).yellow()
+ );
} else {
println!();
println!("{}", "ā Configuration saved to ~/.syncable.toml".green());
}
println!();
- println!("{}", "āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā".cyan());
+ println!(
+ "{}",
+ "āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā".cyan()
+ );
println!("{}", " ā
AWS Bedrock setup complete!".green().bold());
- println!("{}", "āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā".cyan());
+ println!(
+ "{}",
+ "āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā".cyan()
+ );
println!();
Ok(selected_model)
@@ -553,16 +691,21 @@ impl ChatSession {
let env_var = match provider {
ProviderType::OpenAI => "OPENAI_API_KEY",
ProviderType::Anthropic => "ANTHROPIC_API_KEY",
- ProviderType::Bedrock => unreachable!(), // Handled above
+ ProviderType::Bedrock => unreachable!(), // Handled above
};
- println!("\n{}", format!("š No API key found for {}", provider).yellow());
+ println!(
+ "\n{}",
+ format!("š No API key found for {}", provider).yellow()
+ );
println!("Please enter your {} API key:", provider);
print!("> ");
io::stdout().flush().unwrap();
let mut key = String::new();
- io::stdin().read_line(&mut key).map_err(|e| AgentError::ToolError(e.to_string()))?;
+ io::stdin()
+ .read_line(&mut key)
+ .map_err(|e| AgentError::ToolError(e.to_string()))?;
let key = key.trim().to_string();
if key.is_empty() {
@@ -580,11 +723,14 @@ impl ChatSession {
match provider {
ProviderType::OpenAI => agent_config.openai_api_key = Some(key.clone()),
ProviderType::Anthropic => agent_config.anthropic_api_key = Some(key.clone()),
- ProviderType::Bedrock => unreachable!(), // Handled above
+ ProviderType::Bedrock => unreachable!(), // Handled above
}
if let Err(e) = save_agent_config(&agent_config) {
- eprintln!("{}", format!("Warning: Could not save config: {}", e).yellow());
+ eprintln!(
+ "{}",
+ format!("Warning: Could not save config: {}", e).yellow()
+ );
} else {
println!("{}", "ā API key saved to ~/.syncable.toml".green());
}
@@ -595,30 +741,41 @@ impl ChatSession {
/// Handle /model command - interactive model selection
pub fn handle_model_command(&mut self) -> AgentResult<()> {
let models = get_available_models(self.provider);
-
- println!("\n{}", format!("š Available models for {}:", self.provider).cyan().bold());
+
+ println!(
+ "\n{}",
+ format!("š Available models for {}:", self.provider)
+ .cyan()
+ .bold()
+ );
println!();
-
+
for (i, (id, desc)) in models.iter().enumerate() {
let marker = if *id == self.model { "ā " } else { " " };
let num = format!("[{}]", i + 1);
- println!(" {} {} {} - {}", marker, num.dimmed(), id.white().bold(), desc.dimmed());
+ println!(
+ " {} {} {} - {}",
+ marker,
+ num.dimmed(),
+ id.white().bold(),
+ desc.dimmed()
+ );
}
-
+
println!();
println!("Enter number to select, or press Enter to keep current:");
print!("> ");
io::stdout().flush().unwrap();
-
+
let mut input = String::new();
io::stdin().read_line(&mut input).ok();
let input = input.trim();
-
+
if input.is_empty() {
println!("{}", format!("Keeping model: {}", self.model).dimmed());
return Ok(());
}
-
+
if let Ok(num) = input.parse::() {
if num >= 1 && num <= models.len() {
let (id, desc) = models[num - 1];
@@ -628,7 +785,10 @@ impl ChatSession {
let mut agent_config = load_agent_config();
agent_config.default_model = Some(id.to_string());
if let Err(e) = save_agent_config(&agent_config) {
- eprintln!("{}", format!("Warning: Could not save config: {}", e).yellow());
+ eprintln!(
+ "{}",
+ format!("Warning: Could not save config: {}", e).yellow()
+ );
}
println!("{}", format!("ā Switched to {} - {}", id, desc).green());
@@ -643,7 +803,10 @@ impl ChatSession {
let mut agent_config = load_agent_config();
agent_config.default_model = Some(input.to_string());
if let Err(e) = save_agent_config(&agent_config) {
- eprintln!("{}", format!("Warning: Could not save config: {}", e).yellow());
+ eprintln!(
+ "{}",
+ format!("Warning: Could not save config: {}", e).yellow()
+ );
}
println!("{}", format!("ā Set model to: {}", input).green());
@@ -654,31 +817,45 @@ impl ChatSession {
/// Handle /provider command - switch provider with API key prompt if needed
pub fn handle_provider_command(&mut self) -> AgentResult<()> {
- let providers = [ProviderType::OpenAI, ProviderType::Anthropic, ProviderType::Bedrock];
-
+ let providers = [
+ ProviderType::OpenAI,
+ ProviderType::Anthropic,
+ ProviderType::Bedrock,
+ ];
+
println!("\n{}", "š Available providers:".cyan().bold());
println!();
-
+
for (i, provider) in providers.iter().enumerate() {
- let marker = if *provider == self.provider { "ā " } else { " " };
+ let marker = if *provider == self.provider {
+ "ā "
+ } else {
+ " "
+ };
let has_key = if Self::has_api_key(*provider) {
"ā API key configured".green()
} else {
"ā No API key".yellow()
};
let num = format!("[{}]", i + 1);
- println!(" {} {} {} - {}", marker, num.dimmed(), provider.to_string().white().bold(), has_key);
+ println!(
+ " {} {} {} - {}",
+ marker,
+ num.dimmed(),
+ provider.to_string().white().bold(),
+ has_key
+ );
}
-
+
println!();
println!("Enter number to select:");
print!("> ");
io::stdout().flush().unwrap();
-
+
let mut input = String::new();
io::stdin().read_line(&mut input).ok();
let input = input.trim();
-
+
if let Ok(num) = input.parse::() {
if num >= 1 && num <= providers.len() {
let new_provider = providers[num - 1];
@@ -701,9 +878,12 @@ impl ChatSession {
ProviderType::Bedrock => {
// Use saved model preference if available
let agent_config = load_agent_config();
- agent_config.bedrock
+ agent_config
+ .bedrock
.and_then(|b| b.default_model)
- .unwrap_or_else(|| "global.anthropic.claude-sonnet-4-5-20250929-v1:0".to_string())
+ .unwrap_or_else(|| {
+ "global.anthropic.claude-sonnet-4-5-20250929-v1:0".to_string()
+ })
}
};
self.model = default_model.clone();
@@ -713,21 +893,35 @@ impl ChatSession {
agent_config.default_provider = new_provider.to_string();
agent_config.default_model = Some(default_model.clone());
if let Err(e) = save_agent_config(&agent_config) {
- eprintln!("{}", format!("Warning: Could not save config: {}", e).yellow());
+ eprintln!(
+ "{}",
+ format!("Warning: Could not save config: {}", e).yellow()
+ );
}
- println!("{}", format!("ā Switched to {} with model {}", new_provider, default_model).green());
+ println!(
+ "{}",
+ format!(
+ "ā Switched to {} with model {}",
+ new_provider, default_model
+ )
+ .green()
+ );
} else {
println!("{}", "Invalid selection".red());
}
}
-
+
Ok(())
}
/// Handle /reset command - reset provider credentials
pub fn handle_reset_command(&mut self) -> AgentResult<()> {
- let providers = [ProviderType::OpenAI, ProviderType::Anthropic, ProviderType::Bedrock];
+ let providers = [
+ ProviderType::OpenAI,
+ ProviderType::Anthropic,
+ ProviderType::Bedrock,
+ ];
println!("\n{}", "š Reset Provider Credentials".cyan().bold());
println!();
@@ -739,7 +933,12 @@ impl ChatSession {
"ā not configured".dimmed()
};
let num = format!("[{}]", i + 1);
- println!(" {} {} - {}", num.dimmed(), provider.to_string().white().bold(), status);
+ println!(
+ " {} {} - {}",
+ num.dimmed(),
+ provider.to_string().white().bold(),
+ status
+ );
}
println!(" {} All providers", "[4]".dimmed());
println!();
@@ -762,12 +961,16 @@ impl ChatSession {
"1" => {
agent_config.openai_api_key = None;
// SAFETY: Single-threaded CLI context during command handling
- unsafe { std::env::remove_var("OPENAI_API_KEY"); }
+ unsafe {
+ std::env::remove_var("OPENAI_API_KEY");
+ }
println!("{}", "ā OpenAI credentials cleared".green());
}
"2" => {
agent_config.anthropic_api_key = None;
- unsafe { std::env::remove_var("ANTHROPIC_API_KEY"); }
+ unsafe {
+ std::env::remove_var("ANTHROPIC_API_KEY");
+ }
println!("{}", "ā Anthropic credentials cleared".green());
}
"3" => {
@@ -806,7 +1009,10 @@ impl ChatSession {
// Save updated config
if let Err(e) = save_agent_config(&agent_config) {
- eprintln!("{}", format!("Warning: Could not save config: {}", e).yellow());
+ eprintln!(
+ "{}",
+ format!("Warning: Could not save config: {}", e).yellow()
+ );
} else {
println!("{}", "Configuration saved to ~/.syncable.toml".dimmed());
}
@@ -823,7 +1029,11 @@ impl ChatSession {
if current_cleared {
println!();
println!("{}", "Current provider credentials were cleared.".yellow());
- println!("Use {} to reconfigure or {} to switch providers.", "/provider".cyan(), "/p".cyan());
+ println!(
+ "Use {} to reconfigure or {} to switch providers.",
+ "/provider".cyan(),
+ "/p".cyan()
+ );
}
Ok(())
@@ -831,7 +1041,7 @@ impl ChatSession {
/// Handle /profile command - manage global profiles
pub fn handle_profile_command(&mut self) -> AgentResult<()> {
- use crate::config::types::{Profile, OpenAIProfile, AnthropicProfile};
+ use crate::config::types::{AnthropicProfile, OpenAIProfile, Profile};
let mut agent_config = load_agent_config();
@@ -886,7 +1096,11 @@ impl ChatSession {
let desc = desc.trim();
let profile = Profile {
- description: if desc.is_empty() { None } else { Some(desc.to_string()) },
+ description: if desc.is_empty() {
+ None
+ } else {
+ Some(desc.to_string())
+ },
default_provider: None,
default_model: None,
openai: None,
@@ -902,16 +1116,25 @@ impl ChatSession {
}
if let Err(e) = save_agent_config(&agent_config) {
- eprintln!("{}", format!("Warning: Could not save config: {}", e).yellow());
+ eprintln!(
+ "{}",
+ format!("Warning: Could not save config: {}", e).yellow()
+ );
}
println!("{}", format!("ā Profile '{}' created", name).green());
- println!("{}", "Use option [3] to configure providers for this profile".dimmed());
+ println!(
+ "{}",
+ "Use option [3] to configure providers for this profile".dimmed()
+ );
}
"2" => {
// Switch active profile
if agent_config.profiles.is_empty() {
- println!("{}", "No profiles configured. Create one first with option [1].".yellow());
+ println!(
+ "{}",
+ "No profiles configured. Create one first with option [1].".yellow()
+ );
return Ok(());
}
@@ -937,18 +1160,28 @@ impl ChatSession {
if let Some(profile) = agent_config.profiles.get(&name) {
// Clear old env vars and load new ones
if let Some(openai) = &profile.openai {
- unsafe { std::env::set_var("OPENAI_API_KEY", &openai.api_key); }
+ unsafe {
+ std::env::set_var("OPENAI_API_KEY", &openai.api_key);
+ }
}
if let Some(anthropic) = &profile.anthropic {
- unsafe { std::env::set_var("ANTHROPIC_API_KEY", &anthropic.api_key); }
+ unsafe {
+ std::env::set_var("ANTHROPIC_API_KEY", &anthropic.api_key);
+ }
}
if let Some(bedrock) = &profile.bedrock {
if let Some(region) = &bedrock.region {
- unsafe { std::env::set_var("AWS_REGION", region); }
+ unsafe {
+ std::env::set_var("AWS_REGION", region);
+ }
}
if let Some(aws_profile) = &bedrock.profile {
- unsafe { std::env::set_var("AWS_PROFILE", aws_profile); }
- } else if let (Some(key_id), Some(secret)) = (&bedrock.access_key_id, &bedrock.secret_access_key) {
+ unsafe {
+ std::env::set_var("AWS_PROFILE", aws_profile);
+ }
+ } else if let (Some(key_id), Some(secret)) =
+ (&bedrock.access_key_id, &bedrock.secret_access_key)
+ {
unsafe {
std::env::set_var("AWS_ACCESS_KEY_ID", key_id);
std::env::set_var("AWS_SECRET_ACCESS_KEY", secret);
@@ -965,7 +1198,10 @@ impl ChatSession {
}
if let Err(e) = save_agent_config(&agent_config) {
- eprintln!("{}", format!("Warning: Could not save config: {}", e).yellow());
+ eprintln!(
+ "{}",
+ format!("Warning: Could not save config: {}", e).yellow()
+ );
}
println!("{}", format!("ā Switched to profile '{}'", name).green());
@@ -975,7 +1211,10 @@ impl ChatSession {
let profile_name = if let Some(name) = &agent_config.active_profile {
name.clone()
} else if agent_config.profiles.is_empty() {
- println!("{}", "No profiles configured. Create one first with option [1].".yellow());
+ println!(
+ "{}",
+ "No profiles configured. Create one first with option [1].".yellow()
+ );
return Ok(());
} else {
print!("Enter profile name to configure: ");
@@ -995,7 +1234,12 @@ impl ChatSession {
return Ok(());
}
- println!("\n{}", format!("Configure provider for '{}':", profile_name).white().bold());
+ println!(
+ "\n{}",
+ format!("Configure provider for '{}':", profile_name)
+ .white()
+ .bold()
+ );
println!(" {} OpenAI", "[1]".cyan());
println!(" {} Anthropic", "[2]".cyan());
println!(" {} AWS Bedrock", "[3]".cyan());
@@ -1026,7 +1270,10 @@ impl ChatSession {
default_model: None,
});
}
- println!("{}", format!("ā OpenAI configured for profile '{}'", profile_name).green());
+ println!(
+ "{}",
+ format!("ā OpenAI configured for profile '{}'", profile_name).green()
+ );
}
"2" => {
// Configure Anthropic
@@ -1048,7 +1295,11 @@ impl ChatSession {
default_model: None,
});
}
- println!("{}", format!("ā Anthropic configured for profile '{}'", profile_name).green());
+ println!(
+ "{}",
+ format!("ā Anthropic configured for profile '{}'", profile_name)
+ .green()
+ );
}
"3" => {
// Configure Bedrock - use the wizard
@@ -1063,7 +1314,10 @@ impl ChatSession {
profile.default_model = Some(selected_model);
}
}
- println!("{}", format!("ā Bedrock configured for profile '{}'", profile_name).green());
+ println!(
+ "{}",
+ format!("ā Bedrock configured for profile '{}'", profile_name).green()
+ );
}
_ => {
println!("{}", "Invalid selection".red());
@@ -1072,7 +1326,10 @@ impl ChatSession {
}
if let Err(e) = save_agent_config(&agent_config) {
- eprintln!("{}", format!("Warning: Could not save config: {}", e).yellow());
+ eprintln!(
+ "{}",
+ format!("Warning: Could not save config: {}", e).yellow()
+ );
}
}
"4" => {
@@ -1100,7 +1357,10 @@ impl ChatSession {
}
if let Err(e) = save_agent_config(&agent_config) {
- eprintln!("{}", format!("Warning: Could not save config: {}", e).yellow());
+ eprintln!(
+ "{}",
+ format!("Warning: Could not save config: {}", e).yellow()
+ );
}
println!("{}", format!("ā Deleted profile '{}'", name).green());
@@ -1122,7 +1382,10 @@ impl ChatSession {
if incomplete.is_empty() {
println!("\n{}", "No incomplete plans found.".dimmed());
- println!("{}", "Create a plan using plan mode (Shift+Tab) and the plan_create tool.".dimmed());
+ println!(
+ "{}",
+ "Create a plan using plan mode (Shift+Tab) and the plan_create tool.".dimmed()
+ );
return Ok(());
}
@@ -1151,7 +1414,10 @@ impl ChatSession {
println!();
println!("{}", "To continue a plan, say:".dimmed());
println!(" {}", "\"continue the plan at plans/FILENAME.md\"".cyan());
- println!(" {}", "or just \"continue\" to resume the most recent one".cyan());
+ println!(
+ " {}",
+ "or just \"continue\" to resume the most recent one".cyan()
+ );
println!();
Ok(())
@@ -1169,15 +1435,29 @@ impl ChatSession {
println!("{}", "š Profiles:".cyan());
for (name, profile) in &config.profiles {
- let marker = if Some(name.as_str()) == active { "ā " } else { " " };
+ let marker = if Some(name.as_str()) == active {
+ "ā "
+ } else {
+ " "
+ };
let desc = profile.description.as_deref().unwrap_or("");
- let desc_fmt = if desc.is_empty() { String::new() } else { format!(" - {}", desc) };
+ let desc_fmt = if desc.is_empty() {
+ String::new()
+ } else {
+ format!(" - {}", desc)
+ };
// Show which providers are configured
let mut providers = Vec::new();
- if profile.openai.is_some() { providers.push("OpenAI"); }
- if profile.anthropic.is_some() { providers.push("Anthropic"); }
- if profile.bedrock.is_some() { providers.push("Bedrock"); }
+ if profile.openai.is_some() {
+ providers.push("OpenAI");
+ }
+ if profile.anthropic.is_some() {
+ providers.push("Anthropic");
+ }
+ if profile.bedrock.is_some() {
+ providers.push("Bedrock");
+ }
let providers_str = if providers.is_empty() {
"(no providers configured)".to_string()
@@ -1185,7 +1465,13 @@ impl ChatSession {
format!("[{}]", providers.join(", "))
};
- println!(" {} {}{} {}", marker, name.white().bold(), desc_fmt.dimmed(), providers_str.dimmed());
+ println!(
+ " {} {}{} {}",
+ marker,
+ name.white().bold(),
+ desc_fmt.dimmed(),
+ providers_str.dimmed()
+ );
}
println!();
}
@@ -1193,63 +1479,170 @@ impl ChatSession {
/// Handle /help command
pub fn print_help() {
println!();
- println!(" {}āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā{}", ansi::PURPLE, ansi::RESET);
+ println!(
+ " {}āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā{}",
+ ansi::PURPLE,
+ ansi::RESET
+ );
println!(" {}š Available Commands{}", ansi::PURPLE, ansi::RESET);
- println!(" {}āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā{}", ansi::PURPLE, ansi::RESET);
+ println!(
+ " {}āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā{}",
+ ansi::PURPLE,
+ ansi::RESET
+ );
println!();
-
+
for cmd in SLASH_COMMANDS.iter() {
let alias = cmd.alias.map(|a| format!(" ({})", a)).unwrap_or_default();
- println!(" {}/{:<12}{}{} - {}{}{}",
- ansi::CYAN, cmd.name, alias, ansi::RESET,
- ansi::DIM, cmd.description, ansi::RESET
+ println!(
+ " {}/{:<12}{}{} - {}{}{}",
+ ansi::CYAN,
+ cmd.name,
+ alias,
+ ansi::RESET,
+ ansi::DIM,
+ cmd.description,
+ ansi::RESET
);
}
-
+
println!();
- println!(" {}Tip: Type / to see interactive command picker!{}", ansi::DIM, ansi::RESET);
+ println!(
+ " {}Tip: Type / to see interactive command picker!{}",
+ ansi::DIM,
+ ansi::RESET
+ );
println!();
}
-
/// Print session banner with colorful SYNCABLE ASCII art
pub fn print_logo() {
- // Colors matching the logo gradient: purple ā orange ā pink
- // Using ANSI 256 colors for better gradient
+ // Colors matching the logo gradient: purple ā orange ā pink
+ // Using ANSI 256 colors for better gradient
// Purple shades for S, y
- let purple = "\x1b[38;5;141m"; // Light purple
- // Orange shades for n, c
- let orange = "\x1b[38;5;216m"; // Peach/orange
+ let purple = "\x1b[38;5;141m"; // Light purple
+ // Orange shades for n, c
+ let orange = "\x1b[38;5;216m"; // Peach/orange
// Pink shades for a, b, l, e
- let pink = "\x1b[38;5;212m"; // Hot pink
+ let pink = "\x1b[38;5;212m"; // Hot pink
let magenta = "\x1b[38;5;207m"; // Magenta
let reset = "\x1b[0m";
println!();
println!(
"{} āāāāāāāā{}{} āāā āāā{}{}āāāā āāā{}{} āāāāāāā{}{} āāāāāā {}{}āāāāāāā {}{}āāā {}{}āāāāāāāā{}",
- purple, reset, purple, reset, orange, reset, orange, reset, pink, reset, pink, reset, magenta, reset, magenta, reset
+ purple,
+ reset,
+ purple,
+ reset,
+ orange,
+ reset,
+ orange,
+ reset,
+ pink,
+ reset,
+ pink,
+ reset,
+ magenta,
+ reset,
+ magenta,
+ reset
);
println!(
"{} āāāāāāāā{}{} āāāā āāāā{}{}āāāāā āāā{}{} āāāāāāāā{}{} āāāāāāāā{}{}āāāāāāāā{}{}āāā {}{}āāāāāāāā{}",
- purple, reset, purple, reset, orange, reset, orange, reset, pink, reset, pink, reset, magenta, reset, magenta, reset
+ purple,
+ reset,
+ purple,
+ reset,
+ orange,
+ reset,
+ orange,
+ reset,
+ pink,
+ reset,
+ pink,
+ reset,
+ magenta,
+ reset,
+ magenta,
+ reset
);
println!(
"{} āāāāāāāā{}{} āāāāāāā {}{}āāāāāā āāā{}{} āāā {}{} āāāāāāāā{}{}āāāāāāāā{}{}āāā {}{}āāāāāā {}",
- purple, reset, purple, reset, orange, reset, orange, reset, pink, reset, pink, reset, magenta, reset, magenta, reset
+ purple,
+ reset,
+ purple,
+ reset,
+ orange,
+ reset,
+ orange,
+ reset,
+ pink,
+ reset,
+ pink,
+ reset,
+ magenta,
+ reset,
+ magenta,
+ reset
);
println!(
"{} āāāāāāāā{}{} āāāāā {}{}āāāāāāāāāā{}{} āāā {}{} āāāāāāāā{}{}āāāāāāāā{}{}āāā {}{}āāāāāā {}",
- purple, reset, purple, reset, orange, reset, orange, reset, pink, reset, pink, reset, magenta, reset, magenta, reset
+ purple,
+ reset,
+ purple,
+ reset,
+ orange,
+ reset,
+ orange,
+ reset,
+ pink,
+ reset,
+ pink,
+ reset,
+ magenta,
+ reset,
+ magenta,
+ reset
);
println!(
"{} āāāāāāāā{}{} āāā {}{}āāā āāāāāā{}{} āāāāāāāā{}{} āāā āāā{}{}āāāāāāāā{}{}āāāāāāāā{}{}āāāāāāāā{}",
- purple, reset, purple, reset, orange, reset, orange, reset, pink, reset, pink, reset, magenta, reset, magenta, reset
+ purple,
+ reset,
+ purple,
+ reset,
+ orange,
+ reset,
+ orange,
+ reset,
+ pink,
+ reset,
+ pink,
+ reset,
+ magenta,
+ reset,
+ magenta,
+ reset
);
println!(
"{} āāāāāāāā{}{} āāā {}{}āāā āāāāā{}{} āāāāāāā{}{} āāā āāā{}{}āāāāāāā {}{}āāāāāāāā{}{}āāāāāāāā{}",
- purple, reset, purple, reset, orange, reset, orange, reset, pink, reset, pink, reset, magenta, reset, magenta, reset
+ purple,
+ reset,
+ purple,
+ reset,
+ orange,
+ reset,
+ orange,
+ reset,
+ pink,
+ reset,
+ pink,
+ reset,
+ magenta,
+ reset,
+ magenta,
+ reset
);
println!();
}
@@ -1263,7 +1656,8 @@ impl ChatSession {
println!(
" {} {}",
"š".dimmed(),
- "Want to deploy? Deploy instantly from Syncable Platform ā https://syncable.dev".dimmed()
+ "Want to deploy? Deploy instantly from Syncable Platform ā https://syncable.dev"
+ .dimmed()
);
println!();
@@ -1275,10 +1669,7 @@ impl ChatSession {
self.provider.to_string().cyan(),
self.model.cyan()
);
- println!(
- " {}",
- "Your AI-powered code analysis assistant".dimmed()
- );
+ println!(" {}", "Your AI-powered code analysis assistant".dimmed());
// Check for incomplete plans and show a hint
let incomplete_plans = find_incomplete_plans(&self.project_path);
@@ -1317,18 +1708,17 @@ impl ChatSession {
);
}
-
/// Process a command (returns true if should continue, false if should exit)
pub fn process_command(&mut self, input: &str) -> AgentResult {
let cmd = input.trim().to_lowercase();
-
+
// Handle bare "/" - now handled interactively in read_input
// Just show help if they somehow got here
if cmd == "/" {
Self::print_help();
return Ok(true);
}
-
+
match cmd.as_str() {
"/exit" | "/quit" | "/q" => {
println!("\n{}", "š Goodbye!".green());
@@ -1362,11 +1752,18 @@ impl ChatSession {
_ => {
if cmd.starts_with('/') {
// Unknown command - interactive picker already handled in read_input
- println!("{}", format!("Unknown command: {}. Type /help for available commands.", cmd).yellow());
+ println!(
+ "{}",
+ format!(
+ "Unknown command: {}. Type /help for available commands.",
+ cmd
+ )
+ .yellow()
+ );
}
}
}
-
+
Ok(true)
}
@@ -1412,7 +1809,11 @@ impl ChatSession {
pub fn read_input(&self) -> io::Result {
use crate::agent::ui::input::read_input_with_file_picker;
- Ok(read_input_with_file_picker("You:", &self.project_path, self.plan_mode.is_planning()))
+ Ok(read_input_with_file_picker(
+ "You:",
+ &self.project_path,
+ self.plan_mode.is_planning(),
+ ))
}
/// Process a submitted input text - strips @ references and handles suggestion format
diff --git a/src/agent/tools/dclint.rs b/src/agent/tools/dclint.rs
new file mode 100644
index 00000000..851f353e
--- /dev/null
+++ b/src/agent/tools/dclint.rs
@@ -0,0 +1,552 @@
+//! Dclint tool - Native Docker Compose linting using Rig's Tool trait
+//!
+//! Provides native Docker Compose linting without requiring the external dclint binary.
+//! Implements docker-compose-linter rules with full pragma support.
+//!
+//! Output is optimized for AI agent decision-making with:
+//! - Categorized issues (security, best-practice, style, performance)
+//! - Priority rankings (critical, high, medium, low)
+//! - Actionable fix recommendations
+//! - Rule documentation links
+
+use rig::completion::ToolDefinition;
+use rig::tool::Tool;
+use serde::{Deserialize, Serialize};
+use serde_json::json;
+use std::path::PathBuf;
+
+use crate::analyzer::dclint::{DclintConfig, LintResult, RuleCategory, Severity, lint, lint_file};
+
+/// Arguments for the dclint tool
+#[derive(Debug, Deserialize)]
+pub struct DclintArgs {
+ /// Path to docker-compose.yml (relative to project root) or inline content
+ #[serde(default)]
+ pub compose_file: Option<String>,
+
+ /// Inline Docker Compose content to lint (alternative to path)
+ #[serde(default)]
+ pub content: Option<String>,
+
+ /// Rules to ignore (e.g., ["DCL001", "DCL006"])
+ #[serde(default)]
+ pub ignore: Vec<String>,
+
+ /// Minimum severity threshold: "error", "warning", "info", "style"
+ #[serde(default)]
+ pub threshold: Option<String>,
+
+ /// Whether to apply auto-fixes (if available)
+ #[serde(default)]
+ pub fix: bool,
+}
+
+/// Error type for dclint tool
+#[derive(Debug, thiserror::Error)]
+#[error("Dclint error: {0}")]
+pub struct DclintError(String);
+
+/// Tool to lint Docker Compose files natively
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DclintTool {
+ project_path: PathBuf,
+}
+
+impl DclintTool {
+ pub fn new(project_path: PathBuf) -> Self {
+ Self { project_path }
+ }
+
+ fn parse_threshold(threshold: &str) -> Severity {
+ match threshold.to_lowercase().as_str() {
+ "error" => Severity::Error,
+ "warning" => Severity::Warning,
+ "info" => Severity::Info,
+ "style" => Severity::Style,
+ _ => Severity::Warning, // Default
+ }
+ }
+
+ /// Get priority based on severity and category
+ fn get_priority(severity: Severity, category: RuleCategory) -> &'static str {
+ match (severity, category) {
+ (Severity::Error, RuleCategory::Security) => "critical",
+ (Severity::Error, _) => "high",
+ (Severity::Warning, RuleCategory::Security) => "high",
+ (Severity::Warning, RuleCategory::BestPractice) => "medium",
+ (Severity::Warning, _) => "medium",
+ (Severity::Info, _) => "low",
+ (Severity::Style, _) => "low",
+ }
+ }
+
+ /// Get actionable fix recommendation for a rule
+ fn get_fix_recommendation(code: &str) -> &'static str {
+ match code {
+ "DCL001" => {
+ "Remove either the 'build' or 'image' field, or add 'pull_policy' if both are intentional."
+ }
+ "DCL002" => {
+ "Use unique container names for each service, or remove explicit container_name to use auto-generated names."
+ }
+ "DCL003" => {
+ "Use different host ports for each service, or bind to different interfaces (e.g., 127.0.0.1:8080:80)."
+ }
+ "DCL004" => "Remove quotes from volume paths. YAML doesn't require quotes for paths.",
+ "DCL005" => {
+ "Add explicit interface binding, e.g., '127.0.0.1:8080:80' instead of '8080:80' for local-only access."
+ }
+ "DCL006" => {
+ "Remove the 'version' field. Docker Compose now infers the version automatically."
+ }
+ "DCL007" => "Add 'name: myproject' at the top level for explicit project naming.",
+ "DCL008" => {
+ "Quote port mappings to prevent YAML parsing issues, e.g., \"8080:80\" instead of 8080:80."
+ }
+ "DCL009" => {
+ "Use lowercase container names with only letters, numbers, hyphens, and underscores."
+ }
+ "DCL010" => {
+ "Sort dependencies alphabetically for better readability and easier merges."
+ }
+ "DCL011" => {
+ "Use explicit version tags (e.g., nginx:1.25) instead of implicit latest or untagged images."
+ }
+ "DCL012" => {
+ "Reorder service keys to follow convention: image, build, container_name, ports, volumes, environment, etc."
+ }
+ "DCL013" => "Sort port mappings alphabetically/numerically for consistency.",
+ "DCL014" => "Sort services alphabetically for better navigation and easier merges.",
+ "DCL015" => {
+ "Reorder top-level keys: name, services, networks, volumes, configs, secrets."
+ }
+ _ => "Review the rule documentation for specific guidance.",
+ }
+ }
+
+ /// Get documentation URL for a rule
+ fn get_rule_url(code: &str) -> String {
+ if code.starts_with("DCL") {
+ let rule_name = match code {
+ "DCL001" => "no-build-and-image-rule",
+ "DCL002" => "no-duplicate-container-names-rule",
+ "DCL003" => "no-duplicate-exported-ports-rule",
+ "DCL004" => "no-quotes-in-volumes-rule",
+ "DCL005" => "no-unbound-port-interfaces-rule",
+ "DCL006" => "no-version-field-rule",
+ "DCL007" => "require-project-name-field-rule",
+ "DCL008" => "require-quotes-in-ports-rule",
+ "DCL009" => "service-container-name-regex-rule",
+ "DCL010" => "service-dependencies-alphabetical-order-rule",
+ "DCL011" => "service-image-require-explicit-tag-rule",
+ "DCL012" => "service-keys-order-rule",
+ "DCL013" => "service-ports-alphabetical-order-rule",
+ "DCL014" => "services-alphabetical-order-rule",
+ "DCL015" => "top-level-properties-order-rule",
+ _ => return String::new(),
+ };
+ format!(
+ "https://github.com/zavoloklom/docker-compose-linter/blob/main/docs/rules/{}.md",
+ rule_name
+ )
+ } else {
+ String::new()
+ }
+ }
+
+ /// Format result optimized for agent decision-making
+ fn format_result(result: &LintResult, filename: &str) -> String {
+ // Categorize and enrich failures
+ let enriched_failures: Vec<serde_json::Value> = result
+ .failures
+ .iter()
+ .map(|f| {
+ let code = f.code.as_str();
+ let priority = Self::get_priority(f.severity, f.category);
+
+ json!({
+ "code": code,
+ "ruleName": f.rule_name,
+ "severity": f.severity.as_str(),
+ "priority": priority,
+ "category": f.category.as_str(),
+ "message": f.message,
+ "line": f.line,
+ "column": f.column,
+ "fixable": f.fixable,
+ "fix": Self::get_fix_recommendation(code),
+ "docs": Self::get_rule_url(code),
+ })
+ })
+ .collect();
+
+ // Group by priority for agent decision ordering
+ let critical: Vec<_> = enriched_failures
+ .iter()
+ .filter(|f| f["priority"] == "critical")
+ .cloned()
+ .collect();
+ let high: Vec<_> = enriched_failures
+ .iter()
+ .filter(|f| f["priority"] == "high")
+ .cloned()
+ .collect();
+ let medium: Vec<_> = enriched_failures
+ .iter()
+ .filter(|f| f["priority"] == "medium")
+ .cloned()
+ .collect();
+ let low: Vec<_> = enriched_failures
+ .iter()
+ .filter(|f| f["priority"] == "low")
+ .cloned()
+ .collect();
+
+ // Group by category for thematic fixes
+ let mut by_category: std::collections::HashMap<&str, Vec<_>> =
+ std::collections::HashMap::new();
+ for f in &enriched_failures {
+ let cat = f["category"].as_str().unwrap_or("other");
+ by_category.entry(cat).or_default().push(f.clone());
+ }
+
+ // Build decision context
+ let decision_context = if critical.is_empty() && high.is_empty() {
+ if medium.is_empty() && low.is_empty() {
+ "Docker Compose file follows best practices. No issues found."
+ } else if medium.is_empty() {
+ "Minor improvements possible. Low priority issues only (style/formatting)."
+ } else {
+ "Good baseline. Medium priority improvements recommended."
+ }
+ } else if !critical.is_empty() {
+ "Critical issues found. Address security/error issues first before deployment."
+ } else {
+ "High priority issues found. Review and fix before production use."
+ };
+
+ // Count fixable issues
+ let fixable_count = enriched_failures
+ .iter()
+ .filter(|f| f["fixable"] == true)
+ .count();
+
+ // Build agent-optimized output
+ let mut output = json!({
+ "file": filename,
+ "success": !result.has_errors(),
+ "decision_context": decision_context,
+ "summary": {
+ "total": result.failures.len(),
+ "by_priority": {
+ "critical": critical.len(),
+ "high": high.len(),
+ "medium": medium.len(),
+ "low": low.len(),
+ },
+ "by_severity": {
+ "errors": result.error_count,
+ "warnings": result.warning_count,
+ "info": result.failures.iter().filter(|f| f.severity == Severity::Info).count(),
+ "style": result.failures.iter().filter(|f| f.severity == Severity::Style).count(),
+ },
+ "by_category": by_category.iter().map(|(k, v)| (k.to_string(), v.len())).collect::<std::collections::HashMap<String, usize>>(),
+ "fixable": fixable_count,
+ },
+ "action_plan": {
+ "critical": critical,
+ "high": high,
+ "medium": medium,
+ "low": low,
+ },
+ });
+
+ // Add quick fixes summary for agent
+ if !enriched_failures.is_empty() {
+ let quick_fixes: Vec<String> = enriched_failures
+ .iter()
+ .filter(|f| f["priority"] == "critical" || f["priority"] == "high")
+ .take(5)
+ .map(|f| {
+ format!(
+ "Line {}: {} - {}",
+ f["line"],
+ f["code"].as_str().unwrap_or(""),
+ f["fix"].as_str().unwrap_or("")
+ )
+ })
+ .collect();
+
+ if !quick_fixes.is_empty() {
+ output["quick_fixes"] = json!(quick_fixes);
+ }
+ }
+
+ if !result.parse_errors.is_empty() {
+ output["parse_errors"] = json!(result.parse_errors);
+ }
+
+ serde_json::to_string_pretty(&output).unwrap_or_else(|_| "{}".to_string())
+ }
+}
+
+impl Tool for DclintTool {
+ const NAME: &'static str = "dclint";
+
+ type Error = DclintError;
+ type Args = DclintArgs;
+ type Output = String;
+
+ async fn definition(&self, _prompt: String) -> ToolDefinition {
+ ToolDefinition {
+ name: Self::NAME.to_string(),
+ description: "Lint Docker Compose files for best practices, security issues, and style consistency. \
+ Returns AI-optimized JSON with issues categorized by priority (critical/high/medium/low) \
+ and type (security/best-practice/style/performance). \
+ Each issue includes an actionable fix recommendation. Use this to analyze docker-compose.yml \
+ files before deployment or to improve existing configurations. The 'decision_context' field provides \
+ a summary for quick assessment, and 'quick_fixes' lists the most important changes. \
+ Supports 15 rules including: build+image conflicts, duplicate names/ports, image tagging, \
+ port security, alphabetical ordering, and more."
+ .to_string(),
+ parameters: json!({
+ "type": "object",
+ "properties": {
+ "compose_file": {
+ "type": "string",
+ "description": "Path to docker-compose.yml relative to project root (e.g., 'docker-compose.yml', 'deploy/docker-compose.prod.yml')"
+ },
+ "content": {
+ "type": "string",
+ "description": "Inline Docker Compose YAML content to lint. Use this when you want to validate generated content before writing."
+ },
+ "ignore": {
+ "type": "array",
+ "items": { "type": "string" },
+ "description": "List of rule codes to ignore (e.g., ['DCL006', 'DCL014'])"
+ },
+ "threshold": {
+ "type": "string",
+ "enum": ["error", "warning", "info", "style"],
+ "description": "Minimum severity to report. Default is 'warning'."
+ },
+ "fix": {
+ "type": "boolean",
+ "description": "Apply auto-fixes where available (8 of 15 rules support auto-fix)."
+ }
+ }
+ }),
+ }
+ }
+
+ async fn call(&self, args: Self::Args) -> Result<Self::Output, Self::Error> {
+ // Build configuration
+ let mut config = DclintConfig::default();
+
+ // Apply ignored rules
+ for rule in &args.ignore {
+ config = config.ignore(rule.as_str());
+ }
+
+ // Apply threshold
+ if let Some(threshold) = &args.threshold {
+ config = config.with_threshold(Self::parse_threshold(threshold));
+ }
+
+ // Determine source, filename, and lint
+ let (result, filename) = if let Some(content) = &args.content {
+ // Lint inline content
+ (lint(content, &config), "<inline>".to_string())
+ } else if let Some(compose_file) = &args.compose_file {
+ // Lint file
+ let path = self.project_path.join(compose_file);
+ (lint_file(&path, &config), compose_file.clone())
+ } else {
+ // Default: look for docker-compose.yml in project root
+ let default_files = [
+ "docker-compose.yml",
+ "docker-compose.yaml",
+ "compose.yml",
+ "compose.yaml",
+ ];
+
+ let mut found = None;
+ for file in &default_files {
+ let path = self.project_path.join(file);
+ if path.exists() {
+ found = Some((lint_file(&path, &config), file.to_string()));
+ break;
+ }
+ }
+
+ match found {
+ Some((result, filename)) => (result, filename),
+ None => {
+ return Err(DclintError(
+ "No Docker Compose file specified and no docker-compose.yml found in project root".to_string(),
+ ));
+ }
+ }
+ };
+
+ // Check for parse errors
+ if !result.parse_errors.is_empty() {
+ log::warn!("Docker Compose parse errors: {:?}", result.parse_errors);
+ }
+
+ Ok(Self::format_result(&result, &filename))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::env::temp_dir;
+ use std::fs;
+
+ #[tokio::test]
+ async fn test_dclint_inline_content() {
+ let tool = DclintTool::new(temp_dir());
+ let args = DclintArgs {
+ compose_file: None,
+ content: Some(
+ r#"
+services:
+ web:
+ build: .
+ image: nginx:latest
+"#
+ .to_string(),
+ ),
+ ignore: vec![],
+ threshold: None,
+ fix: false,
+ };
+
+ let result = tool.call(args).await.unwrap();
+ let parsed: serde_json::Value = serde_json::from_str(&result).unwrap();
+
+ // Should detect DCL001 (build+image)
+ assert!(!parsed["success"].as_bool().unwrap_or(true));
+ assert!(parsed["summary"]["total"].as_u64().unwrap_or(0) >= 1);
+
+ // Check new fields exist
+ assert!(parsed["decision_context"].is_string());
+ assert!(parsed["action_plan"].is_object());
+ }
+
+ #[tokio::test]
+ async fn test_dclint_ignore_rules() {
+ let tool = DclintTool::new(temp_dir());
+ let args = DclintArgs {
+ compose_file: None,
+ content: Some(
+ r#"
+version: "3.8"
+services:
+ web:
+ image: nginx:latest
+"#
+ .to_string(),
+ ),
+ ignore: vec!["DCL006".to_string(), "DCL011".to_string()],
+ threshold: None,
+ fix: false,
+ };
+
+ let result = tool.call(args).await.unwrap();
+ let parsed: serde_json::Value = serde_json::from_str(&result).unwrap();
+
+ // DCL006 and DCL011 should be ignored
+ let all_codes: Vec<&str> = parsed["action_plan"]
+ .as_object()
+ .unwrap()
+ .values()
+ .flat_map(|v| v.as_array().unwrap())
+ .filter_map(|v| v["code"].as_str())
+ .collect();
+
+ assert!(!all_codes.contains(&"DCL006"));
+ assert!(!all_codes.contains(&"DCL011"));
+ }
+
+ #[tokio::test]
+ async fn test_dclint_file() {
+ let temp = temp_dir().join("dclint_test");
+ fs::create_dir_all(&temp).unwrap();
+ let compose_file = temp.join("docker-compose.yml");
+ fs::write(
+ &compose_file,
+ r#"
+name: myproject
+services:
+ web:
+ image: nginx:1.25
+ ports:
+ - "8080:80"
+"#,
+ )
+ .unwrap();
+
+ let tool = DclintTool::new(temp.clone());
+ let args = DclintArgs {
+ compose_file: Some("docker-compose.yml".to_string()),
+ content: None,
+ ignore: vec![],
+ threshold: None,
+ fix: false,
+ };
+
+ let result = tool.call(args).await.unwrap();
+ let parsed: serde_json::Value = serde_json::from_str(&result).unwrap();
+
+ // Well-formed compose file should have few/no critical issues
+ assert_eq!(parsed["file"], "docker-compose.yml");
+
+ // Cleanup
+ fs::remove_dir_all(&temp).ok();
+ }
+
+ #[tokio::test]
+ async fn test_dclint_valid_compose() {
+ let tool = DclintTool::new(temp_dir());
+ let compose = r#"
+name: myproject
+services:
+ api:
+ image: node:20-alpine
+ ports:
+ - "127.0.0.1:3000:3000"
+ db:
+ image: postgres:16-alpine
+"#;
+
+ let args = DclintArgs {
+ compose_file: None,
+ content: Some(compose.to_string()),
+ ignore: vec![],
+ threshold: None,
+ fix: false,
+ };
+
+ let result = tool.call(args).await.unwrap();
+ let parsed: serde_json::Value = serde_json::from_str(&result).unwrap();
+
+ // Well-structured compose file should pass (no errors)
+ assert!(parsed["success"].as_bool().unwrap_or(false));
+ assert!(parsed["decision_context"].is_string());
+ // Should not have critical or high priority issues
+ assert_eq!(
+ parsed["summary"]["by_priority"]["critical"]
+ .as_u64()
+ .unwrap_or(99),
+ 0
+ );
+ assert_eq!(
+ parsed["summary"]["by_priority"]["high"]
+ .as_u64()
+ .unwrap_or(99),
+ 0
+ );
+ }
+}
diff --git a/src/agent/tools/diagnostics.rs b/src/agent/tools/diagnostics.rs
index 27a4ad35..22370f95 100644
--- a/src/agent/tools/diagnostics.rs
+++ b/src/agent/tools/diagnostics.rs
@@ -87,10 +87,7 @@ impl DiagnosticsTool {
}
/// Get diagnostics from IDE via MCP
- async fn get_ide_diagnostics(
- &self,
- file_path: Option<&str>,
- ) -> Option {
+ async fn get_ide_diagnostics(&self, file_path: Option<&str>) -> Option {
let client = self.ide_client.as_ref()?;
let guard = client.lock().await;
@@ -172,15 +169,26 @@ impl DiagnosticsTool {
// Get the primary span
let spans = message.get("spans")?.as_array()?;
- let span = spans.iter().find(|s| {
- s.get("is_primary").and_then(|v| v.as_bool()).unwrap_or(false)
- }).or_else(|| spans.first())?;
+ let span = spans
+ .iter()
+ .find(|s| {
+ s.get("is_primary")
+ .and_then(|v| v.as_bool())
+ .unwrap_or(false)
+ })
+ .or_else(|| spans.first())?;
let file = span.get("file_name")?.as_str()?;
let line = span.get("line_start")?.as_u64()? as u32;
let column = span.get("column_start")?.as_u64()? as u32;
- let end_line = span.get("line_end").and_then(|v| v.as_u64()).map(|v| v as u32);
- let end_column = span.get("column_end").and_then(|v| v.as_u64()).map(|v| v as u32);
+ let end_line = span
+ .get("line_end")
+ .and_then(|v| v.as_u64())
+ .map(|v| v as u32);
+ let end_column = span
+ .get("column_end")
+ .and_then(|v| v.as_u64())
+ .map(|v| v as u32);
let code = message
.get("code")
@@ -265,9 +273,18 @@ impl DiagnosticsTool {
.to_string();
let line = msg.get("line").and_then(|l| l.as_u64()).unwrap_or(1) as u32;
let column = msg.get("column").and_then(|c| c.as_u64()).unwrap_or(1) as u32;
- let end_line = msg.get("endLine").and_then(|l| l.as_u64()).map(|v| v as u32);
- let end_column = msg.get("endColumn").and_then(|c| c.as_u64()).map(|v| v as u32);
- let code = msg.get("ruleId").and_then(|r| r.as_str()).map(|s| s.to_string());
+ let end_line = msg
+ .get("endLine")
+ .and_then(|l| l.as_u64())
+ .map(|v| v as u32);
+ let end_column = msg
+ .get("endColumn")
+ .and_then(|c| c.as_u64())
+ .map(|v| v as u32);
+ let code = msg
+ .get("ruleId")
+ .and_then(|r| r.as_str())
+ .map(|s| s.to_string());
diagnostics.push(Diagnostic {
file: file.to_string(),
@@ -459,7 +476,9 @@ impl DiagnosticsTool {
// Filter out warnings if not requested
if !include_warnings {
- response.diagnostics.retain(|d| d.severity == DiagnosticSeverity::Error);
+ response
+ .diagnostics
+ .retain(|d| d.severity == DiagnosticSeverity::Error);
}
// Apply limit
diff --git a/src/agent/tools/file_ops.rs b/src/agent/tools/file_ops.rs
index 4a272946..9b28763a 100644
--- a/src/agent/tools/file_ops.rs
+++ b/src/agent/tools/file_ops.rs
@@ -15,10 +15,10 @@
//! - Directory listings: Max 500 entries
//! - Long lines: Truncated at 2000 characters
+use super::truncation::{TruncationLimits, truncate_dir_listing, truncate_file_content};
use crate::agent::ide::IdeClient;
use crate::agent::ui::confirmation::ConfirmationResult;
use crate::agent::ui::diff::{confirm_file_write, confirm_file_write_with_ide};
-use super::truncation::{truncate_file_content, truncate_dir_listing, TruncationLimits};
use rig::completion::ToolDefinition;
use rig::tool::Tool;
use serde::{Deserialize, Serialize};
@@ -54,20 +54,25 @@ impl ReadFileTool {
}
fn validate_path(&self, requested: &PathBuf) -> Result<PathBuf, ReadFileError> {
- let canonical_project = self.project_path.canonicalize()
+ let canonical_project = self
+ .project_path
+ .canonicalize()
.map_err(|e| ReadFileError(format!("Invalid project path: {}", e)))?;
-
+
let target = if requested.is_absolute() {
requested.clone()
} else {
self.project_path.join(requested)
};
- let canonical_target = target.canonicalize()
+ let canonical_target = target
+ .canonicalize()
.map_err(|e| ReadFileError(format!("File not found: {}", e)))?;
if !canonical_target.starts_with(&canonical_project) {
- return Err(ReadFileError("Access denied: path is outside project directory".to_string()));
+ return Err(ReadFileError(
+ "Access denied: path is outside project directory".to_string(),
+ ));
}
Ok(canonical_target)
@@ -127,12 +132,16 @@ impl Tool for ReadFileTool {
// User requested specific line range - respect it exactly
let lines: Vec<&str> = content.lines().collect();
let start_idx = (start as usize).saturating_sub(1);
- let end_idx = args.end_line.map(|e| (e as usize).min(lines.len())).unwrap_or(lines.len());
+ let end_idx = args
+ .end_line
+ .map(|e| (e as usize).min(lines.len()))
+ .unwrap_or(lines.len());
if start_idx >= lines.len() {
return Ok(json!({
"error": format!("Start line {} exceeds file length ({})", start, lines.len())
- }).to_string());
+ })
+ .to_string());
}
// Ensure end_idx >= start_idx to avoid slice panic when end_line < start_line
@@ -194,20 +203,25 @@ impl ListDirectoryTool {
}
fn validate_path(&self, requested: &PathBuf) -> Result<PathBuf, ListDirectoryError> {
- let canonical_project = self.project_path.canonicalize()
+ let canonical_project = self
+ .project_path
+ .canonicalize()
.map_err(|e| ListDirectoryError(format!("Invalid project path: {}", e)))?;
-
+
let target = if requested.is_absolute() {
requested.clone()
} else {
self.project_path.join(requested)
};
- let canonical_target = target.canonicalize()
+ let canonical_target = target
+ .canonicalize()
.map_err(|e| ListDirectoryError(format!("Directory not found: {}", e)))?;
if !canonical_target.starts_with(&canonical_project) {
- return Err(ListDirectoryError("Access denied: path is outside project directory".to_string()));
+ return Err(ListDirectoryError(
+ "Access denied: path is outside project directory".to_string(),
+ ));
}
Ok(canonical_target)
@@ -222,10 +236,22 @@ impl ListDirectoryTool {
max_depth: usize,
entries: &mut Vec,
) -> Result<(), ListDirectoryError> {
- let skip_dirs = ["node_modules", ".git", "target", "__pycache__", ".venv", "venv", "dist", "build"];
-
- let dir_name = current_path.file_name().and_then(|n| n.to_str()).unwrap_or("");
-
+ let skip_dirs = [
+ "node_modules",
+ ".git",
+ "target",
+ "__pycache__",
+ ".venv",
+ "venv",
+ "dist",
+ "build",
+ ];
+
+ let dir_name = current_path
+ .file_name()
+ .and_then(|n| n.to_str())
+ .unwrap_or("");
+
if depth > 0 && skip_dirs.contains(&dir_name) {
return Ok(());
}
@@ -234,11 +260,16 @@ impl ListDirectoryTool {
.map_err(|e| ListDirectoryError(format!("Cannot read directory: {}", e)))?;
for entry in read_dir {
- let entry = entry.map_err(|e| ListDirectoryError(format!("Error reading entry: {}", e)))?;
+ let entry =
+ entry.map_err(|e| ListDirectoryError(format!("Error reading entry: {}", e)))?;
let path = entry.path();
let metadata = entry.metadata().ok();
-
- let relative_path = path.strip_prefix(base_path).unwrap_or(&path).to_string_lossy().to_string();
+
+ let relative_path = path
+ .strip_prefix(base_path)
+ .unwrap_or(&path)
+ .to_string_lossy()
+ .to_string();
let is_dir = metadata.as_ref().map(|m| m.is_dir()).unwrap_or(false);
let size = metadata.as_ref().map(|m| m.len()).unwrap_or(0);
@@ -426,7 +457,9 @@ impl WriteFileTool {
}
fn validate_path(&self, requested: &PathBuf) -> Result<PathBuf, WriteFileError> {
- let canonical_project = self.project_path.canonicalize()
+ let canonical_project = self
+ .project_path
+ .canonicalize()
.map_err(|e| WriteFileError(format!("Invalid project path: {}", e)))?;
let target = if requested.is_absolute() {
@@ -436,22 +469,28 @@ impl WriteFileTool {
};
// For new files, we can't canonicalize yet, so check the parent
- let parent = target.parent()
+ let parent = target
+ .parent()
.ok_or_else(|| WriteFileError("Invalid path: no parent directory".to_string()))?;
// If parent exists, canonicalize it; otherwise check the path prefix
let is_within_project = if parent.exists() {
- let canonical_parent = parent.canonicalize()
+ let canonical_parent = parent
+ .canonicalize()
.map_err(|e| WriteFileError(format!("Invalid parent path: {}", e)))?;
canonical_parent.starts_with(&canonical_project)
} else {
// For nested new directories, check if the normalized path stays within project
let normalized = self.project_path.join(requested);
- !normalized.components().any(|c| c == std::path::Component::ParentDir)
+ !normalized
+ .components()
+ .any(|c| c == std::path::Component::ParentDir)
};
if !is_within_project {
- return Err(WriteFileError("Access denied: path is outside project directory".to_string()));
+ return Err(WriteFileError(
+ "Access denied: path is outside project directory".to_string(),
+ ));
}
Ok(target)
@@ -530,8 +569,8 @@ The tool will create parent directories automatically if they don't exist."#.to_
.unwrap_or_else(|| args.path.clone());
// Check if confirmation is needed
- let needs_confirmation = self.require_confirmation
- && !self.allowed_patterns.is_allowed(&filename);
+ let needs_confirmation =
+ self.require_confirmation && !self.allowed_patterns.is_allowed(&filename);
if needs_confirmation {
// Get IDE client reference if available
@@ -603,8 +642,9 @@ The tool will create parent directories automatically if they don't exist."#.to_
if create_dirs {
if let Some(parent) = file_path.parent() {
if !parent.exists() {
- fs::create_dir_all(parent)
- .map_err(|e| WriteFileError(format!("Failed to create directories: {}", e)))?;
+ fs::create_dir_all(parent).map_err(|e| {
+ WriteFileError(format!("Failed to create directories: {}", e))
+ })?;
}
}
}
@@ -697,13 +737,18 @@ impl WriteFilesTool {
}
/// Set the IDE client for native diff views
- pub fn with_ide_client(mut self, ide_client: std::sync::Arc<tokio::sync::Mutex<IdeClient>>) -> Self {
+ pub fn with_ide_client(
+ mut self,
+ ide_client: std::sync::Arc<tokio::sync::Mutex<IdeClient>>,
+ ) -> Self {
self.ide_client = Some(ide_client);
self
}
fn validate_path(&self, requested: &PathBuf) -> Result<PathBuf, WriteFilesError> {
- let canonical_project = self.project_path.canonicalize()
+ let canonical_project = self
+ .project_path
+ .canonicalize()
.map_err(|e| WriteFilesError(format!("Invalid project path: {}", e)))?;
let target = if requested.is_absolute() {
@@ -712,20 +757,26 @@ impl WriteFilesTool {
self.project_path.join(requested)
};
- let parent = target.parent()
+ let parent = target
+ .parent()
.ok_or_else(|| WriteFilesError("Invalid path: no parent directory".to_string()))?;
let is_within_project = if parent.exists() {
- let canonical_parent = parent.canonicalize()
+ let canonical_parent = parent
+ .canonicalize()
.map_err(|e| WriteFilesError(format!("Invalid parent path: {}", e)))?;
canonical_parent.starts_with(&canonical_project)
} else {
let normalized = self.project_path.join(requested);
- !normalized.components().any(|c| c == std::path::Component::ParentDir)
+ !normalized
+ .components()
+ .any(|c| c == std::path::Component::ParentDir)
};
if !is_within_project {
- return Err(WriteFilesError("Access denied: path is outside project directory".to_string()));
+ return Err(WriteFilesError(
+ "Access denied: path is outside project directory".to_string(),
+ ));
}
Ok(target)
@@ -812,8 +863,8 @@ All files are written atomically. Parent directories are created automatically."
.unwrap_or_else(|| file.path.clone());
// Check if confirmation is needed
- let needs_confirmation = self.require_confirmation
- && !self.allowed_patterns.is_allowed(&filename);
+ let needs_confirmation =
+ self.require_confirmation && !self.allowed_patterns.is_allowed(&filename);
if needs_confirmation {
// Use IDE diff if client is connected, otherwise terminal diff
@@ -825,21 +876,14 @@ All files are written atomically. Parent directories are created automatically."
old_content.as_deref(),
&file.content,
Some(&*guard),
- ).await
+ )
+ .await
} else {
drop(guard);
- confirm_file_write(
- &file.path,
- old_content.as_deref(),
- &file.content,
- )
+ confirm_file_write(&file.path, old_content.as_deref(), &file.content)
}
} else {
- confirm_file_write(
- &file.path,
- old_content.as_deref(),
- &file.content,
- )
+ confirm_file_write(&file.path, old_content.as_deref(), &file.content)
};
match confirmation {
@@ -894,8 +938,12 @@ All files are written atomically. Parent directories are created automatically."
if create_dirs {
if let Some(parent) = file_path.parent() {
if !parent.exists() {
- fs::create_dir_all(parent)
- .map_err(|e| WriteFilesError(format!("Failed to create directories for {}: {}", file.path, e)))?;
+ fs::create_dir_all(parent).map_err(|e| {
+ WriteFilesError(format!(
+ "Failed to create directories for {}: {}",
+ file.path, e
+ ))
+ })?;
}
}
}
diff --git a/src/agent/tools/hadolint.rs b/src/agent/tools/hadolint.rs
index a187419d..2fd353d3 100644
--- a/src/agent/tools/hadolint.rs
+++ b/src/agent/tools/hadolint.rs
@@ -15,7 +15,7 @@ use serde::{Deserialize, Serialize};
use serde_json::json;
use std::path::PathBuf;
-use crate::analyzer::hadolint::{lint, lint_file, HadolintConfig, LintResult, Severity};
+use crate::analyzer::hadolint::{HadolintConfig, LintResult, Severity, lint, lint_file};
/// Arguments for the hadolint tool
#[derive(Debug, Deserialize)]
@@ -69,18 +69,19 @@ impl HadolintTool {
// Security rules
"DL3000" | "DL3002" | "DL3004" | "DL3047" => "security",
// Best practice rules
- "DL3003" | "DL3006" | "DL3007" | "DL3008" | "DL3009" | "DL3013" |
- "DL3014" | "DL3015" | "DL3016" | "DL3018" | "DL3019" | "DL3020" |
- "DL3025" | "DL3027" | "DL3028" | "DL3033" | "DL3042" | "DL3059" => "best-practice",
+ "DL3003" | "DL3006" | "DL3007" | "DL3008" | "DL3009" | "DL3013" | "DL3014"
+ | "DL3015" | "DL3016" | "DL3018" | "DL3019" | "DL3020" | "DL3025" | "DL3027"
+ | "DL3028" | "DL3033" | "DL3042" | "DL3059" => "best-practice",
// Maintainability rules
- "DL3005" | "DL3010" | "DL3021" | "DL3022" | "DL3023" | "DL3024" |
- "DL3026" | "DL3029" | "DL3030" | "DL3032" | "DL3034" | "DL3035" |
- "DL3036" | "DL3044" | "DL3045" | "DL3048" | "DL3049" | "DL3050" |
- "DL3051" | "DL3052" | "DL3053" | "DL3054" | "DL3055" | "DL3056" |
- "DL3057" | "DL3058" | "DL3060" | "DL3061" => "maintainability",
+ "DL3005" | "DL3010" | "DL3021" | "DL3022" | "DL3023" | "DL3024" | "DL3026"
+ | "DL3029" | "DL3030" | "DL3032" | "DL3034" | "DL3035" | "DL3036" | "DL3044"
+ | "DL3045" | "DL3048" | "DL3049" | "DL3050" | "DL3051" | "DL3052" | "DL3053"
+ | "DL3054" | "DL3055" | "DL3056" | "DL3057" | "DL3058" | "DL3060" | "DL3061" => {
+ "maintainability"
+ }
// Performance rules
- "DL3001" | "DL3011" | "DL3017" | "DL3031" | "DL3037" | "DL3038" |
- "DL3039" | "DL3040" | "DL3041" | "DL3046" | "DL3062" => "performance",
+ "DL3001" | "DL3011" | "DL3017" | "DL3031" | "DL3037" | "DL3038" | "DL3039"
+ | "DL3040" | "DL3041" | "DL3046" | "DL3062" => "performance",
// Deprecated instructions
"DL4000" | "DL4001" | "DL4003" | "DL4005" | "DL4006" => "deprecated",
// ShellCheck rules
@@ -108,14 +109,26 @@ impl HadolintTool {
match code {
"DL3000" => "Use absolute WORKDIR paths like '/app' instead of relative paths.",
"DL3001" => "Remove commands that have no effect in Docker (like 'ssh', 'mount').",
- "DL3002" => "Remove the last USER instruction setting root, or add 'USER ' at the end.",
+ "DL3002" => {
+ "Remove the last USER instruction setting root, or add 'USER ' at the end."
+ }
"DL3003" => "Use WORKDIR to change directories instead of 'cd' in RUN commands.",
- "DL3004" => "Remove 'sudo' from RUN commands. Docker runs as root by default, or use proper USER switching.",
- "DL3005" => "Remove 'apt-get upgrade' or 'dist-upgrade'. Pin packages instead for reproducibility.",
- "DL3006" => "Add explicit version tag to base image, e.g., 'FROM node:18-alpine' instead of 'FROM node'.",
+ "DL3004" => {
+ "Remove 'sudo' from RUN commands. Docker runs as root by default, or use proper USER switching."
+ }
+ "DL3005" => {
+ "Remove 'apt-get upgrade' or 'dist-upgrade'. Pin packages instead for reproducibility."
+ }
+ "DL3006" => {
+ "Add explicit version tag to base image, e.g., 'FROM node:18-alpine' instead of 'FROM node'."
+ }
"DL3007" => "Use specific version tag instead of ':latest', e.g., 'nginx:1.25-alpine'.",
- "DL3008" => "Pin apt package versions: 'apt-get install package=version' or use '--no-install-recommends'.",
- "DL3009" => "Add 'rm -rf /var/lib/apt/lists/*' after apt-get install to reduce image size.",
+ "DL3008" => {
+ "Pin apt package versions: 'apt-get install package=version' or use '--no-install-recommends'."
+ }
+ "DL3009" => {
+ "Add 'rm -rf /var/lib/apt/lists/*' after apt-get install to reduce image size."
+ }
"DL3010" => "Use ADD only for extracting archives. For other files, use COPY.",
"DL3011" => "Use valid port numbers (0-65535) in EXPOSE.",
"DL3013" => "Pin pip package versions: 'pip install package==version'.",
@@ -125,13 +138,19 @@ impl HadolintTool {
"DL3017" => "Remove 'apt-get upgrade'. Pin specific package versions instead.",
"DL3018" => "Pin apk package versions: 'apk add package=version'.",
"DL3019" => "Add '--no-cache' to apk add instead of separate cache cleanup.",
- "DL3020" => "Use COPY instead of ADD for files from build context. ADD is for URLs and archives.",
- "DL3021" => "Use COPY with --from for multi-stage builds instead of COPY from external images.",
+ "DL3020" => {
+ "Use COPY instead of ADD for files from build context. ADD is for URLs and archives."
+ }
+ "DL3021" => {
+ "Use COPY with --from for multi-stage builds instead of COPY from external images."
+ }
"DL3022" => "Use COPY --from=stage instead of --from=image for multi-stage builds.",
"DL3023" => "Reference build stage by name instead of number in COPY --from.",
"DL3024" => "Use lowercase for 'as' in multi-stage builds: 'FROM image AS builder'.",
"DL3025" => "Use JSON array format for CMD/ENTRYPOINT: CMD [\"executable\", \"arg1\"].",
- "DL3026" => "Use official Docker images when possible, or document why unofficial is needed.",
+ "DL3026" => {
+ "Use official Docker images when possible, or document why unofficial is needed."
+ }
"DL3027" => "Remove 'apt' and use 'apt-get' for scripting in Dockerfiles.",
"DL3028" => "Pin gem versions: 'gem install package:version'.",
"DL3029" => "Specify --platform explicitly for multi-arch builds.",
@@ -146,11 +165,15 @@ impl HadolintTool {
"DL3039" => "Add 'zypper clean' after zypper install.",
"DL3040" => "Add 'dnf clean all && rm -rf /var/cache/dnf' after dnf install.",
"DL3041" => "Add 'microdnf clean all' after microdnf install.",
- "DL3042" => "Avoid pip cache in builds. Use '--no-cache-dir' or set PIP_NO_CACHE_DIR=1.",
+ "DL3042" => {
+ "Avoid pip cache in builds. Use '--no-cache-dir' or set PIP_NO_CACHE_DIR=1."
+ }
"DL3044" => "Only use 'HEALTHCHECK' once per Dockerfile, or it won't work correctly.",
"DL3045" => "Use COPY instead of ADD for local files.",
"DL3046" => "Use 'useradd' instead of 'adduser' for better compatibility.",
- "DL3047" => "Add 'wget --progress=dot:giga' or 'curl --progress-bar' to show progress during download.",
+ "DL3047" => {
+ "Add 'wget --progress=dot:giga' or 'curl --progress-bar' to show progress during download."
+ }
"DL3048" => "Prefer setting flag with 'SHELL' instruction instead of inline in RUN.",
"DL3049" => "Add a 'LABEL maintainer=\"name\"' for documentation.",
"DL3050" => "Add 'LABEL version=\"x.y\"' for versioning.",
@@ -170,7 +193,9 @@ impl HadolintTool {
"DL4001" => "Use wget or curl instead of ADD for downloading from URLs.",
"DL4003" => "Use 'ENTRYPOINT' and 'CMD' together properly for container startup.",
"DL4005" => "Prefer JSON notation for SHELL: SHELL [\"/bin/bash\", \"-c\"].",
- "DL4006" => "Add 'SHELL [\"/bin/bash\", \"-o\", \"pipefail\", \"-c\"]' before RUN with pipes.",
+ "DL4006" => {
+ "Add 'SHELL [\"/bin/bash\", \"-o\", \"pipefail\", \"-c\"]' before RUN with pipes."
+ }
_ if code.starts_with("SC") => "See ShellCheck wiki for shell scripting fix.",
_ => "Review the rule documentation for specific guidance.",
}
@@ -192,40 +217,53 @@ impl HadolintTool {
/// Format result optimized for agent decision-making
fn format_result(result: &LintResult, filename: &str) -> String {
// Categorize and enrich failures
- let enriched_failures: Vec<serde_json::Value> = result.failures.iter().map(|f| {
- let code = f.code.as_str();
- let category = Self::get_rule_category(code);
- let priority = Self::get_priority(f.severity, category);
-
- json!({
- "code": code,
- "severity": format!("{:?}", f.severity).to_lowercase(),
- "priority": priority,
- "category": category,
- "message": f.message,
- "line": f.line,
- "column": f.column,
- "fix": Self::get_fix_recommendation(code),
- "docs": Self::get_rule_url(code),
+ let enriched_failures: Vec<serde_json::Value> = result
+ .failures
+ .iter()
+ .map(|f| {
+ let code = f.code.as_str();
+ let category = Self::get_rule_category(code);
+ let priority = Self::get_priority(f.severity, category);
+
+ json!({
+ "code": code,
+ "severity": format!("{:?}", f.severity).to_lowercase(),
+ "priority": priority,
+ "category": category,
+ "message": f.message,
+ "line": f.line,
+ "column": f.column,
+ "fix": Self::get_fix_recommendation(code),
+ "docs": Self::get_rule_url(code),
+ })
})
- }).collect();
+ .collect();
// Group by priority for agent decision ordering
- let critical: Vec<_> = enriched_failures.iter()
+ let critical: Vec<_> = enriched_failures
+ .iter()
.filter(|f| f["priority"] == "critical")
- .cloned().collect();
- let high: Vec<_> = enriched_failures.iter()
+ .cloned()
+ .collect();
+ let high: Vec<_> = enriched_failures
+ .iter()
.filter(|f| f["priority"] == "high")
- .cloned().collect();
- let medium: Vec<_> = enriched_failures.iter()
+ .cloned()
+ .collect();
+ let medium: Vec<_> = enriched_failures
+ .iter()
.filter(|f| f["priority"] == "medium")
- .cloned().collect();
- let low: Vec<_> = enriched_failures.iter()
+ .cloned()
+ .collect();
+ let low: Vec<_> = enriched_failures
+ .iter()
.filter(|f| f["priority"] == "low")
- .cloned().collect();
+ .cloned()
+ .collect();
// Group by category for thematic fixes
- let mut by_category: std::collections::HashMap<&str, Vec<_>> = std::collections::HashMap::new();
+ let mut by_category: std::collections::HashMap<&str, Vec<_>> =
+ std::collections::HashMap::new();
for f in &enriched_failures {
let cat = f["category"].as_str().unwrap_or("other");
by_category.entry(cat).or_default().push(f.clone());
@@ -276,14 +314,18 @@ impl HadolintTool {
// Add quick fixes summary for agent
if !enriched_failures.is_empty() {
- let quick_fixes: Vec<String> = enriched_failures.iter()
+ let quick_fixes: Vec<String> = enriched_failures
+ .iter()
.filter(|f| f["priority"] == "critical" || f["priority"] == "high")
.take(5)
- .map(|f| format!("Line {}: {} - {}",
- f["line"],
- f["code"].as_str().unwrap_or(""),
- f["fix"].as_str().unwrap_or("")
- ))
+ .map(|f| {
+ format!(
+ "Line {}: {} - {}",
+ f["line"],
+ f["code"].as_str().unwrap_or(""),
+ f["fix"].as_str().unwrap_or("")
+ )
+ })
.collect();
if !quick_fixes.is_empty() {
@@ -425,7 +467,11 @@ mod tests {
// Check issues have fix recommendations
let issues = collect_all_issues(&parsed);
- assert!(issues.iter().all(|i| i["fix"].is_string() && !i["fix"].as_str().unwrap().is_empty()));
+ assert!(
+ issues
+ .iter()
+ .all(|i| i["fix"].is_string() && !i["fix"].as_str().unwrap().is_empty())
+ );
}
#[tokio::test]
@@ -470,7 +516,11 @@ mod tests {
let temp = temp_dir().join("hadolint_test");
fs::create_dir_all(&temp).unwrap();
let dockerfile = temp.join("Dockerfile");
- fs::write(&dockerfile, "FROM node:18-alpine\nWORKDIR /app\nCOPY . .\nCMD [\"node\", \"app.js\"]").unwrap();
+ fs::write(
+ &dockerfile,
+ "FROM node:18-alpine\nWORKDIR /app\nCOPY . .\nCMD [\"node\", \"app.js\"]",
+ )
+ .unwrap();
let tool = HadolintTool::new(temp.clone());
let args = HadolintArgs {
@@ -525,8 +575,18 @@ CMD ["node", "dist/index.js"]
// Should have decision context
assert!(parsed["decision_context"].is_string());
// Should not have critical or high priority issues
- assert_eq!(parsed["summary"]["by_priority"]["critical"].as_u64().unwrap_or(99), 0);
- assert_eq!(parsed["summary"]["by_priority"]["high"].as_u64().unwrap_or(99), 0);
+ assert_eq!(
+ parsed["summary"]["by_priority"]["critical"]
+ .as_u64()
+ .unwrap_or(99),
+ 0
+ );
+ assert_eq!(
+ parsed["summary"]["by_priority"]["high"]
+ .as_u64()
+ .unwrap_or(99),
+ 0
+ );
}
#[tokio::test]
@@ -571,8 +631,15 @@ CMD ["node", "dist/index.js"]
let parsed: serde_json::Value = serde_json::from_str(&result).unwrap();
// Should have quick_fixes for high priority issues
- if parsed["summary"]["by_priority"]["high"].as_u64().unwrap_or(0) > 0
- || parsed["summary"]["by_priority"]["critical"].as_u64().unwrap_or(0) > 0 {
+ if parsed["summary"]["by_priority"]["high"]
+ .as_u64()
+ .unwrap_or(0)
+ > 0
+ || parsed["summary"]["by_priority"]["critical"]
+ .as_u64()
+ .unwrap_or(0)
+ > 0
+ {
assert!(parsed["quick_fixes"].is_array());
}
}
diff --git a/src/agent/tools/mod.rs b/src/agent/tools/mod.rs
index 8c5c6680..86298952 100644
--- a/src/agent/tools/mod.rs
+++ b/src/agent/tools/mod.rs
@@ -19,6 +19,7 @@
//!
//! ### Linting
//! - `HadolintTool` - Native Dockerfile linting (best practices, security)
+//! - `DclintTool` - Native Docker Compose linting (best practices, style, security)
//!
//! ### Diagnostics
//! - `DiagnosticsTool` - Check for code errors via IDE/LSP or language-specific commands
@@ -38,6 +39,7 @@
//! - `PlanListTool` - List all available plan files
//!
mod analyze;
+mod dclint;
mod diagnostics;
mod file_ops;
mod hadolint;
@@ -50,6 +52,7 @@ mod truncation;
pub use truncation::TruncationLimits;
pub use analyze::AnalyzeTool;
+pub use dclint::DclintTool;
pub use diagnostics::DiagnosticsTool;
pub use file_ops::{ListDirectoryTool, ReadFileTool, WriteFileTool, WriteFilesTool};
pub use hadolint::HadolintTool;
diff --git a/src/agent/tools/plan.rs b/src/agent/tools/plan.rs
index e79c6192..12e525ee 100644
--- a/src/agent/tools/plan.rs
+++ b/src/agent/tools/plan.rs
@@ -30,10 +30,10 @@ use std::path::PathBuf;
/// Task status in a plan file
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TaskStatus {
- Pending, // [ ]
- InProgress, // [~]
- Done, // [x]
- Failed, // [!]
+ Pending, // [ ]
+ InProgress, // [~]
+ Done, // [x]
+ Failed, // [!]
}
impl TaskStatus {
@@ -60,10 +60,10 @@ impl TaskStatus {
/// A task parsed from a plan file
#[derive(Debug, Clone)]
pub struct PlanTask {
- pub index: usize, // 1-based index
+ pub index: usize, // 1-based index
pub status: TaskStatus,
pub description: String,
- pub line_number: usize, // Line number in file (1-based)
+ pub line_number: usize, // Line number in file (1-based)
}
// ============================================================================
@@ -103,7 +103,12 @@ fn parse_plan_tasks(content: &str) -> Vec<PlanTask> {
}
/// Update a task's status in the plan file content
-fn update_task_status(content: &str, task_index: usize, new_status: TaskStatus, note: Option<&str>) -> Option<String> {
+fn update_task_status(
+ content: &str,
+ task_index: usize,
+ new_status: TaskStatus,
+ note: Option<&str>,
+) -> Option<String> {
let task_regex = Regex::new(r"^(\s*)-\s*\[[ x~!]\]\s*(.+)$").unwrap();
let mut current_index = 0;
let mut lines: Vec<String> = content.lines().map(String::from).collect();
@@ -120,7 +125,13 @@ fn update_task_status(content: &str, task_index: usize, new_status: TaskStatus,
// Build new line with updated status
let new_line = if new_status == TaskStatus::Failed {
let fail_note = note.unwrap_or("unknown reason");
- format!("{}- {} {} (FAILED: {})", indent, new_status.marker(), desc, fail_note)
+ format!(
+ "{}- {} {} (FAILED: {})",
+ indent,
+ new_status.marker(),
+ desc,
+ fail_note
+ )
} else {
format!("{}- {} {}", indent, new_status.marker(), desc)
};
@@ -233,7 +244,8 @@ The task status markers are:
let tasks = parse_plan_tasks(&args.content);
if tasks.is_empty() {
return Err(PlanCreateError(
- "Plan must contain at least one task with format: '- [ ] Task description'".to_string()
+ "Plan must contain at least one task with format: '- [ ] Task description'"
+ .to_string(),
));
}
@@ -263,7 +275,8 @@ The task status markers are:
.map_err(|e| PlanCreateError(format!("Failed to write plan file: {}", e)))?;
// Get relative path for display
- let rel_path = file_path.strip_prefix(&self.project_path)
+ let rel_path = file_path
+ .strip_prefix(&self.project_path)
.map(|p| p.display().to_string())
.unwrap_or_else(|_| file_path.display().to_string());
@@ -339,7 +352,8 @@ This tool:
After executing the task, use `plan_update` to mark it as done or failed.
-Returns null task if all tasks are complete."#.to_string(),
+Returns null task if all tasks are complete."#
+ .to_string(),
parameters: json!({
"type": "object",
"properties": {
@@ -372,17 +386,28 @@ Returns null task if all tasks are complete."#.to_string(),
match pending_task {
Some(task) => {
// Update task to in-progress
- let updated_content = update_task_status(&content, task.index, TaskStatus::InProgress, None)
- .ok_or_else(|| PlanNextError("Failed to update task status".to_string()))?;
+ let updated_content =
+ update_task_status(&content, task.index, TaskStatus::InProgress, None)
+ .ok_or_else(|| PlanNextError("Failed to update task status".to_string()))?;
// Write updated content
fs::write(&file_path, &updated_content)
.map_err(|e| PlanNextError(format!("Failed to write plan file: {}", e)))?;
// Count task states
- let done_count = tasks.iter().filter(|t| t.status == TaskStatus::Done).count();
- let pending_count = tasks.iter().filter(|t| t.status == TaskStatus::Pending).count() - 1; // -1 for current
- let failed_count = tasks.iter().filter(|t| t.status == TaskStatus::Failed).count();
+ let done_count = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::Done)
+ .count();
+ let pending_count = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::Pending)
+ .count()
+ - 1; // -1 for current
+ let failed_count = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::Failed)
+ .count();
let result = json!({
"has_task": true,
@@ -401,9 +426,18 @@ Returns null task if all tasks are complete."#.to_string(),
}
None => {
// No pending tasks - check if all done
- let done_count = tasks.iter().filter(|t| t.status == TaskStatus::Done).count();
- let failed_count = tasks.iter().filter(|t| t.status == TaskStatus::Failed).count();
- let in_progress = tasks.iter().filter(|t| t.status == TaskStatus::InProgress).count();
+ let done_count = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::Done)
+ .count();
+ let failed_count = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::Failed)
+ .count();
+ let in_progress = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::InProgress)
+ .count();
let result = json!({
"has_task": false,
@@ -484,7 +518,8 @@ Use this after completing or failing a task to update its status:
- "failed" - Mark task as failed `[!]` (include a note explaining why)
- "pending" - Reset task to pending `[ ]`
-After marking a task done, call `plan_next` to get the next task."#.to_string(),
+After marking a task done, call `plan_next` to get the next task."#
+ .to_string(),
parameters: json!({
"type": "object",
"properties": {
@@ -523,29 +558,27 @@ After marking a task done, call `plan_next` to get the next task."#.to_string(),
"done" => TaskStatus::Done,
"failed" => TaskStatus::Failed,
"pending" => TaskStatus::Pending,
- _ => return Err(PlanUpdateError(format!(
- "Invalid status '{}'. Use: done, failed, or pending",
- args.status
- ))),
+ _ => {
+ return Err(PlanUpdateError(format!(
+ "Invalid status '{}'. Use: done, failed, or pending",
+ args.status
+ )));
+ }
};
// Require note for failed status
if new_status == TaskStatus::Failed && args.note.is_none() {
return Err(PlanUpdateError(
- "A note is required when marking a task as failed".to_string()
+ "A note is required when marking a task as failed".to_string(),
));
}
// Update task status
- let updated_content = update_task_status(
- &content,
- args.task_index,
- new_status,
- args.note.as_deref(),
- ).ok_or_else(|| PlanUpdateError(format!(
- "Task {} not found in plan",
- args.task_index
- )))?;
+ let updated_content =
+ update_task_status(&content, args.task_index, new_status, args.note.as_deref())
+ .ok_or_else(|| {
+ PlanUpdateError(format!("Task {} not found in plan", args.task_index))
+ })?;
// Write updated content
fs::write(&file_path, &updated_content)
@@ -553,9 +586,18 @@ After marking a task done, call `plan_next` to get the next task."#.to_string(),
// Parse updated tasks for summary
let tasks = parse_plan_tasks(&updated_content);
- let done_count = tasks.iter().filter(|t| t.status == TaskStatus::Done).count();
- let pending_count = tasks.iter().filter(|t| t.status == TaskStatus::Pending).count();
- let failed_count = tasks.iter().filter(|t| t.status == TaskStatus::Failed).count();
+ let done_count = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::Done)
+ .count();
+ let pending_count = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::Pending)
+ .count();
+ let failed_count = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::Failed)
+ .count();
let result = json!({
"success": true,
@@ -622,7 +664,8 @@ impl Tool for PlanListTool {
Shows each plan with:
- Filename and path
- Task counts (done/pending/failed)
-- Overall status"#.to_string(),
+- Overall status"#
+ .to_string(),
parameters: json!({
"type": "object",
"properties": {
@@ -659,10 +702,22 @@ Shows each plan with:
if path.extension().map(|e| e == "md").unwrap_or(false) {
if let Ok(content) = fs::read_to_string(&path) {
let tasks = parse_plan_tasks(&content);
- let done = tasks.iter().filter(|t| t.status == TaskStatus::Done).count();
- let pending = tasks.iter().filter(|t| t.status == TaskStatus::Pending).count();
- let in_progress = tasks.iter().filter(|t| t.status == TaskStatus::InProgress).count();
- let failed = tasks.iter().filter(|t| t.status == TaskStatus::Failed).count();
+ let done = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::Done)
+ .count();
+ let pending = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::Pending)
+ .count();
+ let in_progress = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::InProgress)
+ .count();
+ let failed = tasks
+ .iter()
+ .filter(|t| t.status == TaskStatus::Failed)
+ .count();
// Apply filter
let include = match filter {
@@ -672,7 +727,8 @@ Shows each plan with:
};
if include {
- let rel_path = path.strip_prefix(&self.project_path)
+ let rel_path = path
+ .strip_prefix(&self.project_path)
.map(|p| p.display().to_string())
.unwrap_or_else(|_| path.display().to_string());
diff --git a/src/agent/tools/security.rs b/src/agent/tools/security.rs
index 7ddc545f..c1034e74 100644
--- a/src/agent/tools/security.rs
+++ b/src/agent/tools/security.rs
@@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize};
use serde_json::json;
use std::path::PathBuf;
-use crate::analyzer::security::turbo::{TurboSecurityAnalyzer, TurboConfig, ScanMode};
+use crate::analyzer::security::turbo::{ScanMode, TurboConfig, TurboSecurityAnalyzer};
// ============================================================================
// Security Scan Tool
@@ -82,8 +82,9 @@ impl Tool for SecurityScanTool {
let scanner = TurboSecurityAnalyzer::new(config)
.map_err(|e| SecurityScanError(format!("Failed to create scanner: {}", e)))?;
-
- let report = scanner.analyze_project(&path)
+
+ let report = scanner
+ .analyze_project(&path)
.map_err(|e| SecurityScanError(format!("Scan failed: {}", e)))?;
let result = json!({
@@ -145,7 +146,9 @@ impl Tool for VulnerabilitiesTool {
async fn definition(&self, _prompt: String) -> ToolDefinition {
ToolDefinition {
name: Self::NAME.to_string(),
- description: "Check the project's dependencies for known security vulnerabilities (CVEs).".to_string(),
+ description:
+ "Check the project's dependencies for known security vulnerabilities (CVEs)."
+ .to_string(),
parameters: json!({
"type": "object",
"properties": {
@@ -173,7 +176,8 @@ impl Tool for VulnerabilitiesTool {
return Ok(json!({
"message": "No dependencies found in project",
"total_vulnerabilities": 0
- }).to_string());
+ })
+ .to_string());
}
let checker = crate::analyzer::vulnerability::VulnerabilityChecker::new();
diff --git a/src/agent/tools/shell.rs b/src/agent/tools/shell.rs
index a004ad18..40bf525b 100644
--- a/src/agent/tools/shell.rs
+++ b/src/agent/tools/shell.rs
@@ -15,9 +15,9 @@
//! - Middle content is summarized with line count
//! - Long lines (>2000 chars) are truncated
-use crate::agent::ui::confirmation::{confirm_shell_command, AllowedCommands, ConfirmationResult};
+use super::truncation::{TruncationLimits, truncate_shell_output};
+use crate::agent::ui::confirmation::{AllowedCommands, ConfirmationResult, confirm_shell_command};
use crate::agent::ui::shell_output::StreamingShellOutput;
-use super::truncation::{truncate_shell_output, TruncationLimits};
use rig::completion::ToolDefinition;
use rig::tool::Tool;
use serde::Deserialize;
@@ -136,7 +136,10 @@ impl ShellTool {
}
/// Create with shared allowed commands state (for session persistence)
- pub fn with_allowed_commands(project_path: PathBuf, allowed_commands: Arc<AllowedCommands>) -> Self {
+ pub fn with_allowed_commands(
+ project_path: PathBuf,
+ allowed_commands: Arc<AllowedCommands>,
+ ) -> Self {
Self {
project_path,
allowed_commands,
@@ -159,9 +162,9 @@ impl ShellTool {
fn is_command_allowed(&self, command: &str) -> bool {
let trimmed = command.trim();
- ALLOWED_COMMANDS.iter().any(|allowed| {
- trimmed.starts_with(allowed) || trimmed == *allowed
- })
+ ALLOWED_COMMANDS
+ .iter()
+ .any(|allowed| trimmed.starts_with(allowed) || trimmed == *allowed)
}
/// Check if a command is read-only (safe for plan mode)
@@ -174,7 +177,20 @@ impl ShellTool {
}
// Block dangerous commands explicitly
- let dangerous = ["rm ", "rm\t", "rmdir", "mv ", "cp ", "mkdir ", "touch ", "chmod ", "chown ", "npm install", "yarn install", "pnpm install"];
+ let dangerous = [
+ "rm ",
+ "rm\t",
+ "rmdir",
+ "mv ",
+ "cp ",
+ "mkdir ",
+ "touch ",
+ "chmod ",
+ "chown ",
+ "npm install",
+ "yarn install",
+ "pnpm install",
+ ];
for d in dangerous {
if trimmed.contains(d) {
return false;
@@ -186,9 +202,7 @@ impl ShellTool {
let separators = ["&&", "||", "|", ";"];
let mut parts: Vec<&str> = vec![trimmed];
for sep in separators {
- parts = parts.iter()
- .flat_map(|p| p.split(sep))
- .collect();
+ parts = parts.iter().flat_map(|p| p.split(sep)).collect();
}
// Each part must be a read-only command
@@ -204,9 +218,9 @@ impl ShellTool {
}
// Check if this part starts with a read-only command
- let is_allowed = READ_ONLY_COMMANDS.iter().any(|allowed| {
- part.starts_with(allowed) || part == *allowed
- });
+ let is_allowed = READ_ONLY_COMMANDS
+ .iter()
+ .any(|allowed| part.starts_with(allowed) || part == *allowed);
if !is_allowed {
return false;
@@ -217,7 +231,9 @@ impl ShellTool {
}
fn validate_working_dir(&self, dir: &Option<String>) -> Result<PathBuf, ShellError> {
- let canonical_project = self.project_path.canonicalize()
+ let canonical_project = self
+ .project_path
+ .canonicalize()
.map_err(|e| ShellError(format!("Invalid project path: {}", e)))?;
let target = match dir {
@@ -232,11 +248,14 @@ impl ShellTool {
None => self.project_path.clone(),
};
- let canonical_target = target.canonicalize()
+ let canonical_target = target
+ .canonicalize()
.map_err(|e| ShellError(format!("Invalid working directory: {}", e)))?;
if !canonical_target.starts_with(&canonical_project) {
- return Err(ShellError("Working directory must be within project".to_string()));
+ return Err(ShellError(
+ "Working directory must be within project".to_string(),
+ ));
}
Ok(canonical_target)
@@ -321,8 +340,8 @@ Use this to validate generated configurations:
let timeout_secs = args.timeout_secs.unwrap_or(60).min(300);
// Check if confirmation is needed
- let needs_confirmation = self.require_confirmation
- && !self.allowed_commands.is_allowed(&args.command);
+ let needs_confirmation =
+ self.require_confirmation && !self.allowed_commands.is_allowed(&args.command);
if needs_confirmation {
// Show confirmation prompt
diff --git a/src/agent/tools/terraform.rs b/src/agent/tools/terraform.rs
index 2a622415..a3e62dbb 100644
--- a/src/agent/tools/terraform.rs
+++ b/src/agent/tools/terraform.rs
@@ -43,7 +43,10 @@ pub fn get_installation_instructions() -> (&'static str, &'static str, Vec<&'static str>) {
(
"macOS",
"Install Terraform using Homebrew",
- vec!["brew tap hashicorp/tap", "brew install hashicorp/tap/terraform"],
+ vec![
+ "brew tap hashicorp/tap",
+ "brew install hashicorp/tap/terraform",
+ ],
)
}
@@ -428,7 +431,12 @@ impl TerraformValidateTool {
}
}
- fn format_result(&self, validation_output: &str, success: bool, init_output: Option<&str>) -> String {
+ fn format_result(
+ &self,
+ validation_output: &str,
+ success: bool,
+ init_output: Option<&str>,
+ ) -> String {
// Try to parse JSON output from terraform validate -json
if let Ok(tf_json) = serde_json::from_str::<serde_json::Value>(validation_output) {
let valid = tf_json["valid"].as_bool().unwrap_or(false);
diff --git a/src/agent/ui/autocomplete.rs b/src/agent/ui/autocomplete.rs
index 131e3a02..e3bc70ab 100644
--- a/src/agent/ui/autocomplete.rs
+++ b/src/agent/ui/autocomplete.rs
@@ -4,8 +4,8 @@
//! - Slash command suggestions when user types "/"
//! - File path suggestions when user types "@"
-use inquire::autocompletion::{Autocomplete, Replacement};
use crate::agent::commands::SLASH_COMMANDS;
+use inquire::autocompletion::{Autocomplete, Replacement};
use std::path::PathBuf;
/// Autocomplete provider for slash commands and file references
@@ -58,7 +58,13 @@ impl SlashCommandAutocomplete {
for (i, c) in input.char_indices().rev() {
if c == '@' {
// Check if it's at the start or after a space
- if i == 0 || input.chars().nth(i - 1).map(|c| c.is_whitespace()).unwrap_or(false) {
+ if i == 0
+ || input
+ .chars()
+ .nth(i - 1)
+ .map(|c| c.is_whitespace())
+ .unwrap_or(false)
+ {
return Some(i);
}
}
@@ -71,7 +77,10 @@ impl SlashCommandAutocomplete {
if let Some(at_pos) = self.find_at_trigger(input) {
let after_at = &input[at_pos + 1..];
// Get everything until next space or end
- let filter: String = after_at.chars().take_while(|c| !c.is_whitespace()).collect();
+ let filter: String = after_at
+ .chars()
+ .take_while(|c| !c.is_whitespace())
+ .collect();
return Some(filter);
}
None
@@ -83,7 +92,13 @@ impl SlashCommandAutocomplete {
let filter_lower = filter.to_lowercase();
// Walk directory tree (limited depth)
- self.walk_dir(&self.project_path.clone(), &filter_lower, &mut results, 0, 4);
+ self.walk_dir(
+ &self.project_path.clone(),
+ &filter_lower,
+ &mut results,
+ 0,
+ 4,
+ );
// Sort by relevance (exact matches first, then by length)
results.sort_by(|a, b| {
@@ -101,13 +116,30 @@ impl SlashCommandAutocomplete {
}
/// Recursively walk directory for matching files
- fn walk_dir(&self, dir: &PathBuf, filter: &str, results: &mut Vec<String>, depth: usize, max_depth: usize) {
+ fn walk_dir(
+ &self,
+ dir: &PathBuf,
+ filter: &str,
+ results: &mut Vec<String>,
+ depth: usize,
+ max_depth: usize,
+ ) {
if depth > max_depth || results.len() >= 20 {
return;
}
// Skip common non-relevant directories
- let skip_dirs = ["node_modules", ".git", "target", "__pycache__", ".venv", "venv", "dist", "build", ".next"];
+ let skip_dirs = [
+ "node_modules",
+ ".git",
+ "target",
+ "__pycache__",
+ ".venv",
+ "venv",
+ "dist",
+ "build",
+ ".next",
+ ];
let entries = match std::fs::read_dir(dir) {
Ok(e) => e,
@@ -119,7 +151,10 @@ impl SlashCommandAutocomplete {
let file_name = entry.file_name().to_string_lossy().to_string();
// Skip hidden files/dirs (except .env, .gitignore, etc.)
- if file_name.starts_with('.') && !file_name.starts_with(".env") && !file_name.starts_with(".git") {
+ if file_name.starts_with('.')
+ && !file_name.starts_with(".env")
+ && !file_name.starts_with(".git")
+ {
continue;
}
@@ -129,12 +164,16 @@ impl SlashCommandAutocomplete {
}
} else {
// Get relative path from project root
- let rel_path = path.strip_prefix(&self.project_path)
+ let rel_path = path
+ .strip_prefix(&self.project_path)
.map(|p| p.to_string_lossy().to_string())
.unwrap_or_else(|_| file_name.clone());
// Match against filter
- if filter.is_empty() || rel_path.to_lowercase().contains(filter) || file_name.to_lowercase().contains(filter) {
+ if filter.is_empty()
+ || rel_path.to_lowercase().contains(filter)
+ || file_name.to_lowercase().contains(filter)
+ {
results.push(rel_path);
}
}
@@ -149,7 +188,8 @@ impl Autocomplete for SlashCommandAutocomplete {
self.mode = AutocompleteMode::File;
self.cached_files = self.search_files(&filter);
- let suggestions: Vec<String> = self.cached_files
+ let suggestions: Vec<String> = self
+ .cached_files
+ .cached_files
.iter()
.map(|f| format!("@{}", f))
.collect();
@@ -163,20 +203,28 @@ impl Autocomplete for SlashCommandAutocomplete {
let filter = input.trim_start_matches('/').to_lowercase();
// Store the command names for use in get_completion
- self.filtered_commands = SLASH_COMMANDS.iter()
+ self.filtered_commands = SLASH_COMMANDS
+ .iter()
.filter(|cmd| {
- cmd.name.to_lowercase().starts_with(&filter) ||
- cmd.alias.map(|a| a.to_lowercase().starts_with(&filter)).unwrap_or(false)
+ cmd.name.to_lowercase().starts_with(&filter)
+ || cmd
+ .alias
+ .map(|a| a.to_lowercase().starts_with(&filter))
+ .unwrap_or(false)
})
.take(6)
.map(|cmd| cmd.name)
.collect();
// Return formatted suggestions for display
- let suggestions: Vec<String> = SLASH_COMMANDS.iter()
+ let suggestions: Vec<String> = SLASH_COMMANDS
+ .iter()
.filter(|cmd| {
- cmd.name.to_lowercase().starts_with(&filter) ||
- cmd.alias.map(|a| a.to_lowercase().starts_with(&filter)).unwrap_or(false)
+ cmd.name.to_lowercase().starts_with(&filter)
+ || cmd
+ .alias
+ .map(|a| a.to_lowercase().starts_with(&filter))
+ .unwrap_or(false)
})
.take(6)
.map(|cmd| format!("/{:<12} {}", cmd.name, cmd.description))
diff --git a/src/agent/ui/colors.rs b/src/agent/ui/colors.rs
index 216d4bd6..0e8efcf2 100644
--- a/src/agent/ui/colors.rs
+++ b/src/agent/ui/colors.rs
@@ -60,13 +60,13 @@ pub mod ansi {
pub const SUCCESS: &str = "\x1b[38;5;114m"; // Green for success
// Hadolint/Docker specific colors (teal/docker-blue theme)
- pub const DOCKER_BLUE: &str = "\x1b[38;5;39m"; // Docker brand blue
- pub const TEAL: &str = "\x1b[38;5;30m"; // Teal for hadolint
- pub const CRITICAL: &str = "\x1b[38;5;196m"; // Bright red
- pub const HIGH: &str = "\x1b[38;5;208m"; // Orange
- pub const MEDIUM: &str = "\x1b[38;5;220m"; // Yellow
- pub const LOW: &str = "\x1b[38;5;114m"; // Green
- pub const INFO_BLUE: &str = "\x1b[38;5;75m"; // Light blue for info
+ pub const DOCKER_BLUE: &str = "\x1b[38;5;39m"; // Docker brand blue
+ pub const TEAL: &str = "\x1b[38;5;30m"; // Teal for hadolint
+ pub const CRITICAL: &str = "\x1b[38;5;196m"; // Bright red
+ pub const HIGH: &str = "\x1b[38;5;208m"; // Orange
+ pub const MEDIUM: &str = "\x1b[38;5;220m"; // Yellow
+ pub const LOW: &str = "\x1b[38;5;114m"; // Green
+ pub const INFO_BLUE: &str = "\x1b[38;5;75m"; // Light blue for info
}
/// Format a tool name for display
@@ -96,11 +96,7 @@ pub fn format_elapsed(seconds: u64) -> String {
/// Format a thinking/reasoning message
pub fn format_thinking(subject: &str) -> String {
- format!(
- "{} {}",
- icons::THINKING,
- subject.cyan().italic()
- )
+ format!("{} {}", icons::THINKING, subject.cyan().italic())
}
/// Format an info message
diff --git a/src/agent/ui/confirmation.rs b/src/agent/ui/confirmation.rs
index defe4498..edafc1f5 100644
--- a/src/agent/ui/confirmation.rs
+++ b/src/agent/ui/confirmation.rs
@@ -72,7 +72,15 @@ fn extract_command_prefix(command: &str) -> String {
}
// For compound commands like "docker build", "npm run", use first two words
- let compound_commands = ["docker", "terraform", "helm", "kubectl", "npm", "cargo", "go"];
+ let compound_commands = [
+ "docker",
+ "terraform",
+ "helm",
+ "kubectl",
+ "npm",
+ "cargo",
+ "go",
+ ];
if parts.len() >= 2 && compound_commands.contains(&parts[0]) {
format!("{} {}", parts[0], parts[1])
} else {
@@ -137,10 +145,7 @@ fn display_command_box(command: &str, working_dir: &str) {
/// 1. Yes - proceed once
/// 2. Yes, and don't ask again for this command type
/// 3. Type feedback to tell the agent what to do differently
-pub fn confirm_shell_command(
- command: &str,
- working_dir: &str,
-) -> ConfirmationResult {
+pub fn confirm_shell_command(command: &str, working_dir: &str) -> ConfirmationResult {
display_command_box(command, working_dir);
let prefix = extract_command_prefix(command);
@@ -151,7 +156,10 @@ pub fn confirm_shell_command(
let options = vec![
format!("Yes"),
- format!("Yes, and don't ask again for `{}` commands in {}", prefix, short_dir),
+ format!(
+ "Yes, and don't ask again for `{}` commands in {}",
+ prefix, short_dir
+ ),
format!("Type here to tell Syncable Agent what to do differently"),
];
@@ -196,7 +204,10 @@ mod tests {
#[test]
fn test_extract_command_prefix() {
- assert_eq!(extract_command_prefix("docker build -t test ."), "docker build");
+ assert_eq!(
+ extract_command_prefix("docker build -t test ."),
+ "docker build"
+ );
assert_eq!(extract_command_prefix("npm run test"), "npm run");
assert_eq!(extract_command_prefix("cargo build"), "cargo build");
assert_eq!(extract_command_prefix("make"), "make");
diff --git a/src/agent/ui/diff.rs b/src/agent/ui/diff.rs
index 18623090..15321154 100644
--- a/src/agent/ui/diff.rs
+++ b/src/agent/ui/diff.rs
@@ -43,7 +43,9 @@ pub fn render_diff(old_content: &str, new_content: &str, filename: &str) {
let header = format!(" {} ", filename);
let header_len = header.len();
let left_dashes = (inner_width.saturating_sub(header_len)) / 2;
- let right_dashes = inner_width.saturating_sub(header_len).saturating_sub(left_dashes);
+ let right_dashes = inner_width
+ .saturating_sub(header_len)
+ .saturating_sub(left_dashes);
println!(
"{}{}{}{}{}",
@@ -129,7 +131,9 @@ pub fn render_new_file(content: &str, filename: &str) {
let header = format!(" {} (new file) ", filename);
let header_len = header.len();
let left_dashes = (inner_width.saturating_sub(header_len)) / 2;
- let right_dashes = inner_width.saturating_sub(header_len).saturating_sub(left_dashes);
+ let right_dashes = inner_width
+ .saturating_sub(header_len)
+ .saturating_sub(left_dashes);
println!(
"{}{}{}{}{}",
diff --git a/src/agent/ui/hadolint_display.rs b/src/agent/ui/hadolint_display.rs
index 2dbd2168..5c370657 100644
--- a/src/agent/ui/hadolint_display.rs
+++ b/src/agent/ui/hadolint_display.rs
@@ -151,7 +151,13 @@ impl HadolintDisplay {
}
// Critical and High priority issues with details
- Self::print_priority_section(&mut handle, result, "critical", "Critical Issues", ansi::CRITICAL);
+ Self::print_priority_section(
+ &mut handle,
+ result,
+ "critical",
+ "Critical Issues",
+ ansi::CRITICAL,
+ );
Self::print_priority_section(&mut handle, result, "high", "High Priority", ansi::HIGH);
// Optionally show medium (collapsed)
@@ -228,13 +234,7 @@ impl HadolintDisplay {
// Show fix recommendation
if let Some(fix) = issue["fix"].as_str() {
- let _ = writeln!(
- handle,
- " {}ā {}{}",
- ansi::INFO_BLUE,
- fix,
- ansi::RESET
- );
+ let _ = writeln!(handle, " {}ā {}{}", ansi::INFO_BLUE, fix, ansi::RESET);
}
}
@@ -265,8 +265,12 @@ impl HadolintDisplay {
ansi::RESET
)
} else {
- let critical = parsed["summary"]["by_priority"]["critical"].as_u64().unwrap_or(0);
- let high = parsed["summary"]["by_priority"]["high"].as_u64().unwrap_or(0);
+ let critical = parsed["summary"]["by_priority"]["critical"]
+ .as_u64()
+ .unwrap_or(0);
+ let high = parsed["summary"]["by_priority"]["high"]
+ .as_u64()
+ .unwrap_or(0);
if critical > 0 {
format!(
diff --git a/src/agent/ui/hooks.rs b/src/agent/ui/hooks.rs
index ca48c686..a01ee3b3 100644
--- a/src/agent/ui/hooks.rs
+++ b/src/agent/ui/hooks.rs
@@ -153,7 +153,8 @@ where
async move {
// Print tool result and get the output info
- let (status_ok, output_lines, is_collapsible) = print_tool_result(&name, &args_str, &result_str);
+ let (status_ok, output_lines, is_collapsible) =
+ print_tool_result(&name, &args_str, &result_str);
// Update state
let mut s = state.lock().await;
@@ -187,12 +188,15 @@ where
// Check if response contains tool calls - if so, any text is "thinking"
// If no tool calls, this is the final response - don't show as thinking
- let has_tool_calls = response.choice.iter().any(|content| {
- matches!(content, AssistantContent::ToolCall(_))
- });
+ let has_tool_calls = response
+ .choice
+ .iter()
+ .any(|content| matches!(content, AssistantContent::ToolCall(_)));
// Extract reasoning content (GPT-5.2 thinking summaries)
- let reasoning_parts: Vec = response.choice.iter()
+ let reasoning_parts: Vec = response
+ .choice
+ .iter()
.filter_map(|content| {
if let AssistantContent::Reasoning(Reasoning { reasoning, .. }) = content {
// Join all reasoning strings
@@ -209,7 +213,9 @@ where
.collect();
// Extract text content from the response (for non-reasoning models)
- let text_parts: Vec = response.choice.iter()
+ let text_parts: Vec = response
+ .choice
+ .iter()
.filter_map(|content| {
if let AssistantContent::Text(text) = content {
// Filter out empty or whitespace-only text
@@ -285,14 +291,22 @@ fn print_agent_thinking(text: &str) {
// Handle code blocks
if trimmed.starts_with("```") {
if in_code_block {
- println!("{} āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā{}", brand::LIGHT_PEACH, brand::RESET);
+ println!(
+ "{} āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā{}",
+ brand::LIGHT_PEACH,
+ brand::RESET
+ );
in_code_block = false;
} else {
let lang = trimmed.strip_prefix("```").unwrap_or("");
let lang_display = if lang.is_empty() { "code" } else { lang };
println!(
"{} āā {}{}{} āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā{}",
- brand::LIGHT_PEACH, brand::CYAN, lang_display, brand::LIGHT_PEACH, brand::RESET
+ brand::LIGHT_PEACH,
+ brand::CYAN,
+ lang_display,
+ brand::LIGHT_PEACH,
+ brand::RESET
);
in_code_block = true;
}
@@ -300,22 +314,46 @@ fn print_agent_thinking(text: &str) {
}
if in_code_block {
- println!("{} ā {}{}{} ā", brand::LIGHT_PEACH, brand::CYAN, line, brand::RESET);
+ println!(
+ "{} ā {}{}{} ā",
+ brand::LIGHT_PEACH,
+ brand::CYAN,
+ line,
+ brand::RESET
+ );
continue;
}
// Handle bullet points
if trimmed.starts_with("- ") || trimmed.starts_with("* ") {
- let content = trimmed.strip_prefix("- ").or_else(|| trimmed.strip_prefix("* ")).unwrap_or(trimmed);
- println!("{} {} {}{}", brand::PEACH, "ā¢", format_thinking_inline(content), brand::RESET);
+ let content = trimmed
+ .strip_prefix("- ")
+ .or_else(|| trimmed.strip_prefix("* "))
+ .unwrap_or(trimmed);
+ println!(
+ "{} {} {}{}",
+ brand::PEACH,
+ "ā¢",
+ format_thinking_inline(content),
+ brand::RESET
+ );
continue;
}
// Handle numbered lists
- if trimmed.chars().next().map(|c| c.is_ascii_digit()).unwrap_or(false)
+ if trimmed
+ .chars()
+ .next()
+ .map(|c| c.is_ascii_digit())
+ .unwrap_or(false)
&& trimmed.chars().nth(1) == Some('.')
{
- println!("{} {}{}", brand::PEACH, format_thinking_inline(trimmed), brand::RESET);
+ println!(
+ "{} {}{}",
+ brand::PEACH,
+ format_thinking_inline(trimmed),
+ brand::RESET
+ );
continue;
}
@@ -326,7 +364,12 @@ fn print_agent_thinking(text: &str) {
// Word wrap long lines
let wrapped = wrap_text(trimmed, 76);
for wrapped_line in wrapped {
- println!("{} {}{}", brand::PEACH, format_thinking_inline(&wrapped_line), brand::RESET);
+ println!(
+ "{} {}{}",
+ brand::PEACH,
+ format_thinking_inline(&wrapped_line),
+ brand::RESET
+ );
}
}
}
@@ -432,7 +475,12 @@ fn print_tool_header(name: &str, args: &str) {
if args_display.is_empty() {
println!("\n{} {}", "ā".yellow(), name.cyan().bold());
} else {
- println!("\n{} {}({})", "ā".yellow(), name.cyan().bold(), args_display.dimmed());
+ println!(
+ "\n{} {}({})",
+ "ā".yellow(),
+ name.cyan().bold(),
+ args_display.dimmed()
+ );
}
// Print running indicator
@@ -449,8 +497,8 @@ fn print_tool_result(name: &str, args: &str, result: &str) -> (bool, Vec
let _ = io::stdout().flush();
// Parse the result - handle potential double-encoding from Rig
- let parsed: Result = serde_json::from_str(result)
- .map(|v: serde_json::Value| {
+ let parsed: Result =
+ serde_json::from_str(result).map(|v: serde_json::Value| {
// If the parsed value is a string, it might be double-encoded JSON
// Try to parse the inner string, but fall back to original if it fails
if let Some(inner_str) = v.as_str() {
@@ -476,7 +524,11 @@ fn print_tool_result(name: &str, args: &str, result: &str) -> (bool, Vec
print!("{}{}", ansi::CURSOR_UP, ansi::CLEAR_LINE);
// Reprint header with green/red dot and args
- let dot = if status_ok { "ā".green() } else { "ā".red() };
+ let dot = if status_ok {
+ "ā".green()
+ } else {
+ "ā".red()
+ };
// Format args for display (same logic as print_tool_header)
let args_parsed: Result = serde_json::from_str(args);
@@ -515,18 +567,27 @@ fn print_tool_result(name: &str, args: &str, result: &str) -> (bool, Vec
}
/// Format args for display based on tool type
-fn format_args_display(name: &str, parsed: &Result) -> String {
+fn format_args_display(
+ name: &str,
+ parsed: &Result,
+) -> String {
match name {
"shell" => {
if let Ok(v) = parsed {
- v.get("command").and_then(|c| c.as_str()).unwrap_or("").to_string()
+ v.get("command")
+ .and_then(|c| c.as_str())
+ .unwrap_or("")
+ .to_string()
} else {
String::new()
}
}
"write_file" => {
if let Ok(v) = parsed {
- v.get("path").and_then(|p| p.as_str()).unwrap_or("").to_string()
+ v.get("path")
+ .and_then(|p| p.as_str())
+ .unwrap_or("")
+ .to_string()
} else {
String::new()
}
@@ -554,14 +615,20 @@ fn format_args_display(name: &str, parsed: &Result {
if let Ok(v) = parsed {
- v.get("path").and_then(|p| p.as_str()).unwrap_or("").to_string()
+ v.get("path")
+ .and_then(|p| p.as_str())
+ .unwrap_or("")
+ .to_string()
} else {
String::new()
}
}
"list_directory" => {
if let Ok(v) = parsed {
- v.get("path").and_then(|p| p.as_str()).unwrap_or(".").to_string()
+ v.get("path")
+ .and_then(|p| p.as_str())
+ .unwrap_or(".")
+ .to_string()
} else {
".".to_string()
}
@@ -571,7 +638,9 @@ fn format_args_display(name: &str, parsed: &Result) -> (bool, Vec) {
+fn format_shell_result(
+ parsed: &Result,
+) -> (bool, Vec) {
if let Ok(v) = parsed {
let success = v.get("success").and_then(|s| s.as_bool()).unwrap_or(false);
let stdout = v.get("stdout").and_then(|s| s.as_str()).unwrap_or("");
@@ -600,7 +669,11 @@ fn format_shell_result(parsed: &Result) ->
}
if lines.is_empty() {
- lines.push(if success { "completed".to_string() } else { "failed".to_string() });
+ lines.push(if success {
+ "completed".to_string()
+ } else {
+ "failed".to_string()
+ });
}
(success, lines)
@@ -610,18 +683,24 @@ fn format_shell_result(parsed: &Result) ->
}
/// Format write file result
-fn format_write_result(parsed: &Result) -> (bool, Vec) {
+fn format_write_result(
+ parsed: &Result,
+) -> (bool, Vec) {
if let Ok(v) = parsed {
let success = v.get("success").and_then(|s| s.as_bool()).unwrap_or(false);
let action = v.get("action").and_then(|a| a.as_str()).unwrap_or("wrote");
- let lines_written = v.get("lines_written")
+ let lines_written = v
+ .get("lines_written")
.or_else(|| v.get("total_lines"))
.and_then(|n| n.as_u64())
.unwrap_or(0);
let files_written = v.get("files_written").and_then(|n| n.as_u64()).unwrap_or(1);
let msg = if files_written > 1 {
- format!("{} {} files ({} lines)", action, files_written, lines_written)
+ format!(
+ "{} {} files ({} lines)",
+ action, files_written, lines_written
+ )
} else {
format!("{} ({} lines)", action, lines_written)
};
@@ -633,11 +712,16 @@ fn format_write_result(parsed: &Result) ->
}
/// Format read file result
-fn format_read_result(parsed: &Result) -> (bool, Vec) {
+fn format_read_result(
+ parsed: &Result,
+) -> (bool, Vec) {
if let Ok(v) = parsed {
// Handle error field
if v.get("error").is_some() {
- let error_msg = v.get("error").and_then(|e| e.as_str()).unwrap_or("file not found");
+ let error_msg = v
+ .get("error")
+ .and_then(|e| e.as_str())
+ .unwrap_or("file not found");
return (false, vec![error_msg.to_string()]);
}
@@ -671,7 +755,9 @@ fn format_read_result(parsed: &Result) ->
}
/// Format list directory result
-fn format_list_result(parsed: &Result) -> (bool, Vec) {
+fn format_list_result(
+ parsed: &Result,
+) -> (bool, Vec) {
if let Ok(v) = parsed {
let entries = v.get("entries").and_then(|e| e.as_array());
@@ -682,7 +768,11 @@ fn format_list_result(parsed: &Result) ->
for entry in entries.iter().take(PREVIEW_LINES + 2) {
let name = entry.get("name").and_then(|n| n.as_str()).unwrap_or("?");
let entry_type = entry.get("type").and_then(|t| t.as_str()).unwrap_or("file");
- let prefix = if entry_type == "directory" { "š" } else { "š" };
+ let prefix = if entry_type == "directory" {
+ "š"
+ } else {
+ "š"
+ };
lines.push(format!("{} {}", prefix, name));
}
// Add count if there are more entries than shown
@@ -702,7 +792,9 @@ fn format_list_result(parsed: &Result) ->
}
/// Format analyze result
-fn format_analyze_result(parsed: &Result) -> (bool, Vec) {
+fn format_analyze_result(
+ parsed: &Result,
+) -> (bool, Vec) {
if let Ok(v) = parsed {
let mut lines = Vec::new();
@@ -741,9 +833,12 @@ fn format_analyze_result(parsed: &Result)
}
/// Format security scan result
-fn format_security_result(parsed: &Result) -> (bool, Vec) {
+fn format_security_result(
+ parsed: &Result,
+) -> (bool, Vec) {
if let Ok(v) = parsed {
- let findings = v.get("findings")
+ let findings = v
+ .get("findings")
.or_else(|| v.get("vulnerabilities"))
.and_then(|f| f.as_array())
.map(|a| a.len())
@@ -760,7 +855,9 @@ fn format_security_result(parsed: &Result)
}
/// Format hadolint result - uses new priority-based format with Docker styling
-fn format_hadolint_result(parsed: &Result) -> (bool, Vec) {
+fn format_hadolint_result(
+ parsed: &Result,
+) -> (bool, Vec) {
if let Ok(v) = parsed {
let success = v.get("success").and_then(|s| s.as_bool()).unwrap_or(true);
let summary = v.get("summary");
@@ -778,7 +875,8 @@ fn format_hadolint_result(parsed: &Result)
if total == 0 {
lines.push(format!(
"{}š³ Dockerfile OK - no issues found{}",
- ansi::SUCCESS, ansi::RESET
+ ansi::SUCCESS,
+ ansi::RESET
));
return (true, lines);
}
@@ -808,13 +906,23 @@ fn format_hadolint_result(parsed: &Result)
// Summary with priority breakdown
let mut priority_parts = Vec::new();
if critical > 0 {
- priority_parts.push(format!("{}š“ {} critical{}", ansi::CRITICAL, critical, ansi::RESET));
+ priority_parts.push(format!(
+ "{}š“ {} critical{}",
+ ansi::CRITICAL,
+ critical,
+ ansi::RESET
+ ));
}
if high > 0 {
priority_parts.push(format!("{}š {} high{}", ansi::HIGH, high, ansi::RESET));
}
if medium > 0 {
- priority_parts.push(format!("{}š” {} medium{}", ansi::MEDIUM, medium, ansi::RESET));
+ priority_parts.push(format!(
+ "{}š” {} medium{}",
+ ansi::MEDIUM,
+ medium,
+ ansi::RESET
+ ));
}
if low > 0 {
priority_parts.push(format!("{}š¢ {} low{}", ansi::LOW, low, ansi::RESET));
@@ -875,7 +983,9 @@ fn format_hadolint_result(parsed: &Result)
};
lines.push(format!(
"{} ā Fix: {}{}",
- ansi::INFO_BLUE, truncated, ansi::RESET
+ ansi::INFO_BLUE,
+ truncated,
+ ansi::RESET
));
}
}
@@ -923,8 +1033,14 @@ fn format_hadolint_issue(issue: &serde_json::Value, icon: &str, color: &str) ->
format!(
"{}{} L{}:{} {}{}[{}]{} {} {}",
- color, icon, line_num, ansi::RESET,
- ansi::DOCKER_BLUE, ansi::BOLD, code, ansi::RESET,
+ color,
+ icon,
+ line_num,
+ ansi::RESET,
+ ansi::DOCKER_BLUE,
+ ansi::BOLD,
+ code,
+ ansi::RESET,
badge,
msg_display
)
diff --git a/src/agent/ui/input.rs b/src/agent/ui/input.rs
index 6b1987d8..f50b31fb 100644
--- a/src/agent/ui/input.rs
+++ b/src/agent/ui/input.rs
@@ -92,8 +92,13 @@ impl InputState {
// Check if we should trigger completion
if c == '@' {
- let valid_trigger = self.cursor == 1 ||
- self.text.chars().nth(self.cursor - 2).map(|c| c.is_whitespace()).unwrap_or(false);
+ let valid_trigger = self.cursor == 1
+ || self
+ .text
+ .chars()
+ .nth(self.cursor - 2)
+ .map(|c| c.is_whitespace())
+ .unwrap_or(false);
if valid_trigger {
self.completion_start = Some(self.cursor - 1);
self.refresh_suggestions();
@@ -202,7 +207,8 @@ impl InputState {
/// Convert character position to byte position
fn char_to_byte_pos(&self, char_pos: usize) -> usize {
- self.text.char_indices()
+ self.text
+ .char_indices()
.nth(char_pos)
.map(|(i, _)| i)
.unwrap_or(self.text.len())
@@ -213,7 +219,11 @@ impl InputState {
self.completion_start.map(|start| {
let filter_start = start + 1; // Skip the @ or /
if filter_start <= self.cursor {
- self.text.chars().skip(filter_start).take(self.cursor - filter_start).collect()
+ self.text
+ .chars()
+ .skip(filter_start)
+ .take(self.cursor - filter_start)
+ .collect()
} else {
String::new()
}
@@ -223,7 +233,8 @@ impl InputState {
/// Refresh suggestions based on current filter
fn refresh_suggestions(&mut self) {
let filter = self.get_filter().unwrap_or_default();
- let trigger = self.completion_start
+ let trigger = self
+ .completion_start
.and_then(|pos| self.text.chars().nth(pos));
self.suggestions = match trigger {
@@ -241,15 +252,19 @@ impl InputState {
let mut results = Vec::new();
let filter_lower = filter.to_lowercase();
- self.walk_dir(&self.project_path.clone(), &filter_lower, &mut results, 0, 4);
+ self.walk_dir(
+ &self.project_path.clone(),
+ &filter_lower,
+ &mut results,
+ 0,
+ 4,
+ );
// Sort: directories first, then by path length
- results.sort_by(|a, b| {
- match (a.is_dir, b.is_dir) {
- (true, false) => std::cmp::Ordering::Less,
- (false, true) => std::cmp::Ordering::Greater,
- _ => a.value.len().cmp(&b.value.len()),
- }
+ results.sort_by(|a, b| match (a.is_dir, b.is_dir) {
+ (true, false) => std::cmp::Ordering::Less,
+ (false, true) => std::cmp::Ordering::Greater,
+ _ => a.value.len().cmp(&b.value.len()),
});
results.truncate(8);
@@ -257,12 +272,29 @@ impl InputState {
}
/// Walk directory tree for matching files
- fn walk_dir(&self, dir: &PathBuf, filter: &str, results: &mut Vec, depth: usize, max_depth: usize) {
+ fn walk_dir(
+ &self,
+ dir: &PathBuf,
+ filter: &str,
+ results: &mut Vec,
+ depth: usize,
+ max_depth: usize,
+ ) {
if depth > max_depth || results.len() >= 20 {
return;
}
- let skip_dirs = ["node_modules", ".git", "target", "__pycache__", ".venv", "venv", "dist", "build", ".next"];
+ let skip_dirs = [
+ "node_modules",
+ ".git",
+ "target",
+ "__pycache__",
+ ".venv",
+ "venv",
+ "dist",
+ "build",
+ ".next",
+ ];
let entries = match std::fs::read_dir(dir) {
Ok(e) => e,
@@ -274,17 +306,24 @@ impl InputState {
let file_name = entry.file_name().to_string_lossy().to_string();
// Skip hidden files (except some)
- if file_name.starts_with('.') && !file_name.starts_with(".env") && file_name != ".gitignore" {
+ if file_name.starts_with('.')
+ && !file_name.starts_with(".env")
+ && file_name != ".gitignore"
+ {
continue;
}
- let rel_path = path.strip_prefix(&self.project_path)
+ let rel_path = path
+ .strip_prefix(&self.project_path)
.map(|p| p.to_string_lossy().to_string())
.unwrap_or_else(|_| file_name.clone());
let is_dir = path.is_dir();
- if filter.is_empty() || rel_path.to_lowercase().contains(filter) || file_name.to_lowercase().contains(filter) {
+ if filter.is_empty()
+ || rel_path.to_lowercase().contains(filter)
+ || file_name.to_lowercase().contains(filter)
+ {
let display = if is_dir {
format!("{}/", rel_path)
} else {
@@ -307,10 +346,14 @@ impl InputState {
fn search_commands(&self, filter: &str) -> Vec {
let filter_lower = filter.to_lowercase();
- SLASH_COMMANDS.iter()
+ SLASH_COMMANDS
+ .iter()
.filter(|cmd| {
- cmd.name.to_lowercase().starts_with(&filter_lower) ||
- cmd.alias.map(|a| a.to_lowercase().starts_with(&filter_lower)).unwrap_or(false)
+ cmd.name.to_lowercase().starts_with(&filter_lower)
+ || cmd
+ .alias
+ .map(|a| a.to_lowercase().starts_with(&filter_lower))
+ .unwrap_or(false)
})
.take(8)
.map(|cmd| Suggestion {
@@ -486,7 +529,10 @@ fn render(state: &mut InputState, prompt: &str, stdout: &mut io::Stdout) -> io::
// Move up to clear previous rendered lines, then to column 0
if state.prev_wrapped_lines > 1 {
- execute!(stdout, cursor::MoveUp((state.prev_wrapped_lines - 1) as u16))?;
+ execute!(
+ stdout,
+ cursor::MoveUp((state.prev_wrapped_lines - 1) as u16)
+ )?;
}
execute!(stdout, cursor::MoveToColumn(0))?;
@@ -497,9 +543,23 @@ fn render(state: &mut InputState, prompt: &str, stdout: &mut io::Stdout) -> io::
// In raw mode, \n doesn't return to column 0, so we need \r\n
let display_text = state.text.replace('\n', "\r\n");
if state.plan_mode {
- print!("{}ā
{} {}{}{} {}", ansi::ORANGE, ansi::RESET, ansi::SUCCESS, prompt, ansi::RESET, display_text);
+ print!(
+ "{}ā
{} {}{}{} {}",
+ ansi::ORANGE,
+ ansi::RESET,
+ ansi::SUCCESS,
+ prompt,
+ ansi::RESET,
+ display_text
+ );
} else {
- print!("{}{}{} {}", ansi::SUCCESS, prompt, ansi::RESET, display_text);
+ print!(
+ "{}{}{} {}",
+ ansi::SUCCESS,
+ prompt,
+ ansi::RESET,
+ display_text
+ );
}
stdout.flush()?;
@@ -534,18 +594,40 @@ fn render(state: &mut InputState, prompt: &str, stdout: &mut io::Stdout) -> io::
if is_selected {
if suggestion.is_dir {
- print!(" {}{} {}{}\r\n", ansi::CYAN, prefix, suggestion.display, ansi::RESET);
+ print!(
+ " {}{} {}{}\r\n",
+ ansi::CYAN,
+ prefix,
+ suggestion.display,
+ ansi::RESET
+ );
} else {
- print!(" {}{} {}{}\r\n", ansi::WHITE, prefix, suggestion.display, ansi::RESET);
+ print!(
+ " {}{} {}{}\r\n",
+ ansi::WHITE,
+ prefix,
+ suggestion.display,
+ ansi::RESET
+ );
}
} else {
- print!(" {}{} {}{}\r\n", ansi::DIM, prefix, suggestion.display, ansi::RESET);
+ print!(
+ " {}{} {}{}\r\n",
+ ansi::DIM,
+ prefix,
+ suggestion.display,
+ ansi::RESET
+ );
}
lines_rendered += 1;
}
// Print hint
- print!(" {}[āā navigate, Enter select, Esc cancel]{}\r\n", ansi::DIM, ansi::RESET);
+ print!(
+ " {}[āā navigate, Enter select, Esc cancel]{}\r\n",
+ ansi::DIM,
+ ansi::RESET
+ );
lines_rendered += 1;
}
@@ -586,10 +668,7 @@ fn clear_suggestions(num_lines: usize, stdout: &mut io::Stdout) -> io::Result<()
if num_lines > 0 {
// Save position, clear lines below, restore
for _ in 0..num_lines {
- execute!(stdout,
- cursor::MoveDown(1),
- Clear(ClearType::CurrentLine)
- )?;
+ execute!(stdout, cursor::MoveDown(1), Clear(ClearType::CurrentLine))?;
}
execute!(stdout, MoveUp(num_lines as u16))?;
}
@@ -598,7 +677,11 @@ fn clear_suggestions(num_lines: usize, stdout: &mut io::Stdout) -> io::Result<()
/// Read user input with Claude Code-style @ file picker
/// If `plan_mode` is true, shows the plan mode indicator below the prompt
-pub fn read_input_with_file_picker(prompt: &str, project_path: &PathBuf, plan_mode: bool) -> InputResult {
+pub fn read_input_with_file_picker(
+ prompt: &str,
+ project_path: &PathBuf,
+ plan_mode: bool,
+) -> InputResult {
let mut stdout = io::stdout();
// Enable raw mode
@@ -611,7 +694,14 @@ pub fn read_input_with_file_picker(prompt: &str, project_path: &PathBuf, plan_mo
// Print prompt with mode indicator inline (no separate line)
if plan_mode {
- print!("{}ā
{} {}{}{} ", ansi::ORANGE, ansi::RESET, ansi::SUCCESS, prompt, ansi::RESET);
+ print!(
+ "{}ā
{} {}{}{} ",
+ ansi::ORANGE,
+ ansi::RESET,
+ ansi::SUCCESS,
+ prompt,
+ ansi::RESET
+ );
} else {
print!("{}{}{} ", ansi::SUCCESS, prompt, ansi::RESET);
}
@@ -636,8 +726,9 @@ pub fn read_input_with_file_picker(prompt: &str, project_path: &PathBuf, plan_mo
match key_event.code {
KeyCode::Enter => {
// Shift+Enter or Alt+Enter inserts newline instead of submitting
- if key_event.modifiers.contains(KeyModifiers::SHIFT) ||
- key_event.modifiers.contains(KeyModifiers::ALT) {
+ if key_event.modifiers.contains(KeyModifiers::SHIFT)
+ || key_event.modifiers.contains(KeyModifiers::ALT)
+ {
state.insert_char('\n');
} else if state.showing_suggestions && state.selected >= 0 {
// Accept selection, don't submit
@@ -707,11 +798,15 @@ pub fn read_input_with_file_picker(prompt: &str, project_path: &PathBuf, plan_mo
KeyCode::Right => {
state.cursor_right();
}
- KeyCode::Home | KeyCode::Char('a') if key_event.modifiers.contains(KeyModifiers::CONTROL) => {
+ KeyCode::Home | KeyCode::Char('a')
+ if key_event.modifiers.contains(KeyModifiers::CONTROL) =>
+ {
state.cursor_home();
state.close_suggestions();
}
- KeyCode::End | KeyCode::Char('e') if key_event.modifiers.contains(KeyModifiers::CONTROL) => {
+ KeyCode::End | KeyCode::Char('e')
+ if key_event.modifiers.contains(KeyModifiers::CONTROL) =>
+ {
state.cursor_end();
}
// Ctrl+U - Clear entire input
@@ -723,8 +818,10 @@ pub fn read_input_with_file_picker(prompt: &str, project_path: &PathBuf, plan_mo
state.delete_to_line_start();
}
// Ctrl+Shift+Backspace - Delete to beginning of current line (cross-platform)
- KeyCode::Backspace if key_event.modifiers.contains(KeyModifiers::CONTROL)
- && key_event.modifiers.contains(KeyModifiers::SHIFT) => {
+ KeyCode::Backspace
+ if key_event.modifiers.contains(KeyModifiers::CONTROL)
+ && key_event.modifiers.contains(KeyModifiers::SHIFT) =>
+ {
state.delete_to_line_start();
}
// Cmd+Backspace (Mac) - Delete to beginning of current line (if terminal passes it)
@@ -760,7 +857,8 @@ pub fn read_input_with_file_picker(prompt: &str, project_path: &PathBuf, plan_mo
// Only render if no more events are pending (batches rapid input like paste)
// This prevents thousands of renders during paste operations
- let should_render = !event::poll(std::time::Duration::from_millis(0)).unwrap_or(false);
+ let should_render =
+ !event::poll(std::time::Duration::from_millis(0)).unwrap_or(false);
if should_render {
state.rendered_lines = render(&mut state, prompt, &mut stdout).unwrap_or(0);
}
diff --git a/src/agent/ui/plan_menu.rs b/src/agent/ui/plan_menu.rs
index ce1a436e..5fc5e35e 100644
--- a/src/agent/ui/plan_menu.rs
+++ b/src/agent/ui/plan_menu.rs
@@ -111,7 +111,10 @@ pub fn show_plan_action_menu(plan_path: &str, task_count: usize) -> PlanActionRe
println!("{}", "ā Will execute plan with auto-accept".green());
PlanActionResult::ExecuteAutoAccept
} else if answer == options[1] {
- println!("{}", "ā Will execute plan with review for each change".yellow());
+ println!(
+ "{}",
+ "ā Will execute plan with review for each change".yellow()
+ );
PlanActionResult::ExecuteWithReview
} else {
// User wants to change the plan
diff --git a/src/agent/ui/response.rs b/src/agent/ui/response.rs
index e694cd95..47cce059 100644
--- a/src/agent/ui/response.rs
+++ b/src/agent/ui/response.rs
@@ -113,7 +113,11 @@ impl CodeBlockParser {
in_code_block = false;
} else {
// Start of code block
- current_lang = line.trim_start().strip_prefix("```").unwrap_or("").to_string();
+ current_lang = line
+ .trim_start()
+ .strip_prefix("```")
+ .unwrap_or("")
+ .to_string();
in_code_block = true;
}
} else if in_code_block {
@@ -133,7 +137,10 @@ impl CodeBlockParser {
});
}
- Self { markdown: result, blocks }
+ Self {
+ markdown: result,
+ blocks,
+ }
}
/// Get the processed markdown with placeholders
@@ -223,7 +230,10 @@ impl MarkdownFormat {
let rendered = self.skin.term_text(parsed.markdown()).to_string();
// Restore highlighted code blocks
- parsed.restore(&self.highlighter, rendered).trim().to_string()
+ parsed
+ .restore(&self.highlighter, rendered)
+ .trim()
+ .to_string()
}
}
@@ -254,11 +264,7 @@ impl ResponseFormatter {
/// Print the response header with Syncable styling
fn print_header() {
- print!(
- "{}{}āā š¤ Syncable AI ",
- brand::PURPLE,
- brand::BOLD
- );
+ print!("{}{}āā š¤ Syncable AI ", brand::PURPLE, brand::BOLD);
println!(
"{}āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā®{}",
brand::DIM,
@@ -283,7 +289,12 @@ impl SimpleResponse {
/// Print a simple AI response with minimal formatting
pub fn print(text: &str) {
println!();
- println!("{}{} Syncable AI:{}", brand::PURPLE, brand::BOLD, brand::RESET);
+ println!(
+ "{}{} Syncable AI:{}",
+ brand::PURPLE,
+ brand::BOLD,
+ brand::RESET
+ );
let formatter = MarkdownFormat::new();
println!("{}", formatter.render(text));
println!();
@@ -329,7 +340,11 @@ impl ToolProgress {
/// Mark the last tool as complete
pub fn tool_complete(&mut self, success: bool) {
if let Some(tool) = self.tools_executed.last_mut() {
- tool.status = if success { ToolStatus::Success } else { ToolStatus::Error };
+ tool.status = if success {
+ ToolStatus::Success
+ } else {
+ ToolStatus::Error
+ };
}
self.redraw();
}
@@ -358,7 +373,8 @@ impl ToolProgress {
/// Print final summary after all tools complete
pub fn print_summary(&self) {
if !self.tools_executed.is_empty() {
- let success_count = self.tools_executed
+ let success_count = self
+ .tools_executed
.iter()
.filter(|t| matches!(t.status, ToolStatus::Success))
.count();
diff --git a/src/agent/ui/shell_output.rs b/src/agent/ui/shell_output.rs
index c4fd1198..667db7b0 100644
--- a/src/agent/ui/shell_output.rs
+++ b/src/agent/ui/shell_output.rs
@@ -112,11 +112,7 @@ impl StreamingShellOutput {
line.clone()
};
- println!(
- " {} {}",
- prefix.dimmed(),
- display
- );
+ println!(" {} {}", prefix.dimmed(), display);
}
// Note: Removed the "Running..." status line - elapsed time is shown in header
}
@@ -127,7 +123,11 @@ impl StreamingShellOutput {
let mut stdout = io::stdout();
// Move cursor up and clear lines
for _ in 0..self.lines_rendered {
- let _ = execute!(stdout, cursor::MoveUp(1), terminal::Clear(terminal::ClearType::CurrentLine));
+ let _ = execute!(
+ stdout,
+ cursor::MoveUp(1),
+ terminal::Clear(terminal::ClearType::CurrentLine)
+ );
}
}
}
diff --git a/src/agent/ui/spinner.rs b/src/agent/ui/spinner.rs
index bc8eebe3..20489847 100644
--- a/src/agent/ui/spinner.rs
+++ b/src/agent/ui/spinner.rs
@@ -5,8 +5,8 @@
use crate::agent::ui::colors::{ansi, format_elapsed};
use std::io::{self, Write};
-use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, Ordering};
use std::time::{Duration, Instant};
use tokio::sync::mpsc;
@@ -96,7 +96,10 @@ impl Spinner {
/// Update the spinner text
pub async fn set_text(&self, text: &str) {
- let _ = self.sender.send(SpinnerMessage::UpdateText(text.to_string())).await;
+ let _ = self
+ .sender
+ .send(SpinnerMessage::UpdateText(text.to_string()))
+ .await;
}
/// Show tool executing status
@@ -122,7 +125,10 @@ impl Spinner {
/// Show thinking status
pub async fn thinking(&self, subject: &str) {
- let _ = self.sender.send(SpinnerMessage::Thinking(subject.to_string())).await;
+ let _ = self
+ .sender
+ .send(SpinnerMessage::Thinking(subject.to_string()))
+ .await;
}
/// Stop the spinner and clear the line
@@ -144,9 +150,9 @@ async fn run_spinner(
is_running: Arc,
initial_text: String,
) {
- use rand::{Rng, SeedableRng};
use rand::rngs::StdRng;
-
+ use rand::{Rng, SeedableRng};
+
let start_time = Instant::now();
let mut frame_index = 0;
let mut current_text = initial_text;
@@ -274,11 +280,13 @@ async fn run_spinner(
} else {
print!("\r{}", ansi::CLEAR_LINE);
}
-
+
// Print summary
if tools_completed > 0 {
- println!(" {}ā{} {} tool{} used",
- ansi::SUCCESS, ansi::RESET,
+ println!(
+ " {}ā{} {} tool{} used",
+ ansi::SUCCESS,
+ ansi::RESET,
tools_completed,
if tools_completed == 1 { "" } else { "s" }
);
diff --git a/src/agent/ui/streaming.rs b/src/agent/ui/streaming.rs
index 146f0b94..adbdbfe8 100644
--- a/src/agent/ui/streaming.rs
+++ b/src/agent/ui/streaming.rs
@@ -159,9 +159,7 @@ impl StreamingDisplay {
/// Get elapsed time since start
pub fn elapsed_secs(&self) -> u64 {
- self.start_time
- .map(|t| t.elapsed().as_secs())
- .unwrap_or(0)
+ self.start_time.map(|t| t.elapsed().as_secs()).unwrap_or(0)
}
/// Get the accumulated text
diff --git a/src/agent/ui/tool_display.rs b/src/agent/ui/tool_display.rs
index a01e18b2..367e12d1 100644
--- a/src/agent/ui/tool_display.rs
+++ b/src/agent/ui/tool_display.rs
@@ -156,8 +156,14 @@ impl ToolCallDisplay {
return;
}
- let success_count = tools.iter().filter(|t| t.status == ToolCallStatus::Success).count();
- let error_count = tools.iter().filter(|t| t.status == ToolCallStatus::Error).count();
+ let success_count = tools
+ .iter()
+ .filter(|t| t.status == ToolCallStatus::Success)
+ .count();
+ let error_count = tools
+ .iter()
+ .filter(|t| t.status == ToolCallStatus::Error)
+ .count();
println!();
if error_count == 0 {
@@ -199,7 +205,12 @@ pub fn print_tool_inline(status: ToolCallStatus, name: &str, description: &str)
/// Print a tool group header
pub fn print_tool_group_header(count: usize) {
- println!("\n{} {} tool{}:", icons::TOOL, count, if count == 1 { "" } else { "s" });
+ println!(
+ "\n{} {} tool{}:",
+ icons::TOOL,
+ count,
+ if count == 1 { "" } else { "s" }
+ );
}
// ============================================================================
@@ -207,7 +218,7 @@ pub fn print_tool_group_header(count: usize) {
// ============================================================================
/// Forge-style tool display that shows:
-/// ```
+/// ```text
/// ā tool_name(arg1=value1, arg2=value2)
/// ā Running...
/// ```
@@ -260,7 +271,7 @@ impl ForgeToolDisplay {
}
/// Print tool start in forge style
- /// ```
+ /// ```text
/// ā tool_name(args)
/// ā Running...
/// ```
@@ -334,7 +345,10 @@ impl ForgeToolDisplay {
// Check for files written
if let Some(files) = json.get("files_written").and_then(|v| v.as_u64()) {
- let lines = json.get("total_lines").and_then(|v| v.as_u64()).unwrap_or(0);
+ let lines = json
+ .get("total_lines")
+ .and_then(|v| v.as_u64())
+ .unwrap_or(0);
return format!("wrote {} file(s) ({} lines)", files, lines);
}
diff --git a/src/analyzer/context/analysis.rs b/src/analyzer/context/analysis.rs
index ce2883db..95e867de 100644
--- a/src/analyzer/context/analysis.rs
+++ b/src/analyzer/context/analysis.rs
@@ -39,19 +39,53 @@ pub fn analyze_context(
for language in languages {
match language.name.as_str() {
"JavaScript" | "TypeScript" => {
- javascript::analyze_node_project(project_root, &mut entry_points, &mut ports, &mut env_vars, &mut build_scripts, config)?;
+ javascript::analyze_node_project(
+ project_root,
+ &mut entry_points,
+ &mut ports,
+ &mut env_vars,
+ &mut build_scripts,
+ config,
+ )?;
}
"Python" => {
- python::analyze_python_project(project_root, &mut entry_points, &mut ports, &mut env_vars, &mut build_scripts, config)?;
+ python::analyze_python_project(
+ project_root,
+ &mut entry_points,
+ &mut ports,
+ &mut env_vars,
+ &mut build_scripts,
+ config,
+ )?;
}
"Rust" => {
- rust::analyze_rust_project(project_root, &mut entry_points, &mut ports, &mut env_vars, &mut build_scripts, config)?;
+ rust::analyze_rust_project(
+ project_root,
+ &mut entry_points,
+ &mut ports,
+ &mut env_vars,
+ &mut build_scripts,
+ config,
+ )?;
}
"Go" => {
- go::analyze_go_project(project_root, &mut entry_points, &mut ports, &mut env_vars, &mut build_scripts, config)?;
+ go::analyze_go_project(
+ project_root,
+ &mut entry_points,
+ &mut ports,
+ &mut env_vars,
+ &mut build_scripts,
+ config,
+ )?;
}
"Java" | "Kotlin" => {
- jvm::analyze_jvm_project(project_root, &mut ports, &mut env_vars, &mut build_scripts, config)?;
+ jvm::analyze_jvm_project(
+ project_root,
+ &mut ports,
+ &mut env_vars,
+ &mut build_scripts,
+ config,
+ )?;
}
_ => {}
}
@@ -64,7 +98,12 @@ pub fn analyze_context(
// Technology-specific analysis
for technology in technologies {
- tech_specific::analyze_technology_specifics(technology, project_root, &mut entry_points, &mut ports)?;
+ tech_specific::analyze_technology_specifics(
+ technology,
+ project_root,
+ &mut entry_points,
+ &mut ports,
+ )?;
}
// Detect microservices structure
@@ -99,4 +138,4 @@ pub fn analyze_context(
project_type,
build_scripts,
})
-}
\ No newline at end of file
+}
diff --git a/src/analyzer/context/file_analyzers/docker.rs b/src/analyzer/context/file_analyzers/docker.rs
index c4bacf54..fb414a63 100644
--- a/src/analyzer/context/file_analyzers/docker.rs
+++ b/src/analyzer/context/file_analyzers/docker.rs
@@ -1,4 +1,4 @@
-use crate::analyzer::{context::helpers::create_regex, Port, Protocol};
+use crate::analyzer::{Port, Protocol, context::helpers::create_regex};
use crate::common::file_utils::is_readable_file;
use crate::error::{AnalysisError, Result};
use std::collections::{HashMap, HashSet};
@@ -20,7 +20,8 @@ pub(crate) fn analyze_docker_files(
for cap in expose_regex.captures_iter(&content) {
if let Some(port_str) = cap.get(1) {
if let Ok(port) = port_str.as_str().parse::<u16>() {
- let protocol = cap.get(2)
+ let protocol = cap
+ .get(2)
.and_then(|p| match p.as_str().to_lowercase().as_str() {
"tcp" => Some(Protocol::Tcp),
"udp" => Some(Protocol::Udp),
@@ -43,13 +44,20 @@ pub(crate) fn analyze_docker_files(
if let (Some(name), Some(value)) = (cap.get(1), cap.get(2)) {
let var_name = name.as_str().to_string();
let var_value = value.as_str().trim().to_string();
- env_vars.entry(var_name).or_insert((Some(var_value), false, None));
+ env_vars
+ .entry(var_name)
+ .or_insert((Some(var_value), false, None));
}
}
}
// Check docker-compose files
- let compose_files = ["docker-compose.yml", "docker-compose.yaml", "compose.yml", "compose.yaml"];
+ let compose_files = [
+ "docker-compose.yml",
+ "docker-compose.yaml",
+ "compose.yml",
+ "compose.yaml",
+ ];
for compose_file in &compose_files {
let path = root.join(compose_file);
if is_readable_file(&path) {
@@ -68,7 +76,8 @@ fn analyze_docker_compose(
env_vars: &mut HashMap<String, (Option<String>, bool, Option<String>)>,
) -> Result<()> {
let content = std::fs::read_to_string(path)?;
- let value: serde_yaml::Value = serde_yaml::from_str(&content).map_err(|e| AnalysisError::InvalidStructure(format!("Invalid YAML: {}", e)))?;
+ let value: serde_yaml::Value = serde_yaml::from_str(&content)
+ .map_err(|e| AnalysisError::InvalidStructure(format!("Invalid YAML: {}", e)))?;
if let Some(services) = value.get("services").and_then(|s| s.as_mapping()) {
for (service_name, service) in services {
@@ -107,7 +116,12 @@ fn analyze_docker_compose(
// Create descriptive port entry
if let Ok(port) = external_port.parse::<u16>() {
- let description = create_port_description(&service_type, service_name_str, external_port, internal_port);
+ let description = create_port_description(
+ &service_type,
+ service_name_str,
+ external_port,
+ internal_port,
+ );
ports.insert(Port {
number: port,
@@ -128,8 +142,11 @@ fn analyze_docker_compose(
if let Some(key_str) = key.as_str() {
let val_str = value.as_str().map(|s| s.to_string());
let description = get_env_var_description(key_str, &service_type);
- env_vars.entry(key_str.to_string())
- .or_insert((val_str, false, description.or_else(|| Some(env_context.clone()))));
+ env_vars.entry(key_str.to_string()).or_insert((
+ val_str,
+ false,
+ description.or_else(|| Some(env_context.clone())),
+ ));
}
}
} else if let Some(env_list) = env.as_sequence() {
@@ -139,8 +156,11 @@ fn analyze_docker_compose(
let (key, value) = env_str.split_at(eq_pos);
let value = &value[1..]; // Skip the '='
let description = get_env_var_description(key, &service_type);
- env_vars.entry(key.to_string())
- .or_insert((Some(value.to_string()), false, description.or_else(|| Some(env_context.clone()))));
+ env_vars.entry(key.to_string()).or_insert((
+ Some(value.to_string()),
+ false,
+ description.or_else(|| Some(env_context.clone())),
+ ));
}
}
}
@@ -255,7 +275,12 @@ fn determine_service_type(name: &str, service: &serde_yaml::Value) -> ServiceTyp
}
/// Creates a descriptive port description based on service type
-fn create_port_description(service_type: &ServiceType, service_name: &str, external: &str, internal: &str) -> String {
+fn create_port_description(
+ service_type: &ServiceType,
+ service_name: &str,
+ external: &str,
+ internal: &str,
+) -> String {
let base_desc = match service_type {
ServiceType::PostgreSQL => format!("PostgreSQL database ({})", service_name),
ServiceType::MySQL => format!("MySQL database ({})", service_name),
@@ -270,7 +295,10 @@ fn create_port_description(service_type: &ServiceType, service_name: &str, exter
};
if external != internal {
- format!("{} - external:{}, internal:{}", base_desc, external, internal)
+ format!(
+ "{} - external:{}, internal:{}",
+ base_desc, external, internal
+ )
} else {
format!("{} - port {}", base_desc, external)
}
@@ -279,19 +307,23 @@ fn create_port_description(service_type: &ServiceType, service_name: &str, exter
/// Gets a descriptive context for environment variables based on service type
fn get_env_var_description(var_name: &str, _service_type: &ServiceType) -> Option<String> {
match var_name {
- "POSTGRES_PASSWORD" | "POSTGRES_USER" | "POSTGRES_DB" =>
- Some("PostgreSQL configuration".to_string()),
- "MYSQL_ROOT_PASSWORD" | "MYSQL_PASSWORD" | "MYSQL_USER" | "MYSQL_DATABASE" =>
- Some("MySQL configuration".to_string()),
- "MONGO_INITDB_ROOT_USERNAME" | "MONGO_INITDB_ROOT_PASSWORD" =>
- Some("MongoDB configuration".to_string()),
+ "POSTGRES_PASSWORD" | "POSTGRES_USER" | "POSTGRES_DB" => {
+ Some("PostgreSQL configuration".to_string())
+ }
+ "MYSQL_ROOT_PASSWORD" | "MYSQL_PASSWORD" | "MYSQL_USER" | "MYSQL_DATABASE" => {
+ Some("MySQL configuration".to_string())
+ }
+ "MONGO_INITDB_ROOT_USERNAME" | "MONGO_INITDB_ROOT_PASSWORD" => {
+ Some("MongoDB configuration".to_string())
+ }
"REDIS_PASSWORD" => Some("Redis configuration".to_string()),
- "RABBITMQ_DEFAULT_USER" | "RABBITMQ_DEFAULT_PASS" =>
- Some("RabbitMQ configuration".to_string()),
- "DATABASE_URL" | "DB_CONNECTION_STRING" =>
- Some("Database connection string".to_string()),
- "GOOGLE_APPLICATION_CREDENTIALS" =>
- Some("Google Cloud service account credentials".to_string()),
+ "RABBITMQ_DEFAULT_USER" | "RABBITMQ_DEFAULT_PASS" => {
+ Some("RabbitMQ configuration".to_string())
+ }
+ "DATABASE_URL" | "DB_CONNECTION_STRING" => Some("Database connection string".to_string()),
+ "GOOGLE_APPLICATION_CREDENTIALS" => {
+ Some("Google Cloud service account credentials".to_string())
+ }
_ => None,
}
-}
\ No newline at end of file
+}
diff --git a/src/analyzer/context/file_analyzers/env.rs b/src/analyzer/context/file_analyzers/env.rs
index 49b0dcab..eee5b950 100644
--- a/src/analyzer/context/file_analyzers/env.rs
+++ b/src/analyzer/context/file_analyzers/env.rs
@@ -8,7 +8,13 @@ pub(crate) fn analyze_env_files(
root: &Path,
env_vars: &mut HashMap<String, (Option<String>, bool, Option<String>)>,
) -> Result<()> {
- let env_files = [".env", ".env.example", ".env.local", ".env.development", ".env.production"];
+ let env_files = [
+ ".env",
+ ".env.example",
+ ".env.local",
+ ".env.development",
+ ".env.production",
+ ];
for env_file in &env_files {
let path = root.join(env_file);
@@ -28,13 +34,19 @@ pub(crate) fn analyze_env_files(
// Check if it's marked as required (common convention)
let required = value.is_empty() || value == "required" || value == "REQUIRED";
- let actual_value = if required { None } else { Some(value.to_string()) };
+ let actual_value = if required {
+ None
+ } else {
+ Some(value.to_string())
+ };
- env_vars.entry(key.to_string()).or_insert((actual_value, required, None));
+ env_vars
+ .entry(key.to_string())
+ .or_insert((actual_value, required, None));
}
}
}
}
Ok(())
-}
\ No newline at end of file
+}
diff --git a/src/analyzer/context/file_analyzers/makefile.rs b/src/analyzer/context/file_analyzers/makefile.rs
index a3206927..1a34a048 100644
--- a/src/analyzer/context/file_analyzers/makefile.rs
+++ b/src/analyzer/context/file_analyzers/makefile.rs
@@ -1,13 +1,10 @@
-use crate::analyzer::{context::helpers::create_regex, BuildScript};
+use crate::analyzer::{BuildScript, context::helpers::create_regex};
use crate::common::file_utils::is_readable_file;
use crate::error::Result;
use std::path::Path;
/// Analyzes Makefile for build scripts
-pub(crate) fn analyze_makefile(
- root: &Path,
- build_scripts: &mut Vec<BuildScript>,
-) -> Result<()> {
+pub(crate) fn analyze_makefile(root: &Path, build_scripts: &mut Vec<BuildScript>) -> Result<()> {
let makefiles = ["Makefile", "makefile"];
for makefile in &makefiles {
@@ -62,4 +59,4 @@ pub(crate) fn analyze_makefile(
}
Ok(())
-}
\ No newline at end of file
+}
diff --git a/src/analyzer/context/file_analyzers/mod.rs b/src/analyzer/context/file_analyzers/mod.rs
index 3c5df224..4f288e87 100644
--- a/src/analyzer/context/file_analyzers/mod.rs
+++ b/src/analyzer/context/file_analyzers/mod.rs
@@ -1,3 +1,3 @@
pub(crate) mod docker;
pub(crate) mod env;
-pub(crate) mod makefile;
\ No newline at end of file
+pub(crate) mod makefile;
diff --git a/src/analyzer/context/helpers.rs b/src/analyzer/context/helpers.rs
index 456c5669..4bf71062 100644
--- a/src/analyzer/context/helpers.rs
+++ b/src/analyzer/context/helpers.rs
@@ -6,7 +6,8 @@ use std::collections::HashSet;
/// Helper function to create a regex with proper error handling
pub fn create_regex(pattern: &str) -> Result<Regex> {
Regex::new(pattern).map_err(|e| {
- AnalysisError::InvalidStructure(format!("Invalid regex pattern '{}': {}", pattern, e)).into()
+ AnalysisError::InvalidStructure(format!("Invalid regex pattern '{}': {}", pattern, e))
+ .into()
})
}
@@ -48,4 +49,4 @@ pub fn get_script_description(name: &str) -> Option<String> {
"format" => Some("Format code".to_string()),
_ => None,
}
-}
\ No newline at end of file
+}
diff --git a/src/analyzer/context/language_analyzers/go.rs b/src/analyzer/context/language_analyzers/go.rs
index 2f508701..753adb80 100644
--- a/src/analyzer/context/language_analyzers/go.rs
+++ b/src/analyzer/context/language_analyzers/go.rs
@@ -1,4 +1,6 @@
-use crate::analyzer::{context::helpers::create_regex, AnalysisConfig, BuildScript, EntryPoint, Port, Protocol};
+use crate::analyzer::{
+ AnalysisConfig, BuildScript, EntryPoint, Port, Protocol, context::helpers::create_regex,
+};
use crate::common::file_utils::{is_readable_file, read_file_safe};
use crate::error::Result;
use std::collections::{HashMap, HashSet};
@@ -127,4 +129,4 @@ fn scan_go_file_for_context(
}
Ok(())
-}
\ No newline at end of file
+}
diff --git a/src/analyzer/context/language_analyzers/javascript.rs b/src/analyzer/context/language_analyzers/javascript.rs
index f604fdf6..ba9167f7 100644
--- a/src/analyzer/context/language_analyzers/javascript.rs
+++ b/src/analyzer/context/language_analyzers/javascript.rs
@@ -1,4 +1,7 @@
-use crate::analyzer::{context::helpers::{create_regex, extract_ports_from_command, get_script_description}, AnalysisConfig, BuildScript, EntryPoint, Port, Protocol};
+use crate::analyzer::{
+ AnalysisConfig, BuildScript, EntryPoint, Port, Protocol,
+ context::helpers::{create_regex, extract_ports_from_command, get_script_description},
+};
use crate::common::file_utils::{is_readable_file, read_file_safe};
use crate::error::{AnalysisError, Result};
use regex::Regex;
@@ -48,7 +51,16 @@ pub(crate) fn analyze_node_project(
}
// Check common entry files
- let common_entries = ["index.js", "index.ts", "app.js", "app.ts", "server.js", "server.ts", "main.js", "main.ts"];
+ let common_entries = [
+ "index.js",
+ "index.ts",
+ "app.js",
+ "app.ts",
+ "server.js",
+ "server.ts",
+ "main.js",
+ "main.ts",
+ ];
for entry in &common_entries {
let path = root.join(entry);
if is_readable_file(&path) {
@@ -81,8 +93,9 @@ fn scan_js_file_for_context(
let content = read_file_safe(path, config.max_file_size)?;
// Look for port assignments
- let port_regex = Regex::new(r"(?:PORT|port)\s*[=:]\s*(?:process\.env\.PORT\s*\|\|\s*)?(\d{1,5})")
- .map_err(|e| AnalysisError::InvalidStructure(format!("Invalid regex: {}", e)))?;
+ let port_regex =
+ Regex::new(r"(?:PORT|port)\s*[=:]\s*(?:process\.env\.PORT\s*\|\|\s*)?(\d{1,5})")
+ .map_err(|e| AnalysisError::InvalidStructure(format!("Invalid regex: {}", e)))?;
for cap in port_regex.captures_iter(&content) {
if let Some(port_str) = cap.get(1) {
if let Ok(port) = port_str.as_str().parse::<u16>() {
@@ -116,7 +129,8 @@ fn scan_js_file_for_context(
for cap in env_regex.captures_iter(&content) {
if let Some(var_name) = cap.get(1) {
let name = var_name.as_str().to_string();
- if !name.starts_with("NODE_") { // Skip Node.js internal vars
+ if !name.starts_with("NODE_") {
+ // Skip Node.js internal vars
env_vars.entry(name.clone()).or_insert((None, false, None));
}
}
@@ -126,7 +140,10 @@ fn scan_js_file_for_context(
if content.contains("encore.dev") {
// Encore uses specific patterns for config and database
let encore_patterns = [
- (r#"secret\s*\(\s*['"]([A-Z_][A-Z0-9_]*)['"]"#, "Encore secret configuration"),
+ (
+ r#"secret\s*\(\s*['"]([A-Z_][A-Z0-9_]*)['"]"#,
+ "Encore secret configuration",
+ ),
(r#"SQLDatabase\s*\(\s*['"](\w+)['"]"#, "Encore database"),
];
@@ -136,8 +153,11 @@ fn scan_js_file_for_context(
if let Some(match_str) = cap.get(1) {
let name = match_str.as_str();
if pattern.contains("secret") {
- env_vars.entry(name.to_string())
- .or_insert((None, true, Some(description.to_string())));
+ env_vars.entry(name.to_string()).or_insert((
+ None,
+ true,
+ Some(description.to_string()),
+ ));
}
}
}
@@ -145,4 +165,4 @@ fn scan_js_file_for_context(
}
Ok(())
-}
\ No newline at end of file
+}
diff --git a/src/analyzer/context/language_analyzers/jvm.rs b/src/analyzer/context/language_analyzers/jvm.rs
index 9e83156e..0a65bc0e 100644
--- a/src/analyzer/context/language_analyzers/jvm.rs
+++ b/src/analyzer/context/language_analyzers/jvm.rs
@@ -1,4 +1,6 @@
-use crate::analyzer::{context::helpers::create_regex, AnalysisConfig, BuildScript, Port, Protocol};
+use crate::analyzer::{
+ AnalysisConfig, BuildScript, Port, Protocol, context::helpers::create_regex,
+};
use crate::common::file_utils::{is_readable_file, read_file_safe};
use crate::error::Result;
use std::collections::{HashMap, HashSet};
@@ -115,4 +117,4 @@ fn analyze_application_properties(
}
Ok(())
-}
\ No newline at end of file
+}
diff --git a/src/analyzer/context/language_analyzers/mod.rs b/src/analyzer/context/language_analyzers/mod.rs
index ca3d42f0..5c3f0b3d 100644
--- a/src/analyzer/context/language_analyzers/mod.rs
+++ b/src/analyzer/context/language_analyzers/mod.rs
@@ -2,4 +2,4 @@ pub(crate) mod go;
pub(crate) mod javascript;
pub(crate) mod jvm;
pub(crate) mod python;
-pub(crate) mod rust;
\ No newline at end of file
+pub(crate) mod rust;
diff --git a/src/analyzer/context/language_analyzers/python.rs b/src/analyzer/context/language_analyzers/python.rs
index d1947106..ecc2afdc 100644
--- a/src/analyzer/context/language_analyzers/python.rs
+++ b/src/analyzer/context/language_analyzers/python.rs
@@ -1,4 +1,6 @@
-use crate::analyzer::{context::helpers::create_regex, AnalysisConfig, BuildScript, EntryPoint, Port, Protocol};
+use crate::analyzer::{
+ AnalysisConfig, BuildScript, EntryPoint, Port, Protocol, context::helpers::create_regex,
+};
use crate::common::file_utils::{is_readable_file, read_file_safe};
use crate::error::Result;
use std::collections::{HashMap, HashSet};
@@ -14,7 +16,15 @@ pub(crate) fn analyze_python_project(
config: &AnalysisConfig,
) -> Result<()> {
// Check for common Python entry points
- let common_entries = ["main.py", "app.py", "wsgi.py", "asgi.py", "manage.py", "run.py", "__main__.py"];
+ let common_entries = [
+ "main.py",
+ "app.py",
+ "wsgi.py",
+ "asgi.py",
+ "manage.py",
+ "run.py",
+ "__main__.py",
+ ];
for entry in &common_entries {
let path = root.join(entry);
@@ -35,9 +45,13 @@ pub(crate) fn analyze_python_project(
let script_regex = create_regex(r#"['"](\w+)\s*=\s*([\w\.]+):(\w+)"#)?;
for script_cap in script_regex.captures_iter(scripts.as_str()) {
if let (Some(name), Some(module), Some(func)) =
- (script_cap.get(1), script_cap.get(2), script_cap.get(3)) {
+ (script_cap.get(1), script_cap.get(2), script_cap.get(3))
+ {
entry_points.push(EntryPoint {
- file: PathBuf::from(format!("{}.py", module.as_str().replace('.', "/"))),
+ file: PathBuf::from(format!(
+ "{}.py",
+ module.as_str().replace('.', "/")
+ )),
function: Some(func.as_str().to_string()),
command: Some(name.as_str().to_string()),
});
@@ -53,10 +67,12 @@ pub(crate) fn analyze_python_project(
let content = read_file_safe(&pyproject, config.max_file_size)?;
if let Ok(toml_value) = toml::from_str::<toml::Value>(&content) {
// Extract build scripts from poetry
- if let Some(scripts) = toml_value.get("tool")
+ if let Some(scripts) = toml_value
+ .get("tool")
.and_then(|t| t.get("poetry"))
.and_then(|p| p.get("scripts"))
- .and_then(|s| s.as_table()) {
+ .and_then(|s| s.as_table())
+ {
for (name, cmd) in scripts {
if let Some(command) = cmd.as_str() {
build_scripts.push(BuildScript {
@@ -133,14 +149,18 @@ fn scan_python_file_for_context(
}
// Check if this is a main entry point
- if content.contains("if __name__ == '__main__':") ||
- content.contains("if __name__ == \"__main__\":") {
+ if content.contains("if __name__ == '__main__':")
+ || content.contains("if __name__ == \"__main__\":")
+ {
entry_points.push(EntryPoint {
file: path.to_path_buf(),
function: Some("main".to_string()),
- command: Some(format!("python {}", path.file_name().unwrap().to_string_lossy())),
+ command: Some(format!(
+ "python {}",
+ path.file_name().unwrap().to_string_lossy()
+ )),
});
}
Ok(())
-}
\ No newline at end of file
+}
diff --git a/src/analyzer/context/language_analyzers/rust.rs b/src/analyzer/context/language_analyzers/rust.rs
index 6c3eb825..2b4b52c3 100644
--- a/src/analyzer/context/language_analyzers/rust.rs
+++ b/src/analyzer/context/language_analyzers/rust.rs
@@ -1,4 +1,6 @@
-use crate::analyzer::{context::helpers::create_regex, AnalysisConfig, BuildScript, EntryPoint, Port, Protocol};
+use crate::analyzer::{
+ AnalysisConfig, BuildScript, EntryPoint, Port, Protocol, context::helpers::create_regex,
+};
use crate::common::file_utils::{is_readable_file, read_file_safe};
use crate::error::Result;
use std::collections::{HashMap, HashSet};
@@ -22,10 +24,13 @@ pub(crate) fn analyze_rust_project(
if let Some(bins) = toml_value.get("bin").and_then(|b| b.as_array()) {
for bin in bins {
if let Some(name) = bin.get("name").and_then(|n| n.as_str()) {
- let path = bin.get("path")
+ let path = bin
+ .get("path")
.and_then(|p| p.as_str())
.map(PathBuf::from)
- .unwrap_or_else(|| root.join("src").join("bin").join(format!("{}.rs", name)));
+ .unwrap_or_else(|| {
+ root.join("src").join("bin").join(format!("{}.rs", name))
+ });
entry_points.push(EntryPoint {
file: path,
@@ -37,9 +42,11 @@ pub(crate) fn analyze_rust_project(
}
// Default binary
- if let Some(_package_name) = toml_value.get("package")
+ if let Some(_package_name) = toml_value
+ .get("package")
.and_then(|p| p.get("name"))
- .and_then(|n| n.as_str()) {
+ .and_then(|n| n.as_str())
+ {
let main_rs = root.join("src").join("main.rs");
if is_readable_file(&main_rs) {
entry_points.push(EntryPoint {
@@ -136,4 +143,4 @@ fn scan_rust_file_for_context(
}
Ok(())
-}
\ No newline at end of file
+}
diff --git a/src/analyzer/context/microservices.rs b/src/analyzer/context/microservices.rs
index 1aadf00c..cd679571 100644
--- a/src/analyzer/context/microservices.rs
+++ b/src/analyzer/context/microservices.rs
@@ -14,7 +14,14 @@ pub(crate) fn detect_microservices_structure(project_root: &Path) -> Result Result Result 1;
+ .count()
+ > 1;
- let has_orchestration_framework = technologies.iter()
+ let has_orchestration_framework = technologies
+ .iter()
.any(|t| t.name == "Encore" || t.name == "Dapr" || t.name == "Temporal");
// Check for web frameworks
- let web_frameworks = ["Express", "Fastify", "Koa", "Next.js", "React", "Vue", "Angular",
- "Django", "Flask", "FastAPI", "Spring Boot", "Actix Web", "Rocket",
- "Gin", "Echo", "Fiber", "Svelte", "SvelteKit", "SolidJS", "Astro",
- "Encore", "Hono", "Elysia", "React Router v7", "Tanstack Start",
- "SolidStart", "Qwik", "Nuxt.js", "Gatsby"];
+ let web_frameworks = [
+ "Express",
+ "Fastify",
+ "Koa",
+ "Next.js",
+ "React",
+ "Vue",
+ "Angular",
+ "Django",
+ "Flask",
+ "FastAPI",
+ "Spring Boot",
+ "Actix Web",
+ "Rocket",
+ "Gin",
+ "Echo",
+ "Fiber",
+ "Svelte",
+ "SvelteKit",
+ "SolidJS",
+ "Astro",
+ "Encore",
+ "Hono",
+ "Elysia",
+ "React Router v7",
+ "Tanstack Start",
+ "SolidStart",
+ "Qwik",
+ "Nuxt.js",
+ "Gatsby",
+ ];
- let has_web_framework = technologies.iter()
+ let has_web_framework = technologies
+ .iter()
.any(|t| web_frameworks.contains(&t.name.as_str()));
// Check for CLI indicators
let cli_indicators = ["cobra", "clap", "argparse", "commander"];
- let has_cli_framework = technologies.iter()
+ let has_cli_framework = technologies
+ .iter()
.any(|t| cli_indicators.contains(&t.name.to_lowercase().as_str()));
// Check for API indicators
- let api_frameworks = ["FastAPI", "Express", "Gin", "Echo", "Actix Web", "Spring Boot",
- "Fastify", "Koa", "Nest.js", "Encore", "Hono", "Elysia"];
- let has_api_framework = technologies.iter()
+ let api_frameworks = [
+ "FastAPI",
+ "Express",
+ "Gin",
+ "Echo",
+ "Actix Web",
+ "Spring Boot",
+ "Fastify",
+ "Koa",
+ "Nest.js",
+ "Encore",
+ "Hono",
+ "Elysia",
+ ];
+ let has_api_framework = technologies
+ .iter()
.any(|t| api_frameworks.contains(&t.name.as_str()));
// Check for static site generators
let static_generators = ["Gatsby", "Hugo", "Jekyll", "Eleventy", "Astro"];
- let has_static_generator = technologies.iter()
+ let has_static_generator = technologies
+ .iter()
.any(|t| static_generators.contains(&t.name.as_str()));
// Determine type based on indicators
- if (has_database_ports || has_multiple_services) && (has_orchestration_framework || has_api_framework) {
+ if (has_database_ports || has_multiple_services)
+ && (has_orchestration_framework || has_api_framework)
+ {
ProjectType::Microservice
} else if has_static_generator {
ProjectType::StaticSite
@@ -87,13 +136,17 @@ fn determine_project_type(
ProjectType::CliTool
} else if entry_points.is_empty() && ports.is_empty() {
// Check if it's a library
- let has_lib_indicators = languages.iter().any(|l| {
- match l.name.as_str() {
- "Rust" => l.files.iter().any(|f| f.to_string_lossy().contains("lib.rs")),
- "Python" => l.files.iter().any(|f| f.to_string_lossy().contains("__init__.py")),
- "JavaScript" | "TypeScript" => l.main_dependencies.is_empty(),
- _ => false,
- }
+ let has_lib_indicators = languages.iter().any(|l| match l.name.as_str() {
+ "Rust" => l
+ .files
+ .iter()
+ .any(|f| f.to_string_lossy().contains("lib.rs")),
+ "Python" => l
+ .files
+ .iter()
+ .any(|f| f.to_string_lossy().contains("__init__.py")),
+ "JavaScript" | "TypeScript" => l.main_dependencies.is_empty(),
+ _ => false,
});
if has_lib_indicators {
@@ -104,4 +157,4 @@ fn determine_project_type(
} else {
ProjectType::Unknown
}
-}
\ No newline at end of file
+}
diff --git a/src/analyzer/context/tech_specific.rs b/src/analyzer/context/tech_specific.rs
index 7b69b2e9..c859861f 100644
--- a/src/analyzer/context/tech_specific.rs
+++ b/src/analyzer/context/tech_specific.rs
@@ -1,7 +1,7 @@
use crate::analyzer::{DetectedTechnology, EntryPoint, Port, Protocol};
use crate::error::Result;
use std::collections::HashSet;
-use std::path::{Path};
+use std::path::Path;
/// Analyzes technology-specific configurations
pub(crate) fn analyze_technology_specifics(
@@ -117,4 +117,4 @@ pub(crate) fn analyze_technology_specifics(
}
Ok(())
-}
\ No newline at end of file
+}
diff --git a/src/analyzer/dclint/config.rs b/src/analyzer/dclint/config.rs
new file mode 100644
index 00000000..6056468d
--- /dev/null
+++ b/src/analyzer/dclint/config.rs
@@ -0,0 +1,423 @@
+//! Configuration for the dclint Docker Compose linter.
+//!
+//! Provides configuration options matching the TypeScript docker-compose-linter:
+//! - Rule-level configuration (off/warn/error)
+//! - Per-rule options
+//! - Global settings (quiet, debug, exclude patterns)
+
+use std::collections::HashMap;
+
+use crate::analyzer::dclint::types::{ConfigLevel, RuleCode, Severity};
+
+/// Configuration for a single rule.
+#[derive(Debug, Clone)]
+pub struct RuleConfig {
+ /// The configuration level (off, warn, error).
+ pub level: ConfigLevel,
+ /// Optional rule-specific options.
+ pub options: HashMap<String, serde_json::Value>,
+}
+
+impl Default for RuleConfig {
+ fn default() -> Self {
+ Self {
+ level: ConfigLevel::Error,
+ options: HashMap::new(),
+ }
+ }
+}
+
+impl RuleConfig {
+ /// Create a new rule config with the given level.
+ pub fn with_level(level: ConfigLevel) -> Self {
+ Self {
+ level,
+ options: HashMap::new(),
+ }
+ }
+
+ /// Create a rule config that's disabled.
+ pub fn off() -> Self {
+ Self::with_level(ConfigLevel::Off)
+ }
+
+ /// Create a rule config that produces warnings.
+ pub fn warn() -> Self {
+ Self::with_level(ConfigLevel::Warn)
+ }
+
+ /// Create a rule config that produces errors.
+ pub fn error() -> Self {
+ Self::with_level(ConfigLevel::Error)
+ }
+
+ /// Add an option to the rule config.
+ pub fn with_option(mut self, key: impl Into<String>, value: serde_json::Value) -> Self {
+ self.options.insert(key.into(), value);
+ self
+ }
+
+ /// Get an option value.
+ pub fn get_option(&self, key: &str) -> Option<&serde_json::Value> {
+ self.options.get(key)
+ }
+
+ /// Get a boolean option with a default value.
+ pub fn get_bool_option(&self, key: &str, default: bool) -> bool {
+ self.options
+ .get(key)
+ .and_then(|v| v.as_bool())
+ .unwrap_or(default)
+ }
+
+ /// Get a string option.
+ pub fn get_string_option(&self, key: &str) -> Option<&str> {
+ self.options.get(key).and_then(|v| v.as_str())
+ }
+
+ /// Get an array option as a vector of strings.
+ pub fn get_string_array_option(&self, key: &str) -> Vec<String> {
+ self.options
+ .get(key)
+ .and_then(|v| v.as_array())
+ .map(|arr| {
+ arr.iter()
+ .filter_map(|v| v.as_str().map(String::from))
+ .collect()
+ })
+ .unwrap_or_default()
+ }
+}
+
+/// Main configuration for dclint.
+#[derive(Debug, Clone)]
+pub struct DclintConfig {
+ /// Per-rule configuration.
+ pub rules: HashMap<String, RuleConfig>,
+ /// Suppress non-error output.
+ pub quiet: bool,
+ /// Enable debug output.
+ pub debug: bool,
+ /// File patterns to exclude from linting.
+ pub exclude: Vec<String>,
+ /// Minimum severity threshold for reporting.
+ pub threshold: Severity,
+ /// Whether to disable pragma (comment-based) ignores.
+ pub disable_ignore_pragma: bool,
+ /// Whether to report fixable issues only.
+ pub fixable_only: bool,
+}
+
+impl Default for DclintConfig {
+ fn default() -> Self {
+ Self {
+ rules: HashMap::new(),
+ quiet: false,
+ debug: false,
+ exclude: Vec::new(),
+ threshold: Severity::Style,
+ disable_ignore_pragma: false,
+ fixable_only: false,
+ }
+ }
+}
+
+impl DclintConfig {
+ /// Create a new default configuration.
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ /// Set quiet mode.
+ pub fn with_quiet(mut self, quiet: bool) -> Self {
+ self.quiet = quiet;
+ self
+ }
+
+ /// Set debug mode.
+ pub fn with_debug(mut self, debug: bool) -> Self {
+ self.debug = debug;
+ self
+ }
+
+ /// Add an exclude pattern.
+ pub fn with_exclude(mut self, pattern: impl Into<String>) -> Self {
+ self.exclude.push(pattern.into());
+ self
+ }
+
+ /// Set multiple exclude patterns.
+ pub fn with_excludes(mut self, patterns: Vec<String>) -> Self {
+ self.exclude = patterns;
+ self
+ }
+
+ /// Set the severity threshold.
+ pub fn with_threshold(mut self, threshold: Severity) -> Self {
+ self.threshold = threshold;
+ self
+ }
+
+ /// Configure a specific rule.
+ pub fn with_rule(mut self, rule: impl Into<String>, config: RuleConfig) -> Self {
+ self.rules.insert(rule.into(), config);
+ self
+ }
+
+ /// Disable a rule.
+ pub fn ignore(mut self, rule: impl Into<String>) -> Self {
+ self.rules.insert(rule.into(), RuleConfig::off());
+ self
+ }
+
+ /// Set a rule to warn level.
+ pub fn warn(mut self, rule: impl Into<String>) -> Self {
+ self.rules.insert(rule.into(), RuleConfig::warn());
+ self
+ }
+
+ /// Set a rule to error level.
+ pub fn error(mut self, rule: impl Into<String>) -> Self {
+ self.rules.insert(rule.into(), RuleConfig::error());
+ self
+ }
+
+ /// Disable pragma (comment-based) ignores.
+ pub fn with_disable_ignore_pragma(mut self, disable: bool) -> Self {
+ self.disable_ignore_pragma = disable;
+ self
+ }
+
+ /// Check if a rule is ignored (disabled).
+ pub fn is_rule_ignored(&self, code: &RuleCode) -> bool {
+ self.rules
+ .get(code.as_str())
+ .map(|c| c.level == ConfigLevel::Off)
+ .unwrap_or(false)
+ }
+
+ /// Get the configuration for a specific rule.
+ pub fn get_rule_config(&self, code: &str) -> Option<&RuleConfig> {
+ self.rules.get(code)
+ }
+
+ /// Get the effective severity for a rule, applying any overrides.
+ pub fn effective_severity(&self, code: &RuleCode, default: Severity) -> Severity {
+ self.rules
+ .get(code.as_str())
+ .and_then(|c| c.level.to_severity())
+ .unwrap_or(default)
+ }
+
+ /// Check if an issue should be reported based on threshold.
+ pub fn should_report(&self, severity: Severity) -> bool {
+ severity >= self.threshold
+ }
+
+ /// Check if a file path should be excluded.
+ pub fn is_excluded(&self, path: &str) -> bool {
+ for pattern in &self.exclude {
+ // Simple glob matching
+ if pattern.contains('*') {
+ let pattern_regex = pattern.replace('.', "\\.").replace('*', ".*");
+ if let Ok(re) = regex::Regex::new(&format!("^{}$", pattern_regex)) {
+ if re.is_match(path) {
+ return true;
+ }
+ }
+ } else if path.contains(pattern) {
+ return true;
+ }
+ }
+ false
+ }
+}
+
+/// Builder for creating DclintConfig from various sources.
+pub struct DclintConfigBuilder {
+ config: DclintConfig,
+}
+
impl DclintConfigBuilder {
    /// Create a builder starting from the default configuration.
    pub fn new() -> Self {
        Self {
            config: DclintConfig::default(),
        }
    }

    /// Load configuration from a JSON value (matching TypeScript config format).
    ///
    /// Supported rule values: a bare number (`0`/`1`/`2`, mapped through
    /// `ConfigLevel::from_u8`) or an array `[level, options]`. Entries with
    /// unrecognized levels or value shapes are skipped silently. Also reads
    /// the optional `quiet`, `debug`, and `exclude` keys.
    pub fn from_json(mut self, json: &serde_json::Value) -> Self {
        if let Some(rules) = json.get("rules").and_then(|v| v.as_object()) {
            for (name, value) in rules {
                let rule_config = match value {
                    // Simple numeric level: 0, 1, or 2
                    serde_json::Value::Number(n) => {
                        if let Some(level) = n.as_u64().and_then(|n| ConfigLevel::from_u8(n as u8))
                        {
                            RuleConfig::with_level(level)
                        } else {
                            continue;
                        }
                    }
                    // Array format: [level, options]
                    serde_json::Value::Array(arr) => {
                        // An unparseable level in array form falls back to Error.
                        let level = arr
                            .first()
                            .and_then(|v| v.as_u64())
                            .and_then(|n| ConfigLevel::from_u8(n as u8))
                            .unwrap_or(ConfigLevel::Error);

                        let mut config = RuleConfig::with_level(level);

                        if let Some(opts) = arr.get(1).and_then(|v| v.as_object()) {
                            for (k, v) in opts {
                                config.options.insert(k.clone(), v.clone());
                            }
                        }

                        config
                    }
                    _ => continue,
                };

                self.config.rules.insert(name.clone(), rule_config);
            }
        }

        if let Some(quiet) = json.get("quiet").and_then(|v| v.as_bool()) {
            self.config.quiet = quiet;
        }

        if let Some(debug) = json.get("debug").and_then(|v| v.as_bool()) {
            self.config.debug = debug;
        }

        if let Some(exclude) = json.get("exclude").and_then(|v| v.as_array()) {
            // Non-string entries are silently dropped.
            self.config.exclude = exclude
                .iter()
                .filter_map(|v| v.as_str().map(String::from))
                .collect();
        }

        self
    }

    /// Build the final configuration.
    pub fn build(self) -> DclintConfig {
        self.config
    }
}
+
impl Default for DclintConfigBuilder {
    /// Equivalent to [`DclintConfigBuilder::new`].
    fn default() -> Self {
        Self::new()
    }
}
+
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_default_config() {
        let config = DclintConfig::default();
        assert!(!config.quiet);
        assert!(!config.debug);
        assert!(config.exclude.is_empty());
        assert!(config.rules.is_empty());
    }

    #[test]
    fn test_rule_config() {
        let config = DclintConfig::default()
            .ignore("DCL001")
            .warn("DCL002")
            .error("DCL003");

        assert!(config.is_rule_ignored(&RuleCode::new("DCL001")));
        assert!(!config.is_rule_ignored(&RuleCode::new("DCL002")));
        assert!(!config.is_rule_ignored(&RuleCode::new("DCL003")));
        assert!(!config.is_rule_ignored(&RuleCode::new("DCL004"))); // Not configured
    }

    #[test]
    fn test_effective_severity() {
        let config = DclintConfig::default().warn("DCL001").error("DCL002");

        assert_eq!(
            config.effective_severity(&RuleCode::new("DCL001"), Severity::Error),
            Severity::Warning
        );
        assert_eq!(
            config.effective_severity(&RuleCode::new("DCL002"), Severity::Warning),
            Severity::Error
        );
        // Non-configured rule uses default
        assert_eq!(
            config.effective_severity(&RuleCode::new("DCL003"), Severity::Info),
            Severity::Info
        );
    }

    #[test]
    fn test_threshold() {
        let config = DclintConfig::default().with_threshold(Severity::Warning);

        assert!(config.should_report(Severity::Error));
        assert!(config.should_report(Severity::Warning));
        assert!(!config.should_report(Severity::Info));
        assert!(!config.should_report(Severity::Style));
    }

    #[test]
    fn test_exclude_patterns() {
        let config = DclintConfig::default()
            .with_exclude("node_modules")
            .with_exclude("*.test.yml");

        assert!(config.is_excluded("path/to/node_modules/file.yml"));
        assert!(config.is_excluded("docker-compose.test.yml"));
        assert!(!config.is_excluded("docker-compose.yml"));
    }

    #[test]
    fn test_rule_options() {
        let rule_config = RuleConfig::default()
            .with_option("checkPullPolicy", serde_json::json!(true))
            .with_option("pattern", serde_json::json!("^[a-z]+$"));

        assert!(rule_config.get_bool_option("checkPullPolicy", false));
        assert_eq!(rule_config.get_string_option("pattern"), Some("^[a-z]+$"));
        // Idiomatic negation instead of `== false` (clippy: bool_comparison).
        assert!(!rule_config.get_bool_option("nonexistent", false));
    }

    #[test]
    fn test_config_from_json() {
        let json = serde_json::json!({
            "rules": {
                "no-build-and-image": 2,
                "no-version-field": [1, { "allowEmpty": true }],
                "services-alphabetical-order": 0
            },
            "quiet": true,
            "exclude": ["*.test.yml"]
        });

        let config = DclintConfigBuilder::new().from_json(&json).build();

        assert!(config.quiet);
        assert_eq!(config.exclude, vec!["*.test.yml"]);

        let rule1 = config.get_rule_config("no-build-and-image").unwrap();
        assert_eq!(rule1.level, ConfigLevel::Error);

        let rule2 = config.get_rule_config("no-version-field").unwrap();
        assert_eq!(rule2.level, ConfigLevel::Warn);
        assert!(rule2.get_bool_option("allowEmpty", false));

        let rule3 = config
            .get_rule_config("services-alphabetical-order")
            .unwrap();
        assert_eq!(rule3.level, ConfigLevel::Off);
    }
}
diff --git a/src/analyzer/dclint/formatter/github.rs b/src/analyzer/dclint/formatter/github.rs
new file mode 100644
index 00000000..29489d04
--- /dev/null
+++ b/src/analyzer/dclint/formatter/github.rs
@@ -0,0 +1,118 @@
+//! GitHub Actions output formatter for dclint.
+//!
+//! Produces output in GitHub Actions workflow command format:
+//! ::error file={name},line={line},col={col}::{message}
+
+use crate::analyzer::dclint::lint::LintResult;
+use crate::analyzer::dclint::types::Severity;
+
+/// Format lint results for GitHub Actions.
+pub fn format(results: &[LintResult]) -> String {
+ let mut output = String::new();
+
+ for result in results {
+ // Parse errors
+ for err in &result.parse_errors {
+ output.push_str(&format!(
+ "::error file={}::Parse error: {}\n",
+ result.file_path,
+ escape_github(err)
+ ));
+ }
+
+ // Failures
+ for failure in &result.failures {
+ let level = match failure.severity {
+ Severity::Error => "error",
+ Severity::Warning => "warning",
+ Severity::Info | Severity::Style => "notice",
+ };
+
+ output.push_str(&format!(
+ "::{} file={},line={},col={},title={}::{}\n",
+ level,
+ result.file_path,
+ failure.line,
+ failure.column,
+ failure.code,
+ escape_github(&failure.message)
+ ));
+ }
+ }
+
+ output
+}
+
/// Escape special characters for GitHub Actions.
///
/// Workflow command data must percent-encode `%`, CR and LF.
fn escape_github(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    for ch in s.chars() {
        match ch {
            '%' => out.push_str("%25"),
            '\r' => out.push_str("%0D"),
            '\n' => out.push_str("%0A"),
            other => out.push(other),
        }
    }
    out
}
+
#[cfg(test)]
mod tests {
    use super::*;
    use crate::analyzer::dclint::types::{CheckFailure, RuleCategory};

    // Error-severity failures must surface as `::error` annotations with
    // file/line/col/title properties.
    #[test]
    fn test_github_format() {
        let mut result = LintResult::new("docker-compose.yml");
        result.failures.push(CheckFailure::new(
            "DCL001",
            "no-build-and-image",
            Severity::Error,
            RuleCategory::BestPractice,
            "Service has both build and image",
            5,
            1,
        ));

        let output = format(&[result]);
        assert!(output.contains("::error"));
        assert!(output.contains("file=docker-compose.yml"));
        assert!(output.contains("line=5"));
        assert!(output.contains("col=1"));
        assert!(output.contains("title=DCL001"));
    }

    // Warning severity maps to the `::warning` annotation level.
    #[test]
    fn test_github_format_warning() {
        let mut result = LintResult::new("docker-compose.yml");
        result.failures.push(CheckFailure::new(
            "DCL006",
            "no-version-field",
            Severity::Warning,
            RuleCategory::Style,
            "Version field is deprecated",
            1,
            1,
        ));

        let output = format(&[result]);
        assert!(output.contains("::warning"));
    }

    // Info (and Style) severities map to `::notice`.
    #[test]
    fn test_github_format_info() {
        let mut result = LintResult::new("docker-compose.yml");
        result.failures.push(CheckFailure::new(
            "DCL007",
            "require-project-name",
            Severity::Info,
            RuleCategory::BestPractice,
            "Consider adding name field",
            1,
            1,
        ));

        let output = format(&[result]);
        assert!(output.contains("::notice"));
    }

    // Percent-encoding of message data (%, CR, LF).
    #[test]
    fn test_escape_github() {
        assert_eq!(escape_github("hello\nworld"), "hello%0Aworld");
        assert_eq!(escape_github("100%"), "100%25");
    }
}
diff --git a/src/analyzer/dclint/formatter/json.rs b/src/analyzer/dclint/formatter/json.rs
new file mode 100644
index 00000000..8dea7f4c
--- /dev/null
+++ b/src/analyzer/dclint/formatter/json.rs
@@ -0,0 +1,99 @@
+//! JSON output formatter for dclint.
+
+use serde_json::json;
+
+use crate::analyzer::dclint::lint::LintResult;
+
+/// Format lint results as JSON.
+pub fn format(results: &[LintResult]) -> String {
+ let output: Vec = results
+ .iter()
+ .map(|result| {
+ let messages: Vec = result
+ .failures
+ .iter()
+ .map(|f| {
+ json!({
+ "ruleId": f.code.as_str(),
+ "ruleName": f.rule_name,
+ "severity": match f.severity {
+ crate::analyzer::dclint::types::Severity::Error => 2,
+ crate::analyzer::dclint::types::Severity::Warning => 1,
+ crate::analyzer::dclint::types::Severity::Info => 0,
+ crate::analyzer::dclint::types::Severity::Style => 0,
+ },
+ "severityName": f.severity.as_str(),
+ "category": f.category.as_str(),
+ "message": f.message,
+ "line": f.line,
+ "column": f.column,
+ "endLine": f.end_line,
+ "endColumn": f.end_column,
+ "fixable": f.fixable,
+ "data": f.data
+ })
+ })
+ .collect();
+
+ json!({
+ "filePath": result.file_path,
+ "messages": messages,
+ "errorCount": result.error_count,
+ "warningCount": result.warning_count,
+ "fixableErrorCount": result.fixable_error_count,
+ "fixableWarningCount": result.fixable_warning_count,
+ "parseErrors": result.parse_errors
+ })
+ })
+ .collect();
+
+ serde_json::to_string_pretty(&output).unwrap_or_else(|_| "[]".to_string())
+}
+
#[cfg(test)]
mod tests {
    use super::*;
    use crate::analyzer::dclint::types::{CheckFailure, RuleCategory, Severity};

    // Round-trip a single failure through the formatter and check the
    // ESLint-style envelope (filePath, messages, counts).
    #[test]
    fn test_json_format() {
        let mut result = LintResult::new("docker-compose.yml");
        result.failures.push(CheckFailure::new(
            "DCL001",
            "no-build-and-image",
            Severity::Error,
            RuleCategory::BestPractice,
            "Test message",
            5,
            1,
        ));
        result.error_count = 1;

        let output = format(&[result]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();

        assert!(parsed.is_array());
        let arr = parsed.as_array().unwrap();
        assert_eq!(arr.len(), 1);

        let file_result = &arr[0];
        assert_eq!(file_result["filePath"], "docker-compose.yml");
        assert_eq!(file_result["errorCount"], 1);

        let messages = file_result["messages"].as_array().unwrap();
        assert_eq!(messages.len(), 1);
        assert_eq!(messages[0]["ruleId"], "DCL001");
        assert_eq!(messages[0]["line"], 5);
    }

    // A clean file still gets an entry, with an empty messages array.
    #[test]
    fn test_json_format_empty() {
        let result = LintResult::new("docker-compose.yml");
        let output = format(&[result]);
        let parsed: serde_json::Value = serde_json::from_str(&output).unwrap();

        let arr = parsed.as_array().unwrap();
        let messages = arr[0]["messages"].as_array().unwrap();
        assert!(messages.is_empty());
    }
}
diff --git a/src/analyzer/dclint/formatter/mod.rs b/src/analyzer/dclint/formatter/mod.rs
new file mode 100644
index 00000000..519e7750
--- /dev/null
+++ b/src/analyzer/dclint/formatter/mod.rs
@@ -0,0 +1,230 @@
+//! Output formatters for dclint results.
+//!
+//! Provides various output formats for lint results:
+//! - JSON - Machine-readable JSON output
+//! - Stylish - Colored terminal output (default)
+//! - Compact - Single line per issue
+//! - GitHub - GitHub Actions annotations
+//! - CodeClimate - CodeClimate format
+//! - JUnit - JUnit XML format
+
+pub mod github;
+pub mod json;
+pub mod stylish;
+
+use crate::analyzer::dclint::lint::LintResult;
+
/// Output format for lint results.
///
/// Parsed from CLI-style strings via `OutputFormat::from_str`; dispatched
/// in `format_results`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum OutputFormat {
    /// JSON format for machine processing
    Json,
    /// Stylish colored terminal output (default)
    #[default]
    Stylish,
    /// Single line per issue
    Compact,
    /// GitHub Actions annotations
    GitHub,
    /// CodeClimate format
    CodeClimate,
    /// JUnit XML format
    JUnit,
}
+
+impl OutputFormat {
+ /// Parse from string (case-insensitive).
+ pub fn from_str(s: &str) -> Option {
+ match s.to_lowercase().as_str() {
+ "json" => Some(Self::Json),
+ "stylish" => Some(Self::Stylish),
+ "compact" => Some(Self::Compact),
+ "github" | "github-actions" => Some(Self::GitHub),
+ "codeclimate" | "code-climate" => Some(Self::CodeClimate),
+ "junit" => Some(Self::JUnit),
+ _ => None,
+ }
+ }
+}
+
/// Format lint results according to the specified format.
///
/// Dispatches to the dedicated formatter modules (json/stylish/github) or
/// to the local helpers for compact, CodeClimate, and JUnit output.
pub fn format_results(results: &[LintResult], format: OutputFormat) -> String {
    match format {
        OutputFormat::Json => json::format(results),
        OutputFormat::Stylish => stylish::format(results),
        OutputFormat::Compact => format_compact(results),
        OutputFormat::GitHub => github::format(results),
        OutputFormat::CodeClimate => format_codeclimate(results),
        OutputFormat::JUnit => format_junit(results),
    }
}
+
/// Format a single result.
///
/// Convenience wrapper over `format_results` for one file; clones the
/// result to build the one-element slice.
pub fn format_result(result: &LintResult, format: OutputFormat) -> String {
    format_results(&[result.clone()], format)
}
+
/// Format results as a string.
///
/// NOTE(review): pure alias of `format_result`, kept for backward
/// compatibility — prefer calling `format_result` directly.
pub fn format_result_to_string(result: &LintResult, format: OutputFormat) -> String {
    format_result(result, format)
}
+
+/// Compact format (one line per issue).
+fn format_compact(results: &[LintResult]) -> String {
+ let mut output = String::new();
+
+ for result in results {
+ for failure in &result.failures {
+ output.push_str(&format!(
+ "{}:{}:{}: {} [{}] {}\n",
+ result.file_path,
+ failure.line,
+ failure.column,
+ failure.severity,
+ failure.code,
+ failure.message
+ ));
+ }
+ }
+
+ output
+}
+
/// CodeClimate format.
///
/// Emits a JSON array of CodeClimate "issue" objects. Severity mapping:
/// Error → critical, Warning → major, Info → minor, Style → info. The
/// fingerprint is a simple code/path/line composite (not content-hashed).
fn format_codeclimate(results: &[LintResult]) -> String {
    let mut issues = Vec::new();

    for result in results {
        for failure in &result.failures {
            issues.push(serde_json::json!({
                "type": "issue",
                "check_name": failure.code.as_str(),
                "description": failure.message,
                "content": {
                    "body": failure.message
                },
                "categories": [failure.category.as_str()],
                "location": {
                    "path": result.file_path,
                    "lines": {
                        "begin": failure.line,
                        "end": failure.end_line.unwrap_or(failure.line)
                    }
                },
                "severity": match failure.severity {
                    crate::analyzer::dclint::types::Severity::Error => "critical",
                    crate::analyzer::dclint::types::Severity::Warning => "major",
                    crate::analyzer::dclint::types::Severity::Info => "minor",
                    crate::analyzer::dclint::types::Severity::Style => "info",
                },
                "fingerprint": format!("{}-{}-{}", failure.code, result.file_path, failure.line)
            }));
        }
    }

    serde_json::to_string_pretty(&issues).unwrap_or_else(|_| "[]".to_string())
}
+
+/// JUnit XML format.
+fn format_junit(results: &[LintResult]) -> String {
+ let mut output = String::from(r#""#);
+ output.push('\n');
+
+ let total_tests: usize = results.iter().map(|r| r.failures.len().max(1)).sum();
+ let total_failures: usize = results.iter().map(|r| r.failures.len()).sum();
+
+ output.push_str(&format!(
+ r#""#,
+ total_tests, total_failures
+ ));
+ output.push('\n');
+
+ for result in results {
+ if result.failures.is_empty() {
+ output.push_str(&format!(
+ r#" "#,
+ escape_xml(&result.file_path)
+ ));
+ output.push('\n');
+ } else {
+ for failure in &result.failures {
+ output.push_str(&format!(
+ r#" "#,
+ escape_xml(&result.file_path),
+ failure.line,
+ failure.code
+ ));
+ output.push('\n');
+ output.push_str(&format!(
+ r#" "#,
+ escape_xml(&failure.message),
+ failure.severity
+ ));
+ output.push('\n');
+ output.push_str(" \n");
+ }
+ }
+ }
+
+ output.push_str(" \n");
+ output
+}
+
/// Escape XML special characters.
///
/// Replaces the five XML-reserved characters with their entities. `&` is
/// replaced first so already-inserted entities are not double-escaped.
/// (The entity strings were stripped by a lossy extraction and are
/// restored here.)
fn escape_xml(s: &str) -> String {
    s.replace('&', "&amp;")
        .replace('<', "&lt;")
        .replace('>', "&gt;")
        .replace('"', "&quot;")
        .replace('\'', "&apos;")
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::analyzer::dclint::types::{CheckFailure, RuleCategory, Severity};
+
+ fn make_result() -> LintResult {
+ let mut result = LintResult::new("docker-compose.yml");
+ result.failures.push(CheckFailure::new(
+ "DCL001",
+ "no-build-and-image",
+ Severity::Error,
+ RuleCategory::BestPractice,
+ "Test message",
+ 5,
+ 1,
+ ));
+ result
+ }
+
+ #[test]
+ fn test_compact_format() {
+ let result = make_result();
+ let output = format_compact(&[result]);
+ assert!(output.contains("docker-compose.yml"));
+ assert!(output.contains("DCL001"));
+ assert!(output.contains("5:1"));
+ }
+
+ #[test]
+ fn test_junit_format() {
+ let result = make_result();
+ let output = format_junit(&[result]);
+ assert!(output.contains(" String {
+ let mut output = String::new();
+ let mut total_errors = 0;
+ let mut total_warnings = 0;
+ let mut total_fixable = 0;
+
+ for result in results {
+ if result.failures.is_empty() && result.parse_errors.is_empty() {
+ continue;
+ }
+
+ // File header
+ output.push_str(&format!("\n{}\n", result.file_path));
+
+ // Parse errors
+ for err in &result.parse_errors {
+ output.push_str(&format!(" error {}\n", err));
+ total_errors += 1;
+ }
+
+ // Failures
+ for failure in &result.failures {
+ let severity_str = match failure.severity {
+ Severity::Error => "error",
+ Severity::Warning => "warning",
+ Severity::Info => "info",
+ Severity::Style => "style",
+ };
+
+ let fixable_str = if failure.fixable { " (fixable)" } else { "" };
+
+ output.push_str(&format!(
+ " {}:{} {} {} {}{}\n",
+ failure.line,
+ failure.column,
+ severity_str,
+ failure.message,
+ failure.code,
+ fixable_str
+ ));
+
+ match failure.severity {
+ Severity::Error => total_errors += 1,
+ Severity::Warning => total_warnings += 1,
+ _ => {}
+ }
+
+ if failure.fixable {
+ total_fixable += 1;
+ }
+ }
+ }
+
+ // Summary
+ if total_errors > 0 || total_warnings > 0 {
+ output.push('\n');
+
+ let mut parts = Vec::new();
+ if total_errors > 0 {
+ parts.push(format!(
+ "{} {}",
+ total_errors,
+ if total_errors == 1 { "error" } else { "errors" }
+ ));
+ }
+ if total_warnings > 0 {
+ parts.push(format!(
+ "{} {}",
+ total_warnings,
+ if total_warnings == 1 {
+ "warning"
+ } else {
+ "warnings"
+ }
+ ));
+ }
+
+ output.push_str(&format!(
+ " {} problem{}\n",
+ parts.join(" and "),
+ if total_errors + total_warnings == 1 {
+ ""
+ } else {
+ "s"
+ }
+ ));
+
+ if total_fixable > 0 {
+ output.push_str(&format!(
+ " {} {} potentially fixable with --fix\n",
+ total_fixable,
+ if total_fixable == 1 { "is" } else { "are" }
+ ));
+ }
+ }
+
+ output
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::analyzer::dclint::types::{CheckFailure, RuleCategory};
+
+ #[test]
+ fn test_stylish_format() {
+ let mut result = LintResult::new("docker-compose.yml");
+ result.failures.push(CheckFailure::new(
+ "DCL001",
+ "no-build-and-image",
+ Severity::Error,
+ RuleCategory::BestPractice,
+ "Service has both build and image",
+ 5,
+ 1,
+ ));
+ result.error_count = 1;
+
+ let output = format(&[result]);
+ assert!(output.contains("docker-compose.yml"));
+ assert!(output.contains("5:1"));
+ assert!(output.contains("error"));
+ assert!(output.contains("DCL001"));
+ assert!(output.contains("1 error"));
+ }
+
+ #[test]
+ fn test_stylish_format_multiple() {
+ let mut result = LintResult::new("docker-compose.yml");
+ result.failures.push(CheckFailure::new(
+ "DCL001",
+ "test",
+ Severity::Error,
+ RuleCategory::BestPractice,
+ "Error 1",
+ 5,
+ 1,
+ ));
+ result.failures.push(
+ CheckFailure::new(
+ "DCL006",
+ "test",
+ Severity::Warning,
+ RuleCategory::Style,
+ "Warning 1",
+ 1,
+ 1,
+ )
+ .with_fixable(true),
+ );
+ result.error_count = 1;
+ result.warning_count = 1;
+
+ let output = format(&[result]);
+ assert!(output.contains("1 error"));
+ assert!(output.contains("1 warning"));
+ assert!(output.contains("fixable"));
+ }
+
+ #[test]
+ fn test_stylish_format_empty() {
+ let result = LintResult::new("docker-compose.yml");
+ let output = format(&[result]);
+ assert!(output.is_empty());
+ }
+}
diff --git a/src/analyzer/dclint/lint.rs b/src/analyzer/dclint/lint.rs
new file mode 100644
index 00000000..8305ea81
--- /dev/null
+++ b/src/analyzer/dclint/lint.rs
@@ -0,0 +1,427 @@
+//! Main linting orchestration for dclint.
+//!
+//! This module ties together parsing, rules, and pragmas to provide
+//! the main linting API.
+
+use std::path::Path;
+
+use crate::analyzer::dclint::config::DclintConfig;
+use crate::analyzer::dclint::parser::{ComposeFile, parse_compose};
+use crate::analyzer::dclint::pragma::{
+ PragmaState, extract_pragmas, starts_with_disable_file_comment,
+};
+use crate::analyzer::dclint::rules::{LintContext, all_rules};
+use crate::analyzer::dclint::types::{CheckFailure, Severity};
+
+/// Result of linting a Docker Compose file.
+#[derive(Debug, Clone)]
+pub struct LintResult {
+ /// The file path that was linted.
+ pub file_path: String,
+ /// Rule violations found.
+ pub failures: Vec,
+ /// Parse errors (if any).
+ pub parse_errors: Vec,
+ /// Number of errors.
+ pub error_count: usize,
+ /// Number of warnings.
+ pub warning_count: usize,
+ /// Number of fixable errors.
+ pub fixable_error_count: usize,
+ /// Number of fixable warnings.
+ pub fixable_warning_count: usize,
+}
+
+impl LintResult {
+ /// Create a new empty result.
+ pub fn new(file_path: impl Into) -> Self {
+ Self {
+ file_path: file_path.into(),
+ failures: Vec::new(),
+ parse_errors: Vec::new(),
+ error_count: 0,
+ warning_count: 0,
+ fixable_error_count: 0,
+ fixable_warning_count: 0,
+ }
+ }
+
+ /// Update counts based on failures.
+ fn update_counts(&mut self) {
+ self.error_count = self
+ .failures
+ .iter()
+ .filter(|f| f.severity == Severity::Error)
+ .count();
+ self.warning_count = self
+ .failures
+ .iter()
+ .filter(|f| f.severity == Severity::Warning)
+ .count();
+ self.fixable_error_count = self
+ .failures
+ .iter()
+ .filter(|f| f.fixable && f.severity == Severity::Error)
+ .count();
+ self.fixable_warning_count = self
+ .failures
+ .iter()
+ .filter(|f| f.fixable && f.severity == Severity::Warning)
+ .count();
+ }
+
+ /// Check if there are any failures.
+ pub fn has_failures(&self) -> bool {
+ !self.failures.is_empty()
+ }
+
+ /// Check if there are any errors (failure with Error severity).
+ pub fn has_errors(&self) -> bool {
+ self.error_count > 0
+ }
+
+ /// Check if there are any warnings (failure with Warning severity).
+ pub fn has_warnings(&self) -> bool {
+ self.warning_count > 0
+ }
+
+ /// Get the maximum severity in the results.
+ pub fn max_severity(&self) -> Option {
+ self.failures.iter().map(|f| f.severity).max()
+ }
+
+ /// Check if the results should cause a non-zero exit.
+ pub fn should_fail(&self, threshold: Severity) -> bool {
+ if let Some(max) = self.max_severity() {
+ max >= threshold
+ } else {
+ false
+ }
+ }
+
+ /// Sort failures by line number.
+ pub fn sort(&mut self) {
+ self.failures.sort();
+ }
+}
+
/// Lint a Docker Compose file string.
///
/// Convenience wrapper around `lint_with_path` with an empty path.
pub fn lint(content: &str, config: &DclintConfig) -> LintResult {
    lint_with_path(content, "", config)
}
+
+/// Lint a Docker Compose file string with a path for error messages.
+pub fn lint_with_path(content: &str, path: &str, config: &DclintConfig) -> LintResult {
+ let mut result = LintResult::new(path);
+
+ // Check for disable-file pragma
+ if !config.disable_ignore_pragma && starts_with_disable_file_comment(content) {
+ return result; // File is completely disabled
+ }
+
+ // Parse the compose file
+ let compose = match parse_compose(content) {
+ Ok(c) => c,
+ Err(err) => {
+ result.parse_errors.push(err.to_string());
+ return result;
+ }
+ };
+
+ // Extract pragmas
+ let pragmas = if config.disable_ignore_pragma {
+ PragmaState::new()
+ } else {
+ extract_pragmas(content)
+ };
+
+ // Run all rules
+ let failures = run_rules(&compose, content, path, config, &pragmas);
+
+ // Apply config filters
+ result.failures = failures
+ .into_iter()
+ .filter(|f| {
+ // Check severity threshold
+ let effective_severity = config.effective_severity(&f.code, f.severity);
+ config.should_report(effective_severity)
+ })
+ .filter(|f| !config.is_rule_ignored(&f.code))
+ .filter(|f| !pragmas.is_ignored(&f.code, f.line))
+ .filter(|f| {
+ // Filter fixable-only if requested
+ if config.fixable_only { f.fixable } else { true }
+ })
+ .map(|mut f| {
+ // Apply severity overrides
+ f.severity = config.effective_severity(&f.code, f.severity);
+ f
+ })
+ .collect();
+
+ // Sort and update counts
+ result.sort();
+ result.update_counts();
+
+ result
+}
+
/// Lint a Docker Compose file from a file path.
///
/// Excluded paths return an empty result; unreadable files report the I/O
/// error through `parse_errors` rather than panicking.
pub fn lint_file(path: &Path, config: &DclintConfig) -> LintResult {
    let path_str = path.display().to_string();

    // Check if excluded
    if config.is_excluded(&path_str) {
        return LintResult::new(path_str);
    }

    match std::fs::read_to_string(path) {
        Ok(content) => lint_with_path(&content, &path_str, config),
        Err(err) => {
            let mut result = LintResult::new(path_str);
            result
                .parse_errors
                .push(format!("Failed to read file: {}", err));
            result
        }
    }
}
+
+/// Run all enabled rules on the compose file.
+fn run_rules(
+ compose: &ComposeFile,
+ source: &str,
+ path: &str,
+ config: &DclintConfig,
+ _pragmas: &PragmaState,
+) -> Vec {
+ let rules = all_rules();
+ let ctx = LintContext::new(compose, source, path);
+ let mut all_failures = Vec::new();
+
+ for rule in rules {
+ // Skip ignored rules
+ if config.is_rule_ignored(rule.code()) {
+ continue;
+ }
+
+ // Run the rule
+ let failures = rule.check(&ctx);
+ all_failures.extend(failures);
+ }
+
+ all_failures
+}
+
+/// Apply auto-fixes to source content.
+pub fn fix_content(content: &str, config: &DclintConfig) -> String {
+ // Check for disable-file pragma
+ if !config.disable_ignore_pragma && starts_with_disable_file_comment(content) {
+ return content.to_string();
+ }
+
+ let rules = all_rules();
+ let mut fixed = content.to_string();
+
+ // Apply fixes from all fixable rules
+ for rule in rules {
+ if rule.is_fixable() && !config.is_rule_ignored(rule.code()) {
+ if let Some(new_content) = rule.fix(&fixed) {
+ fixed = new_content;
+ }
+ }
+ }
+
+ fixed
+}
+
+/// Apply auto-fixes to a file.
+pub fn fix_file(
+ path: &Path,
+ config: &DclintConfig,
+ dry_run: bool,
+) -> Result, String> {
+ let path_str = path.display().to_string();
+
+ // Check if excluded
+ if config.is_excluded(&path_str) {
+ return Ok(None);
+ }
+
+ let content =
+ std::fs::read_to_string(path).map_err(|e| format!("Failed to read file: {}", e))?;
+
+ let fixed = fix_content(&content, config);
+
+ if fixed == content {
+ return Ok(None); // No changes
+ }
+
+ if !dry_run {
+ std::fs::write(path, &fixed).map_err(|e| format!("Failed to write file: {}", e))?;
+ }
+
+ Ok(Some(fixed))
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_lint_empty() {
+ let result = lint("", &DclintConfig::default());
+ // Empty content should fail to parse or have no services
+ assert!(result.failures.is_empty() || !result.parse_errors.is_empty());
+ }
+
+ #[test]
+ fn test_lint_valid_compose() {
+ let yaml = r#"
+name: myproject
+services:
+ web:
+ image: nginx:1.25
+ ports:
+ - "8080:80"
+"#;
+ let result = lint(yaml, &DclintConfig::default());
+ assert!(result.parse_errors.is_empty());
+ // May have some style warnings
+ }
+
+ #[test]
+ fn test_lint_with_violations() {
+ let yaml = r#"
+services:
+ web:
+ build: .
+ image: nginx:latest
+"#;
+ let result = lint(yaml, &DclintConfig::default());
+ assert!(result.parse_errors.is_empty());
+
+ // Should catch DCL001 (build+image) and DCL011 (latest tag)
+ let codes: Vec<&str> = result.failures.iter().map(|f| f.code.as_str()).collect();
+ assert!(
+ codes.contains(&"DCL001"),
+ "Should detect build+image violation"
+ );
+ }
+
+ #[test]
+ fn test_lint_with_ignore() {
+ let yaml = r#"
+services:
+ web:
+ build: .
+ image: nginx:latest
+"#;
+ let config = DclintConfig::default().ignore("DCL001");
+ let result = lint(yaml, &config);
+
+ // DCL001 should be ignored
+ let codes: Vec<&str> = result.failures.iter().map(|f| f.code.as_str()).collect();
+ assert!(!codes.contains(&"DCL001"));
+ }
+
+ #[test]
+ fn test_lint_with_pragma_ignore() {
+ let yaml = r#"
+# dclint-disable DCL001
+services:
+ web:
+ build: .
+ image: nginx:latest
+"#;
+ let result = lint(yaml, &DclintConfig::default());
+
+ // DCL001 should be ignored via pragma
+ let codes: Vec<&str> = result.failures.iter().map(|f| f.code.as_str()).collect();
+ assert!(!codes.contains(&"DCL001"));
+ }
+
+ #[test]
+ fn test_lint_disable_file() {
+ let yaml = r#"
+# dclint-disable-file
+services:
+ web:
+ build: .
+ image: nginx:latest
+"#;
+ let result = lint(yaml, &DclintConfig::default());
+
+ // All rules disabled for file
+ assert!(result.failures.is_empty());
+ }
+
+ #[test]
+ fn test_counts() {
+ let yaml = r#"
+services:
+ web:
+ build: .
+ image: nginx:latest
+ db:
+ image: postgres
+"#;
+ let result = lint(yaml, &DclintConfig::default());
+
+ // Should have at least one error (DCL001) and some warnings
+ assert!(result.error_count + result.warning_count > 0);
+ }
+
+ #[test]
+ fn test_fix_content() {
+ let yaml = r#"version: "3.8"
+
+services:
+ web:
+ image: nginx
+"#;
+ let config = DclintConfig::default();
+ let fixed = fix_content(yaml, &config);
+
+ // DCL006 fix should remove version field
+ assert!(!fixed.contains("version"));
+ }
+
+ #[test]
+ fn test_result_sort() {
+ let mut result = LintResult::new("test.yml");
+ result.failures.push(CheckFailure::new(
+ "DCL001",
+ "test",
+ Severity::Error,
+ crate::analyzer::dclint::types::RuleCategory::BestPractice,
+ "msg",
+ 10,
+ 1,
+ ));
+ result.failures.push(CheckFailure::new(
+ "DCL002",
+ "test",
+ Severity::Warning,
+ crate::analyzer::dclint::types::RuleCategory::Style,
+ "msg",
+ 5,
+ 1,
+ ));
+ result.failures.push(CheckFailure::new(
+ "DCL003",
+ "test",
+ Severity::Info,
+ crate::analyzer::dclint::types::RuleCategory::Style,
+ "msg",
+ 1,
+ 1,
+ ));
+
+ result.sort();
+
+ assert_eq!(result.failures[0].line, 1);
+ assert_eq!(result.failures[1].line, 5);
+ assert_eq!(result.failures[2].line, 10);
+ }
+}
diff --git a/src/analyzer/dclint/mod.rs b/src/analyzer/dclint/mod.rs
new file mode 100644
index 00000000..761526fe
--- /dev/null
+++ b/src/analyzer/dclint/mod.rs
@@ -0,0 +1,129 @@
+//! Dclint-RS: Native Rust Docker Compose Linter
+//!
+//! A Rust translation of the docker-compose-linter project.
+//!
+//! # Attribution
+//!
+//! This module is a derivative work based on [docker-compose-linter](https://github.com/zavoloklom/docker-compose-linter),
+//! originally written in TypeScript by Sergey Kupletsky.
+//!
//! **Original Project:** <https://github.com/zavoloklom/docker-compose-linter>
+//! **Original License:** MIT
+//!
+//! # Features
+//!
+//! - Docker Compose YAML parsing with position tracking
+//! - 15 configurable linting rules (DCL001-DCL015)
+//! - Auto-fix capability for 8 rules
+//! - Multiple output formats (JSON, Stylish, GitHub Actions, etc.)
+//! - Comment-based rule disabling
+//!
+//! # Example
+//!
+//! ```rust,ignore
+//! use syncable_cli::analyzer::dclint::{lint, DclintConfig, LintResult};
+//!
+//! let compose = r#"
+//! services:
+//! web:
+//! image: nginx:latest
+//! ports:
+//! - "8080:80"
+//! "#;
+//!
+//! let config = DclintConfig::default();
+//! let result = lint(compose, &config);
+//!
+//! for failure in result.failures {
+//! println!("{}: {} - {}", failure.line, failure.code, failure.message);
+//! }
+//! ```
+//!
+//! # Rules
+//!
+//! | Code | Name | Fixable | Description |
+//! |--------|-----------------------------------------|---------|------------------------------------------------|
+//! | DCL001 | no-build-and-image | No | Service cannot have both build and image |
+//! | DCL002 | no-duplicate-container-names | No | Container names must be unique |
+//! | DCL003 | no-duplicate-exported-ports | No | Exported ports must be unique |
+//! | DCL004 | no-quotes-in-volumes | Yes | Volume paths should not be quoted |
+//! | DCL005 | no-unbound-port-interfaces | No | Ports should bind to specific interface |
+//! | DCL006 | no-version-field | Yes | Version field is deprecated |
+//! | DCL007 | require-project-name-field | No | Require top-level name field |
+//! | DCL008 | require-quotes-in-ports | Yes | Port mappings should be quoted |
+//! | DCL009 | service-container-name-regex | No | Container name format validation |
+//! | DCL010 | service-dependencies-alphabetical-order | Yes | Sort depends_on alphabetically |
+//! | DCL011 | service-image-require-explicit-tag | No | Images need explicit tags |
+//! | DCL012 | service-keys-order | Yes | Service keys in standard order |
+//! | DCL013 | service-ports-alphabetical-order | Yes | Sort ports alphabetically |
+//! | DCL014 | services-alphabetical-order | Yes | Sort services alphabetically |
+//! | DCL015 | top-level-properties-order | Yes | Top-level keys in standard order |
+
+pub mod config;
+pub mod formatter;
+pub mod lint;
+pub mod parser;
+pub mod pragma;
+pub mod rules;
+pub mod types;
+
+// Re-export main types and functions
+pub use config::DclintConfig;
+pub use formatter::{OutputFormat, format_result, format_result_to_string, format_results};
+pub use lint::{LintResult, fix_content, fix_file, lint, lint_file, lint_with_path};
+pub use types::{CheckFailure, ConfigLevel, RuleCategory, RuleCode, RuleMeta, Severity};
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_lint_basic() {
+ let yaml = r#"
+services:
+ web:
+ image: nginx:1.25
+"#;
+ let result = lint(yaml, &DclintConfig::default());
+ assert!(result.parse_errors.is_empty());
+ }
+
+ #[test]
+ fn test_lint_with_errors() {
+ let yaml = r#"
+services:
+ web:
+ build: .
+ image: nginx
+"#;
+ let result = lint(yaml, &DclintConfig::default());
+ assert!(result.parse_errors.is_empty());
+ // Should catch DCL001 and DCL011
+ assert!(result.failures.iter().any(|f| f.code.as_str() == "DCL001"));
+ }
+
+ #[test]
+ fn test_config_ignore() {
+ let yaml = r#"
+services:
+ web:
+ build: .
+ image: nginx
+"#;
+ let config = DclintConfig::default().ignore("DCL001");
+ let result = lint(yaml, &config);
+ assert!(!result.failures.iter().any(|f| f.code.as_str() == "DCL001"));
+ }
+
+ #[test]
+ fn test_format_json() {
+ let yaml = r#"
+services:
+ web:
+ image: nginx
+"#;
+ let result = lint(yaml, &DclintConfig::default());
+ let output = format_result(&result, OutputFormat::Json);
+ assert!(output.contains("filePath"));
+ }
+}
diff --git a/src/analyzer/dclint/parser/compose.rs b/src/analyzer/dclint/parser/compose.rs
new file mode 100644
index 00000000..009254e6
--- /dev/null
+++ b/src/analyzer/dclint/parser/compose.rs
@@ -0,0 +1,779 @@
+//! Docker Compose file structure types.
+//!
+//! Defines the structure of a docker-compose.yaml file with support for
+//! position tracking.
+
+use std::collections::HashMap;
+use yaml_rust2::{Yaml, YamlLoader};
+
/// Error type for parsing.
#[derive(Debug, Clone, thiserror::Error)]
pub enum ParseError {
    /// The YAML text could not be parsed at all; the payload is the message
    /// reported by the underlying YAML loader.
    #[error("YAML parse error: {0}")]
    YamlError(String),
    /// The input contained no YAML documents.
    #[error("Empty document")]
    EmptyDocument,
    /// The YAML parsed, but its shape is not a valid compose structure
    /// (e.g. the root node is not a mapping).
    #[error("Invalid structure: {0}")]
    InvalidStructure(String),
}
+
/// A line/column location in the source file.
///
/// Lines and columns produced by this parser are 1-based; the `Default`
/// value `(0, 0)` serves as an "unknown position" placeholder.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct Position {
    pub line: u32,
    pub column: u32,
}

impl Position {
    /// Build a position from an explicit line/column pair.
    pub fn new(line: u32, column: u32) -> Self {
        Position { line, column }
    }
}
+
+/// Parsed Docker Compose file.
+#[derive(Debug, Clone, Default)]
+pub struct ComposeFile {
+ /// The deprecated `version` field.
+ pub version: Option,
+ /// Position of the version field.
+ pub version_pos: Option,
+ /// The `name` field (project name).
+ pub name: Option,
+ /// Position of the name field.
+ pub name_pos: Option,
+ /// Services defined in the compose file.
+ pub services: HashMap,
+ /// Position of the services section.
+ pub services_pos: Option,
+ /// Networks defined in the compose file.
+ pub networks: HashMap,
+ /// Volumes defined in the compose file.
+ pub volumes: HashMap,
+ /// Configs defined in the compose file.
+ pub configs: HashMap,
+ /// Secrets defined in the compose file.
+ pub secrets: HashMap,
+ /// Top-level key order (for ordering rules).
+ pub top_level_keys: Vec,
+ /// Raw source content for position lookups.
+ pub source: String,
+}
+
+/// A service definition.
+#[derive(Debug, Clone, Default)]
+pub struct Service {
+ /// Service name.
+ pub name: String,
+ /// Position of the service definition.
+ pub position: Position,
+ /// The image to use.
+ pub image: Option,
+ /// Position of the image field.
+ pub image_pos: Option,
+ /// Build configuration.
+ pub build: Option,
+ /// Position of the build field.
+ pub build_pos: Option,
+ /// Container name.
+ pub container_name: Option,
+ /// Position of the container_name field.
+ pub container_name_pos: Option,
+ /// Port mappings.
+ pub ports: Vec,
+ /// Position of the ports field.
+ pub ports_pos: Option,
+ /// Volume mounts.
+ pub volumes: Vec,
+ /// Position of the volumes field.
+ pub volumes_pos: Option,
+ /// Service dependencies.
+ pub depends_on: Vec,
+ /// Position of the depends_on field.
+ pub depends_on_pos: Option,
+ /// Environment variables.
+ pub environment: HashMap,
+ /// Pull policy (for build+image combinations).
+ pub pull_policy: Option,
+ /// All keys in this service (for ordering rules).
+ pub keys: Vec,
+ /// Raw YAML for this service.
+ pub raw: Option,
+}
+
/// Build configuration for a service.
#[derive(Debug, Clone)]
pub enum ServiceBuild {
    /// Simple build context path (e.g. `build: .`).
    Simple(String),
    /// Extended build configuration (`build:` as a mapping).
    Extended {
        context: Option<String>,
        dockerfile: Option<String>,
        /// Build args. NOTE(review): currently always left empty by the
        /// parser — confirm intended key/value types when populated.
        args: HashMap<String, String>,
        target: Option<String>,
    },
}

impl Default for ServiceBuild {
    /// Defaults to a simple build with context `"."`.
    fn default() -> Self {
        Self::Simple(".".to_string())
    }
}
+
+/// Port mapping for a service.
+#[derive(Debug, Clone)]
+pub struct ServicePort {
+ /// Raw port string (e.g., "8080:80" or "80").
+ pub raw: String,
+ /// Position in the source.
+ pub position: Position,
+ /// Whether the port is quoted in source.
+ pub is_quoted: bool,
+ /// Host port (if specified).
+ pub host_port: Option,
+ /// Container port.
+ pub container_port: u16,
+ /// Host IP binding (e.g., "127.0.0.1").
+ pub host_ip: Option,
+ /// Protocol (tcp/udp).
+ pub protocol: Option,
+}
+
+impl ServicePort {
+ /// Parse a port string.
+ pub fn parse(raw: &str, position: Position, is_quoted: bool) -> Option {
+ let raw = raw.trim();
+ if raw.is_empty() {
+ return None;
+ }
+
+ // Handle protocol suffix
+ let (port_part, protocol) = if raw.contains('/') {
+ let parts: Vec<&str> = raw.rsplitn(2, '/').collect();
+ (parts[1], Some(parts[0].to_string()))
+ } else {
+ (raw, None)
+ };
+
+ // Handle different formats:
+ // - "80" (container only)
+ // - "8080:80" (host:container)
+ // - "127.0.0.1:8080:80" (ip:host:container)
+ // - "80-90:80-90" (range)
+ let parts: Vec<&str> = port_part.split(':').collect();
+
+ let (host_ip, host_port, container_port) = match parts.len() {
+ 1 => {
+ // Just container port
+ let cp = parts[0].parse().ok()?;
+ (None, None, cp)
+ }
+ 2 => {
+ // host:container
+ let hp = parts[0].parse().ok();
+ let cp = parts[1].parse().ok()?;
+ (None, hp, cp)
+ }
+ 3 => {
+ // ip:host:container
+ let ip = Some(parts[0].to_string());
+ let hp = parts[1].parse().ok();
+ let cp = parts[2].parse().ok()?;
+ (ip, hp, cp)
+ }
+ _ => return None,
+ };
+
+ Some(Self {
+ raw: raw.to_string(),
+ position,
+ is_quoted,
+ host_port,
+ container_port,
+ host_ip,
+ protocol,
+ })
+ }
+
+ /// Check if this port has an explicit host binding interface.
+ pub fn has_explicit_interface(&self) -> bool {
+ self.host_ip.is_some()
+ }
+
+ /// Get the exported port (for duplicate checking).
+ pub fn exported_port(&self) -> Option {
+ self.host_port.map(|p| {
+ if let Some(ip) = &self.host_ip {
+ format!("{}:{}", ip, p)
+ } else {
+ p.to_string()
+ }
+ })
+ }
+}
+
+/// Volume mount for a service.
+#[derive(Debug, Clone)]
+pub struct ServiceVolume {
+ /// Raw volume string.
+ pub raw: String,
+ /// Position in the source.
+ pub position: Position,
+ /// Whether the volume is quoted in source.
+ pub is_quoted: bool,
+ /// Source path or volume name.
+ pub source: Option,
+ /// Target mount path in container.
+ pub target: String,
+ /// Mount options (ro, rw, etc.).
+ pub options: Option,
+}
+
+impl ServiceVolume {
+ /// Parse a volume string.
+ pub fn parse(raw: &str, position: Position, is_quoted: bool) -> Option {
+ let raw = raw.trim();
+ if raw.is_empty() {
+ return None;
+ }
+
+ // Handle different formats:
+ // - "/path" (anonymous volume at path)
+ // - "name:/path" (named volume)
+ // - "/host:/container" (bind mount)
+ // - "/host:/container:ro" (bind mount with options)
+ let parts: Vec<&str> = raw.splitn(3, ':').collect();
+
+ let (source, target, options) = match parts.len() {
+ 1 => (None, parts[0].to_string(), None),
+ 2 => (Some(parts[0].to_string()), parts[1].to_string(), None),
+ 3 => (
+ Some(parts[0].to_string()),
+ parts[1].to_string(),
+ Some(parts[2].to_string()),
+ ),
+ _ => return None,
+ };
+
+ Some(Self {
+ raw: raw.to_string(),
+ position,
+ is_quoted,
+ source,
+ target,
+ options,
+ })
+ }
+}
+
+/// Parse a Docker Compose file from a string.
+pub fn parse_compose(content: &str) -> Result {
+ parse_compose_with_positions(content)
+}
+
+/// Parse a Docker Compose file with position tracking.
+pub fn parse_compose_with_positions(content: &str) -> Result {
+ let docs =
+ YamlLoader::load_from_str(content).map_err(|e| ParseError::YamlError(e.to_string()))?;
+
+ let doc = docs.into_iter().next().ok_or(ParseError::EmptyDocument)?;
+
+ let hash = match &doc {
+ Yaml::Hash(h) => h,
+ _ => {
+ return Err(ParseError::InvalidStructure(
+ "Root must be a mapping".to_string(),
+ ));
+ }
+ };
+
+ let mut compose = ComposeFile {
+ source: content.to_string(),
+ ..Default::default()
+ };
+
+ // Track top-level key order
+ for (key, _) in hash {
+ if let Yaml::String(k) = key {
+ compose.top_level_keys.push(k.clone());
+ }
+ }
+
+ // Parse version
+ if let Some(Yaml::String(version)) = hash.get(&Yaml::String("version".to_string())) {
+ compose.version = Some(version.clone());
+ compose.version_pos =
+ super::find_line_for_key(content, &["version"]).map(|l| Position::new(l, 1));
+ }
+
+ // Parse name
+ if let Some(Yaml::String(name)) = hash.get(&Yaml::String("name".to_string())) {
+ compose.name = Some(name.clone());
+ compose.name_pos =
+ super::find_line_for_key(content, &["name"]).map(|l| Position::new(l, 1));
+ }
+
+ // Parse services
+ if let Some(Yaml::Hash(services)) = hash.get(&Yaml::String("services".to_string())) {
+ compose.services_pos =
+ super::find_line_for_key(content, &["services"]).map(|l| Position::new(l, 1));
+
+ for (name_yaml, service_yaml) in services {
+ if let Yaml::String(name) = name_yaml {
+ let service = parse_service(name, service_yaml, content)?;
+ compose.services.insert(name.clone(), service);
+ }
+ }
+ }
+
+ // Parse networks (as raw JSON for now)
+ if let Some(Yaml::Hash(networks)) = hash.get(&Yaml::String("networks".to_string())) {
+ for (name_yaml, value_yaml) in networks {
+ if let Yaml::String(name) = name_yaml {
+ compose
+ .networks
+ .insert(name.clone(), yaml_to_json(value_yaml));
+ }
+ }
+ }
+
+ // Parse volumes (as raw JSON for now)
+ if let Some(Yaml::Hash(volumes)) = hash.get(&Yaml::String("volumes".to_string())) {
+ for (name_yaml, value_yaml) in volumes {
+ if let Yaml::String(name) = name_yaml {
+ compose
+ .volumes
+ .insert(name.clone(), yaml_to_json(value_yaml));
+ }
+ }
+ }
+
+ Ok(compose)
+}
+
+/// Parse a service definition.
+fn parse_service(name: &str, yaml: &Yaml, source: &str) -> Result {
+ let hash = match yaml {
+ Yaml::Hash(h) => h,
+ Yaml::Null => {
+ return Ok(Service {
+ name: name.to_string(),
+ ..Default::default()
+ });
+ }
+ _ => {
+ return Err(ParseError::InvalidStructure(format!(
+ "Service '{}' must be a mapping",
+ name
+ )));
+ }
+ };
+
+ let position = super::find_line_for_service(source, name)
+ .map(|l| Position::new(l, 1))
+ .unwrap_or_default();
+
+ let mut service = Service {
+ name: name.to_string(),
+ position,
+ raw: Some(yaml.clone()),
+ ..Default::default()
+ };
+
+ // Track key order
+ for (key, _) in hash {
+ if let Yaml::String(k) = key {
+ service.keys.push(k.clone());
+ }
+ }
+
+ // Parse image
+ if let Some(Yaml::String(image)) = hash.get(&Yaml::String("image".to_string())) {
+ service.image = Some(image.clone());
+ service.image_pos =
+ super::find_line_for_service_key(source, name, "image").map(|l| Position::new(l, 1));
+ }
+
+ // Parse build
+ if let Some(build_yaml) = hash.get(&Yaml::String("build".to_string())) {
+ service.build_pos =
+ super::find_line_for_service_key(source, name, "build").map(|l| Position::new(l, 1));
+
+ service.build = Some(match build_yaml {
+ Yaml::String(s) => ServiceBuild::Simple(s.clone()),
+ Yaml::Hash(h) => {
+ let context = h
+ .get(&Yaml::String("context".to_string()))
+ .and_then(|v| match v {
+ Yaml::String(s) => Some(s.clone()),
+ _ => None,
+ });
+ let dockerfile =
+ h.get(&Yaml::String("dockerfile".to_string()))
+ .and_then(|v| match v {
+ Yaml::String(s) => Some(s.clone()),
+ _ => None,
+ });
+ let target = h
+ .get(&Yaml::String("target".to_string()))
+ .and_then(|v| match v {
+ Yaml::String(s) => Some(s.clone()),
+ _ => None,
+ });
+
+ ServiceBuild::Extended {
+ context,
+ dockerfile,
+ args: HashMap::new(),
+ target,
+ }
+ }
+ _ => ServiceBuild::Simple(".".to_string()),
+ });
+ }
+
+ // Parse container_name
+ if let Some(Yaml::String(container_name)) =
+ hash.get(&Yaml::String("container_name".to_string()))
+ {
+ service.container_name = Some(container_name.clone());
+ service.container_name_pos =
+ super::find_line_for_service_key(source, name, "container_name")
+ .map(|l| Position::new(l, 1));
+ }
+
+ // Parse ports
+ if let Some(Yaml::Array(ports)) = hash.get(&Yaml::String("ports".to_string())) {
+ service.ports_pos =
+ super::find_line_for_service_key(source, name, "ports").map(|l| Position::new(l, 1));
+
+ let ports_start_line = service.ports_pos.map(|p| p.line).unwrap_or(1);
+
+ for (idx, port_yaml) in ports.iter().enumerate() {
+ let line = ports_start_line + 1 + idx as u32;
+ let position = Position::new(line, 1);
+
+ match port_yaml {
+ Yaml::String(s) => {
+ // Check if quoted in source
+ let is_quoted = is_value_quoted_at_line(source, line);
+ if let Some(port) = ServicePort::parse(s, position, is_quoted) {
+ service.ports.push(port);
+ }
+ }
+ Yaml::Integer(i) => {
+ // Integer ports are unquoted
+ let raw = i.to_string();
+ if let Some(port) = ServicePort::parse(&raw, position, false) {
+ service.ports.push(port);
+ }
+ }
+ Yaml::Hash(h) => {
+ // Long syntax port
+ let target = h
+ .get(&Yaml::String("target".to_string()))
+ .and_then(|v| match v {
+ Yaml::Integer(i) => Some(*i as u16),
+ Yaml::String(s) => s.parse().ok(),
+ _ => None,
+ });
+ let published =
+ h.get(&Yaml::String("published".to_string()))
+ .and_then(|v| match v {
+ Yaml::Integer(i) => Some(*i as u16),
+ Yaml::String(s) => s.parse().ok(),
+ _ => None,
+ });
+ let host_ip =
+ h.get(&Yaml::String("host_ip".to_string()))
+ .and_then(|v| match v {
+ Yaml::String(s) => Some(s.clone()),
+ _ => None,
+ });
+
+ if let Some(container_port) = target {
+ service.ports.push(ServicePort {
+ raw: format!(
+ "{}:{}",
+ published.unwrap_or(container_port),
+ container_port
+ ),
+ position,
+ is_quoted: false,
+ host_port: published,
+ container_port,
+ host_ip,
+ protocol: None,
+ });
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+
+ // Parse volumes
+ if let Some(Yaml::Array(volumes)) = hash.get(&Yaml::String("volumes".to_string())) {
+ service.volumes_pos =
+ super::find_line_for_service_key(source, name, "volumes").map(|l| Position::new(l, 1));
+
+ let volumes_start_line = service.volumes_pos.map(|p| p.line).unwrap_or(1);
+
+ for (idx, vol_yaml) in volumes.iter().enumerate() {
+ let line = volumes_start_line + 1 + idx as u32;
+ let position = Position::new(line, 1);
+
+ if let Yaml::String(s) = vol_yaml {
+ let is_quoted = is_value_quoted_at_line(source, line);
+ if let Some(vol) = ServiceVolume::parse(s, position, is_quoted) {
+ service.volumes.push(vol);
+ }
+ }
+ }
+ }
+
+ // Parse depends_on
+ if let Some(depends_on_yaml) = hash.get(&Yaml::String("depends_on".to_string())) {
+ service.depends_on_pos = super::find_line_for_service_key(source, name, "depends_on")
+ .map(|l| Position::new(l, 1));
+
+ match depends_on_yaml {
+ Yaml::Array(arr) => {
+ for dep in arr {
+ if let Yaml::String(s) = dep {
+ service.depends_on.push(s.clone());
+ }
+ }
+ }
+ Yaml::Hash(h) => {
+ // Long syntax: depends_on: { db: { condition: service_healthy } }
+ for (dep_name, _) in h {
+ if let Yaml::String(s) = dep_name {
+ service.depends_on.push(s.clone());
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
+ // Parse environment
+ if let Some(env_yaml) = hash.get(&Yaml::String("environment".to_string())) {
+ match env_yaml {
+ Yaml::Hash(h) => {
+ for (key, value) in h {
+ if let (Yaml::String(k), v) = (key, value) {
+ let val = match v {
+ Yaml::String(s) => s.clone(),
+ Yaml::Integer(i) => i.to_string(),
+ Yaml::Boolean(b) => b.to_string(),
+ Yaml::Null => String::new(),
+ _ => continue,
+ };
+ service.environment.insert(k.clone(), val);
+ }
+ }
+ }
+ Yaml::Array(arr) => {
+ for item in arr {
+ if let Yaml::String(s) = item {
+ if let Some((k, v)) = s.split_once('=') {
+ service.environment.insert(k.to_string(), v.to_string());
+ } else {
+ service.environment.insert(s.clone(), String::new());
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
+ // Parse pull_policy
+ if let Some(Yaml::String(pull_policy)) = hash.get(&Yaml::String("pull_policy".to_string())) {
+ service.pull_policy = Some(pull_policy.clone());
+ }
+
+ Ok(service)
+}
+
/// Check if the value at a given line (1-based) is quoted in the source.
///
/// Handles list items (`- "value"`) and mapping entries (`key: "value"`),
/// for both double and single quotes. Returns `false` for out-of-range
/// lines, including line 0.
fn is_value_quoted_at_line(source: &str, line: u32) -> bool {
    // Positions are 1-based; `line - 1` on an unsigned zero would overflow
    // (panicking in debug builds), so guard with checked_sub.
    let idx = match line.checked_sub(1) {
        Some(i) => i as usize,
        None => return false,
    };
    let lines: Vec<&str> = source.lines().collect();
    if let Some(line_content) = lines.get(idx) {
        let trimmed = line_content.trim();
        // Check for list item with quoted value
        if trimmed.starts_with('-') {
            let after_dash = trimmed.trim_start_matches('-').trim();
            return after_dash.starts_with('"') || after_dash.starts_with('\'');
        }
        // Check for key: value with quoted value
        if let Some(pos) = trimmed.find(':') {
            let after_colon = trimmed[pos + 1..].trim();
            return after_colon.starts_with('"') || after_colon.starts_with('\'');
        }
    }
    false
}
+
+/// Convert a YAML value to JSON (for raw storage).
+fn yaml_to_json(yaml: &Yaml) -> serde_json::Value {
+ match yaml {
+ Yaml::Null => serde_json::Value::Null,
+ Yaml::Boolean(b) => serde_json::Value::Bool(*b),
+ Yaml::Integer(i) => serde_json::json!(i),
+ Yaml::Real(r) => {
+ if let Ok(f) = r.parse::() {
+ serde_json::json!(f)
+ } else {
+ serde_json::Value::String(r.clone())
+ }
+ }
+ Yaml::String(s) => serde_json::Value::String(s.clone()),
+ Yaml::Array(arr) => serde_json::Value::Array(arr.iter().map(yaml_to_json).collect()),
+ Yaml::Hash(h) => {
+ let mut map = serde_json::Map::new();
+ for (k, v) in h {
+ if let Yaml::String(key) = k {
+ map.insert(key.clone(), yaml_to_json(v));
+ }
+ }
+ serde_json::Value::Object(map)
+ }
+ _ => serde_json::Value::Null,
+ }
+}
+
#[cfg(test)]
mod tests {
    use super::*;

    // Fixtures below start with a leading newline, so line numbers inside
    // them are offset by one relative to the visible YAML.

    #[test]
    fn test_parse_simple_compose() {
        let yaml = r#"
version: "3.8"
name: myproject
services:
  web:
    image: nginx:latest
    ports:
      - "8080:80"
  db:
    image: postgres:15
"#;

        let compose = parse_compose(yaml).unwrap();
        assert_eq!(compose.version, Some("3.8".to_string()));
        assert_eq!(compose.name, Some("myproject".to_string()));
        assert_eq!(compose.services.len(), 2);

        let web = compose.services.get("web").unwrap();
        assert_eq!(web.image, Some("nginx:latest".to_string()));
        assert_eq!(web.ports.len(), 1);
        assert_eq!(web.ports[0].container_port, 80);
        assert_eq!(web.ports[0].host_port, Some(8080));
    }

    #[test]
    fn test_parse_build_and_image() {
        // build and image may legally coexist; the parser records both.
        let yaml = r#"
services:
  app:
    build: .
    image: myapp:latest
"#;

        let compose = parse_compose(yaml).unwrap();
        let app = compose.services.get("app").unwrap();
        assert!(app.build.is_some());
        assert!(app.image.is_some());
    }

    #[test]
    fn test_parse_port_formats() {
        // Covers bare-integer, host:container, and ip:host:container forms.
        let yaml = r#"
services:
  web:
    image: nginx
    ports:
      - 80
      - "8080:80"
      - "127.0.0.1:8081:80"
"#;

        let compose = parse_compose(yaml).unwrap();
        let web = compose.services.get("web").unwrap();
        assert_eq!(web.ports.len(), 3);

        assert_eq!(web.ports[0].container_port, 80);
        assert_eq!(web.ports[0].host_port, None);

        assert_eq!(web.ports[1].container_port, 80);
        assert_eq!(web.ports[1].host_port, Some(8080));

        assert_eq!(web.ports[2].container_port, 80);
        assert_eq!(web.ports[2].host_port, Some(8081));
        assert_eq!(web.ports[2].host_ip, Some("127.0.0.1".to_string()));
    }

    #[test]
    fn test_parse_depends_on() {
        // List-syntax depends_on preserves declaration order.
        let yaml = r#"
services:
  web:
    image: nginx
    depends_on:
      - db
      - redis
  db:
    image: postgres
  redis:
    image: redis
"#;

        let compose = parse_compose(yaml).unwrap();
        let web = compose.services.get("web").unwrap();
        assert_eq!(web.depends_on, vec!["db", "redis"]);
    }

    #[test]
    fn test_port_parsing() {
        // Direct ServicePort::parse coverage, independent of YAML loading.
        let pos = Position::new(1, 1);

        let p1 = ServicePort::parse("80", pos, false).unwrap();
        assert_eq!(p1.container_port, 80);
        assert_eq!(p1.host_port, None);

        let p2 = ServicePort::parse("8080:80", pos, true).unwrap();
        assert_eq!(p2.container_port, 80);
        assert_eq!(p2.host_port, Some(8080));
        assert!(p2.is_quoted);

        let p3 = ServicePort::parse("127.0.0.1:8080:80", pos, false).unwrap();
        assert_eq!(p3.container_port, 80);
        assert_eq!(p3.host_port, Some(8080));
        assert_eq!(p3.host_ip, Some("127.0.0.1".to_string()));

        let p4 = ServicePort::parse("80/udp", pos, false).unwrap();
        assert_eq!(p4.container_port, 80);
        assert_eq!(p4.protocol, Some("udp".to_string()));
    }

    #[test]
    fn test_volume_parsing() {
        // Direct ServiceVolume::parse coverage for the three short forms.
        let pos = Position::new(1, 1);

        let v1 = ServiceVolume::parse("/data", pos, false).unwrap();
        assert_eq!(v1.target, "/data");
        assert_eq!(v1.source, None);

        let v2 = ServiceVolume::parse("./host:/container", pos, false).unwrap();
        assert_eq!(v2.source, Some("./host".to_string()));
        assert_eq!(v2.target, "/container");

        let v3 = ServiceVolume::parse("./host:/container:ro", pos, false).unwrap();
        assert_eq!(v3.source, Some("./host".to_string()));
        assert_eq!(v3.target, "/container");
        assert_eq!(v3.options, Some("ro".to_string()));
    }
}
diff --git a/src/analyzer/dclint/parser/mod.rs b/src/analyzer/dclint/parser/mod.rs
new file mode 100644
index 00000000..6bdd09eb
--- /dev/null
+++ b/src/analyzer/dclint/parser/mod.rs
@@ -0,0 +1,127 @@
+//! YAML parser for Docker Compose files.
+//!
+//! Provides parsing of docker-compose.yaml files with position tracking
+//! for accurate error reporting.
+
+pub mod compose;
+
+pub use compose::{
+ ComposeFile, ParseError, Position, Service, ServiceBuild, ServicePort, ServiceVolume,
+ parse_compose, parse_compose_with_positions,
+};
+
+use yaml_rust2::{Yaml, YamlLoader};
+
+/// Parse a YAML string and return the document.
+pub fn parse_yaml(content: &str) -> Result {
+ let docs =
+ YamlLoader::load_from_str(content).map_err(|e| ParseError::YamlError(e.to_string()))?;
+
+ docs.into_iter().next().ok_or(ParseError::EmptyDocument)
+}
+
/// Find the 1-based line number for a given key path in the source YAML.
///
/// This is a lightweight textual scan, not a YAML tree walk: each path
/// element is matched in order, and every match after the first must be
/// indented deeper than the previous one. An empty path returns line 1.
///
/// NOTE(review): the scan never "closes" a scope, so a key missing from the
/// addressed mapping can match an identically named key in a later sibling —
/// acceptable for best-effort position reporting, but worth confirming.
pub fn find_line_for_key(source: &str, path: &[&str]) -> Option<u32> {
    if path.is_empty() {
        return Some(1);
    }

    let lines: Vec<&str> = source.lines().collect();
    let mut current_indent = 0;
    let mut path_idx = 0;

    for (line_num, line) in lines.iter().enumerate() {
        // Skip blank lines and comments.
        if line.trim().is_empty() || line.trim().starts_with('#') {
            continue;
        }

        let indent = line.len() - line.trim_start().len();
        let trimmed = line.trim();

        // Check if this line starts with the current path element as a key
        let target_key = path[path_idx];
        let key_pattern = format!("{}:", target_key);

        if trimmed.starts_with(&key_pattern) || trimmed == target_key {
            // The first element matches at any indent; deeper elements must
            // be indented further than the previous match.
            if path_idx == 0 || indent > current_indent {
                path_idx += 1;
                current_indent = indent;

                if path_idx == path.len() {
                    return Some((line_num + 1) as u32); // 1-indexed
                }
            }
        }
    }

    None
}
+
+/// Find the line number for a service key.
+pub fn find_line_for_service(source: &str, service_name: &str) -> Option {
+ find_line_for_key(source, &["services", service_name])
+}
+
+/// Find the line number for a key within a service.
+pub fn find_line_for_service_key(source: &str, service_name: &str, key: &str) -> Option {
+ find_line_for_key(source, &["services", service_name, key])
+}
+
/// Find the 1-based column of the value on a given line.
///
/// For a `key: value` line this is the column of the first non-space
/// character after the colon; otherwise it falls back to the position of
/// `key` itself. Returns 1 for out-of-range lines (including line 0).
pub fn find_column_for_value(source: &str, line: u32, key: &str) -> u32 {
    // Positions are 1-based; guard line 0 against unsigned underflow
    // (which would panic in debug builds).
    let idx = match line.checked_sub(1) {
        Some(i) => i as usize,
        None => return 1,
    };
    let lines: Vec<&str> = source.lines().collect();
    if let Some(line_content) = lines.get(idx) {
        if let Some(pos) = line_content.find(':') {
            // Column after the colon and any whitespace
            let after_colon = &line_content[pos + 1..];
            let leading_ws = after_colon.len() - after_colon.trim_start().len();
            return (pos + 2 + leading_ws) as u32;
        }
        // If no colon, look for the key position
        if let Some(pos) = line_content.find(key) {
            return (pos + 1) as u32;
        }
    }
    1
}
+
#[cfg(test)]
mod tests {
    use super::*;

    // Fixtures start with a leading newline, so `services:` is on line 2.

    #[test]
    fn test_find_line_for_key() {
        let yaml = r#"
services:
  web:
    image: nginx
    ports:
      - "80:80"
  db:
    image: postgres
"#;
        assert_eq!(find_line_for_key(yaml, &["services"]), Some(2));
        assert_eq!(find_line_for_key(yaml, &["services", "web"]), Some(3));
        assert_eq!(
            find_line_for_key(yaml, &["services", "web", "image"]),
            Some(4)
        );
        assert_eq!(find_line_for_key(yaml, &["services", "db"]), Some(7));
    }

    #[test]
    fn test_find_line_for_service() {
        let yaml = r#"
services:
  web:
    image: nginx
  db:
    image: postgres
"#;
        assert_eq!(find_line_for_service(yaml, "web"), Some(3));
        assert_eq!(find_line_for_service(yaml, "db"), Some(5));
        assert_eq!(find_line_for_service(yaml, "nonexistent"), None);
    }
}
diff --git a/src/analyzer/dclint/pragma.rs b/src/analyzer/dclint/pragma.rs
new file mode 100644
index 00000000..92d09e69
--- /dev/null
+++ b/src/analyzer/dclint/pragma.rs
@@ -0,0 +1,288 @@
+//! Pragma handling for inline rule disabling.
+//!
+//! Supports comment-based rule disabling similar to ESLint:
+//! - `# dclint-disable` - Disable all rules for the rest of the file
+//! - `# dclint-disable rule-name` - Disable specific rule(s) globally
+//! - `# dclint-disable-next-line` - Disable all rules for the next line
+//! - `# dclint-disable-next-line rule-name` - Disable specific rule(s) for next line
+//! - `# dclint-disable-file` - Disable all rules for the entire file
+
+use std::collections::{HashMap, HashSet};
+
+use crate::analyzer::dclint::types::RuleCode;
+
/// Tracks which rules are disabled at which lines.
#[derive(Debug, Clone, Default)]
pub struct PragmaState {
    /// Rule names disabled for the entire file (global).
    pub global_disabled: HashSet<String>,
    /// Whether all rules are disabled globally.
    pub all_disabled: bool,
    /// Rule names disabled for specific lines (line number -> rules).
    pub line_disabled: HashMap<u32, HashSet<String>>,
    /// Lines where all rules are disabled.
    pub all_disabled_lines: HashSet<u32>,
}
+
+impl PragmaState {
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ /// Check if a rule is ignored at a specific line.
+ pub fn is_ignored(&self, code: &RuleCode, line: u32) -> bool {
+ // Check global disables
+ if self.all_disabled {
+ return true;
+ }
+ if self.global_disabled.contains(code.as_str()) || self.global_disabled.contains("*") {
+ return true;
+ }
+
+ // Check line-specific disables
+ if self.all_disabled_lines.contains(&line) {
+ return true;
+ }
+ if let Some(rules) = self.line_disabled.get(&line) {
+ if rules.contains("*") || rules.contains(code.as_str()) {
+ return true;
+ }
+ }
+
+ false
+ }
+
+ /// Add a globally disabled rule.
+ pub fn disable_global(&mut self, rule: impl Into) {
+ let rule = rule.into();
+ if rule == "*" {
+ self.all_disabled = true;
+ } else {
+ self.global_disabled.insert(rule);
+ }
+ }
+
+ /// Disable rules for a specific line.
+ pub fn disable_line(&mut self, line: u32, rules: Vec) {
+ if rules.is_empty() || rules.iter().any(|r| r == "*") {
+ self.all_disabled_lines.insert(line);
+ } else {
+ self.line_disabled.entry(line).or_default().extend(rules);
+ }
+ }
+}
+
+/// Extract pragmas from source content.
+pub fn extract_pragmas(source: &str) -> PragmaState {
+ let mut state = PragmaState::new();
+ let lines: Vec<&str> = source.lines().collect();
+
+ for (idx, line) in lines.iter().enumerate() {
+ let line_num = (idx + 1) as u32;
+ let trimmed = line.trim();
+
+ // Skip non-comment lines
+ if !trimmed.starts_with('#') {
+ continue;
+ }
+
+ let comment = trimmed.trim_start_matches('#').trim();
+
+ // Check for disable-file (applies to entire file)
+ if comment.starts_with("dclint-disable-file") {
+ let rules = parse_rule_list(&comment["dclint-disable-file".len()..]);
+ if rules.is_empty() {
+ state.all_disabled = true;
+ } else {
+ for rule in rules {
+ state.disable_global(rule);
+ }
+ }
+ continue;
+ }
+
+ // Check for disable-next-line
+ if comment.starts_with("dclint-disable-next-line") {
+ let rules = parse_rule_list(&comment["dclint-disable-next-line".len()..]);
+ let next_line = line_num + 1;
+
+ if rules.is_empty() {
+ state.all_disabled_lines.insert(next_line);
+ } else {
+ state.disable_line(next_line, rules);
+ }
+ continue;
+ }
+
+ // Check for global disable (at first content line, affects rest of file)
+ if comment.starts_with("dclint-disable") && !comment.starts_with("dclint-disable-") {
+ let rules = parse_rule_list(&comment["dclint-disable".len()..]);
+ if rules.is_empty() {
+ state.all_disabled = true;
+ } else {
+ for rule in rules {
+ state.disable_global(rule);
+ }
+ }
+ continue;
+ }
+ }
+
+ state
+}
+
/// Parse a comma-separated list of rule names.
///
/// Entries are trimmed and empty ones dropped, so `" a, b ,"` yields
/// `["a", "b"]`; blank input yields an empty list.
fn parse_rule_list(s: &str) -> Vec<String> {
    let trimmed = s.trim();
    if trimmed.is_empty() {
        return vec![];
    }

    trimmed
        .split(',')
        .map(|r| r.trim().to_string())
        .filter(|r| !r.is_empty())
        .collect()
}
+
+/// Extract global disable rules from the first comment line.
+/// Returns set of disabled rule names (empty string means all disabled).
+pub fn extract_global_disable_rules(source: &str) -> HashSet