From deb923ae8cc0bdcd8b37e333ed1750e99ffca0e8 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 16 Mar 2026 17:36:34 +0000 Subject: [PATCH 001/133] Replace instant with web-time - swap the cross-platform timer dependency to web-time - remove instant-specific wasm feature wiring - update optimizer timing call sites to use web_time::Instant - keep existing native and wasm timing behavior without stdweb risk - completely remove wasm --- Cargo.toml | 9 ++------- src/alm/alm_optimizer.rs | 2 +- src/core/fbs/fbs_optimizer.rs | 2 +- src/core/panoc/panoc_optimizer.rs | 4 ++-- 4 files changed, 6 insertions(+), 11 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 450ec90f..e42617e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,11 +82,9 @@ num = "0.4" # Our own stuff - L-BFGS: limited-memory BFGS directions lbfgs = "0.3" -# Instant is a generic timer that works on Wasm (with wasm-bindgen) -instant = { version = "0.1" } +# Cross-platform time primitives with WebAssembly support +web-time = "1" -# Wasm-bindgen is only activated if OpEn is compiled with `--features wasm` -wasm-bindgen = { version = "0.2", optional = true } # sc-allocator provides an implementation of a bump allocator rpmalloc = { version = "0.2", features = [ @@ -116,9 +114,6 @@ jem = ["jemallocator"] # RPMalloc rp = ["rpmalloc"] -# WebAssembly -wasm = ["wasm-bindgen", "instant/wasm-bindgen", "instant/inaccurate"] - # -------------------------------------------------------------------------- # T.E.S.T. 
D.E.P.E.N.D.E.N.C.I.E.S # -------------------------------------------------------------------------- diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs index c7d9ec08..a872103a 100644 --- a/src/alm/alm_optimizer.rs +++ b/src/alm/alm_optimizer.rs @@ -914,7 +914,7 @@ where pub fn solve(&mut self, u: &mut [f64]) -> Result { let mut num_outer_iterations = 0; // let tic = std::time::Instant::now(); - let tic = instant::Instant::now(); + let tic = web_time::Instant::now(); let mut exit_status = ExitStatus::Converged; self.alm_cache.reset(); // first, reset the cache self.alm_cache.available_time = self.max_duration; diff --git a/src/core/fbs/fbs_optimizer.rs b/src/core/fbs/fbs_optimizer.rs index d714ab67..32cd3886 100644 --- a/src/core/fbs/fbs_optimizer.rs +++ b/src/core/fbs/fbs_optimizer.rs @@ -99,7 +99,7 @@ where ConstraintType: constraints::Constraint + 'life, { fn solve(&mut self, u: &mut [f64]) -> Result { - let now = instant::Instant::now(); + let now = web_time::Instant::now(); // Initialize - propagate error upstream, if any self.fbs_engine.init(u)?; diff --git a/src/core/panoc/panoc_optimizer.rs b/src/core/panoc/panoc_optimizer.rs index 602abd45..efe06b90 100644 --- a/src/core/panoc/panoc_optimizer.rs +++ b/src/core/panoc/panoc_optimizer.rs @@ -123,7 +123,7 @@ where ConstraintType: constraints::Constraint + 'life, { fn solve(&mut self, u: &mut [f64]) -> Result { - let now = instant::Instant::now(); + let now = web_time::Instant::now(); /* * Initialise [call panoc_engine.init()] @@ -218,7 +218,7 @@ mod tests { let mut panoc_cache = PANOCCache::new(n_dimension, tolerance, lbfgs_memory); let problem = Problem::new(&bounds, cost_gradient, cost_function); let mut panoc = PANOCOptimizer::new(problem, &mut panoc_cache).with_max_iter(max_iters); - let now = instant::Instant::now(); + let now = web_time::Instant::now(); let status = panoc.solve(&mut u_solution).unwrap(); println!("{} iterations", status.iterations()); From 
f292feb1d6bdcfdd5ffb342e812c0a143c419dd0 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 20 Mar 2026 23:01:04 +0000 Subject: [PATCH 002/133] Rust constraints: additional unit tests --- src/constraints/tests.rs | 186 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 186 insertions(+) diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index 58f88f25..c9ca8caa 100644 --- a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -1247,3 +1247,189 @@ fn t_ballp_at_xc_projection() { "wrong projection on lp-ball centered at xc != 0", ); } + +#[test] +#[should_panic] +fn t_rectangle_no_bounds() { + let _rectangle = Rectangle::new(None, None); +} + +#[test] +#[should_panic] +fn t_rectangle_only_xmin_wrong_dimension() { + let xmin = [1.0, 2.0, 3.0]; + let rectangle = Rectangle::new(Some(&xmin), None); + let mut x = [0.0, 1.0]; + rectangle.project(&mut x); +} + +#[test] +#[should_panic] +fn t_rectangle_only_xmax_wrong_dimension() { + let xmax = [1.0, 2.0, 3.0]; + let rectangle = Rectangle::new(None, Some(&xmax)); + let mut x = [0.0, 1.0]; + rectangle.project(&mut x); +} + +#[test] +#[should_panic] +fn t_halfspace_wrong_dimension() { + let normal_vector = [1.0, 2.0, 3.0]; + let halfspace = Halfspace::new(&normal_vector, 1.0); + let mut x = [1.0, 2.0]; + halfspace.project(&mut x); +} + +#[test] +#[should_panic] +fn t_ball2_wrong_dimensions() { + let center = [1.0, 2.0]; + let ball = Ball2::new(Some(¢er), 1.0); + let mut x = [1.0, 2.0, 3.0]; + ball.project(&mut x); +} + +#[test] +#[should_panic] +fn t_ball2_nonpositive_radius() { + let _ball = Ball2::new(None, 0.0); +} + +#[test] +#[should_panic] +fn t_ball_inf_wrong_dimensions() { + let center = [1.0, 2.0]; + let ball_inf = BallInf::new(Some(¢er), 1.0); + let mut x = [1.0, 2.0, 3.0]; + ball_inf.project(&mut x); +} + +#[test] +#[should_panic] +fn t_ball_inf_nonpositive_radius() { + let _ball_inf = BallInf::new(None, 0.0); +} + +#[test] +#[should_panic] +fn 
t_epigraph_squared_norm_short_vector() { + let epi = EpigraphSquaredNorm::new(); + let mut x = [1.0]; + epi.project(&mut x); +} + +#[test] +#[should_panic] +fn t_affine_space_empty_b() { + let _affine_set = AffineSpace::new(vec![1.0, 2.0], vec![]); +} + +#[test] +#[should_panic] +fn t_affine_space_project_wrong_dimension() { + let a = vec![1.0, 0.0, 0.0, 1.0]; + let b = vec![0.0, 0.0]; + let affine_set = AffineSpace::new(a, b); + let mut x = [1.0]; + affine_set.project(&mut x); +} + +#[test] +#[should_panic] +fn t_affine_space_rank_deficient_matrix() { + let a = vec![1.0, 2.0, 1.0, 2.0]; + let b = vec![1.0, 1.0]; + let _affine_set = AffineSpace::new(a, b); +} + +#[test] +fn t_is_convex_sphere2() { + let sphere = Sphere2::new(None, 1.0); + assert!(!sphere.is_convex()); +} + +#[test] +fn t_is_convex_no_constraints() { + let whole_space = NoConstraints::new(); + assert!(whole_space.is_convex()); +} + +#[test] +fn t_is_convex_rectangle() { + let xmin = [-1.0, -2.0]; + let xmax = [1.0, 2.0]; + let rectangle = Rectangle::new(Some(&xmin), Some(&xmax)); + assert!(rectangle.is_convex()); +} + +#[test] +fn t_is_convex_simplex() { + let simplex = Simplex::new(1.0); + assert!(simplex.is_convex()); +} + +#[test] +fn t_is_convex_ball1() { + let ball1 = Ball1::new(None, 1.0); + assert!(ball1.is_convex()); +} + +#[test] +fn t_is_convex_ballp() { + let ballp = BallP::new(None, 1.0, 3.0, 1e-12, 100); + assert!(ballp.is_convex()); +} + +#[test] +fn t_is_convex_epigraph_squared_norm() { + let epi = EpigraphSquaredNorm::new(); + assert!(epi.is_convex()); +} + +#[test] +fn t_is_convex_affine_space() { + let a = vec![1.0, 0.0, 0.0, 1.0]; + let b = vec![1.0, -1.0]; + let affine_set = AffineSpace::new(a, b); + assert!(affine_set.is_convex()); +} + +#[test] +#[should_panic] +fn t_ballp_nonpositive_radius() { + let _ballp = BallP::new(None, 0.0, 2.0, 1e-12, 100); +} + +#[test] +#[should_panic] +fn t_ballp_exponent_too_small() { + let _ballp = BallP::new(None, 1.0, 1.0, 1e-12, 100); +} + 
+#[test] +#[should_panic] +fn t_ballp_nonfinite_exponent() { + let _ballp = BallP::new(None, 1.0, f64::INFINITY, 1e-12, 100); +} + +#[test] +#[should_panic] +fn t_ballp_nonpositive_tolerance() { + let _ballp = BallP::new(None, 1.0, 2.0, 0.0, 100); +} + +#[test] +#[should_panic] +fn t_ballp_zero_max_iters() { + let _ballp = BallP::new(None, 1.0, 2.0, 1e-12, 0); +} + +#[test] +#[should_panic] +fn t_ballp_wrong_dimensions() { + let center = [1.0, 2.0]; + let ballp = BallP::new(Some(¢er), 1.0, 3.0, 1e-12, 100); + let mut x = [1.0, 2.0, 3.0]; + ballp.project(&mut x); +} From 156b90c7b929f572dd29eeb4740c5d83dee3a1df Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 20 Mar 2026 23:17:56 +0000 Subject: [PATCH 003/133] more tests + update changelog --- CHANGELOG.md | 9 ++ src/constraints/tests.rs | 234 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 243 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d992bded..3e521a78 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,15 @@ and this project adheres to [Semantic Versioning](http://semver.org/). Note: This is the main Changelog file for the Rust solver. 
The Changelog file for the Python interface (`opengen`) can be found in [/open-codegen/CHANGELOG.md](open-codegen/CHANGELOG.md) + +## [v0.11.1] - Unreleased + +### Changed + +- Expanded Rust constraint test coverage with constructor validation, translated `BallP` cases across multiple `p` values, and idempotence checks for projection operators; all tests pass + diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index c9ca8caa..49a64350 100644 --- a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -341,6 +341,16 @@ fn t_ball2_elsewhere() { ); } +#[test] +fn t_ball2_boundary_no_change() { + let radius = 2.0; + let mut x = [0.0, 2.0]; + let x_expected = x; + let ball = Ball2::new(None, radius); + ball.project(&mut x); + unit_test_utils::assert_nearly_equal_array(&x_expected, &x, 1e-12, 1e-12, "wrong result"); +} + #[test] fn t_no_constraints() { let mut x = [1.0, 2.0, 3.0]; @@ -620,6 +630,15 @@ fn t_ball_inf_center() { unit_test_utils::assert_nearly_equal_array(&[5.0, -6.0], &x, 1e-10, 1e-12, "centre"); } +#[test] +fn t_ball_inf_boundary_no_change() { + let ball_inf = BallInf::new(None, 1.0); + let mut x = [-1.0, 0.2, 1.0]; + let x_expected = x; + ball_inf.project(&mut x); + unit_test_utils::assert_nearly_equal_array(&x_expected, &x, 1e-12, 1e-12, "wrong result"); +} + #[test] fn t_is_convex_ball_inf() { let ball_inf = BallInf::new(None, 1.5); @@ -700,6 +719,17 @@ fn t_simplex_projection() { ); } +#[test] +fn t_halfspace_boundary_no_change() { + let normal_vector = [1.0, 2.0]; + let offset = 5.0; + let halfspace = Halfspace::new(&normal_vector, offset); + let mut x = [1.0, 2.0]; + let x_expected = x; + halfspace.project(&mut x); + unit_test_utils::assert_nearly_equal_array(&x_expected, &x, 1e-12, 1e-12, "wrong result"); +} + #[test] fn t_simplex_projection_random_spam() { let n = 10; @@ -962,6 +992,15 @@ fn t_epigraph_squared_norm_inside() { ); } +#[test] +fn t_epigraph_squared_norm_boundary_no_change() { + let epi = EpigraphSquaredNorm::new(); 
+ let mut x = [1.0, 2.0, 5.0]; + let x_expected = x; + epi.project(&mut x); + unit_test_utils::assert_nearly_equal_array(&x_expected, &x, 1e-12, 1e-12, "wrong result"); +} + #[test] fn t_epigraph_squared_norm() { let epi = EpigraphSquaredNorm::new(); @@ -1018,6 +1057,26 @@ fn t_affine_space() { ); } +#[test] +fn t_affine_space_projection_feasibility() { + let a = vec![ + 0.5, 0.1, 0.2, -0.3, -0.6, 0.3, 0., 0.5, 1.0, 0.1, -1.0, -0.4, + ]; + let b = vec![1., 2., -0.5]; + let affine_set = AffineSpace::new(a.clone(), b.clone()); + let mut x = [1., -2., -0.3, 0.5]; + affine_set.project(&mut x); + let residual = [ + a[0] * x[0] + a[1] * x[1] + a[2] * x[2] + a[3] * x[3] - b[0], + a[4] * x[0] + a[5] * x[1] + a[6] * x[2] + a[7] * x[3] - b[1], + a[8] * x[0] + a[9] * x[1] + a[10] * x[2] + a[11] * x[3] - b[2], + ]; + assert!( + crate::matrix_operations::norm_inf(&residual) <= 1e-10, + "projection does not satisfy Ax = b" + ); +} + #[test] fn t_affine_space_larger() { let a = vec![ @@ -1188,6 +1247,15 @@ fn is_norm_p_projection( true } +fn assert_projection_idempotent(constraint: &C, x0: &[f64], message: &'static str) { + let mut once = x0.to_vec(); + let mut twice = x0.to_vec(); + constraint.project(&mut once); + constraint.project(&mut twice); + constraint.project(&mut twice); + unit_test_utils::assert_nearly_equal_array(&once, &twice, 1e-10, 1e-12, message); +} + #[test] fn t_ballp_at_origin_projection() { let radius = 0.8; @@ -1201,6 +1269,172 @@ fn t_ballp_at_origin_projection() { assert!(is_norm_p_projection(&x0, &x, p, radius, 10_000)); } +#[test] +fn t_ballp_at_origin_projection_preserves_signs() { + let radius = 0.9; + let mut x = [1.0, -3.0, 2.5, -0.7]; + let x0 = x; + let ball = BallP::new(None, radius, 3.0, 1e-14, 200); + ball.project(&mut x); + for (proj, original) in x.iter().zip(x0.iter()) { + assert!(proj.abs() <= original.abs() + 1e-12); + if *original != 0.0 { + assert_eq!(proj.signum(), original.signum()); + } + } +} + +#[test] +fn 
t_ballp_zero_coordinates_branch() { + let radius = 0.7; + let p = 3.5; + let mut x = [0.0, -2.0, 0.0, 1.5]; + let x0 = x; + let ball = BallP::new(None, radius, p, 1e-14, 300); + ball.project(&mut x); + assert_eq!(x[0], 0.0); + assert_eq!(x[2], 0.0); + assert!(is_norm_p_projection(&x0, &x, p, radius, 10_000)); +} + +#[test] +fn t_ballp_outside_projection_lands_on_boundary_for_multiple_p() { + let test_cases = [ + (1.1, [2.0, -1.0, 0.5]), + (1.5, [1.0, -2.0, 3.0]), + (2.5, [3.0, -4.0, 1.0]), + (10.0, [1.2, -0.7, 2.1]), + ]; + let radius = 0.8; + + for (p, x_init) in test_cases { + let mut x = x_init; + let ball = BallP::new(None, radius, p, 1e-14, 400); + ball.project(&mut x); + let norm_p = x + .iter() + .map(|xi| xi.abs().powf(p)) + .sum::() + .powf(1.0 / p); + unit_test_utils::assert_nearly_equal( + radius, + norm_p, + 1e-9, + 1e-11, + "projection should lie on the boundary", + ); + } +} + +#[test] +fn t_ballp_boundary_no_change() { + let radius = 1.0; + let p = 4.0; + let mut x = [1.0, 0.0]; + let x_expected = x; + let ball = BallP::new(None, radius, p, 1e-14, 200); + ball.project(&mut x); + unit_test_utils::assert_nearly_equal_array(&x_expected, &x, 1e-12, 1e-12, "wrong result"); +} + +#[test] +fn t_ballp_translated_projection_multiple_p_values() { + let center = [1.0, -2.0, 0.5]; + let radius = 0.9; + let cases = [ + (1.1, [3.0, -4.0, 2.0]), + (1.5, [2.5, -0.5, 1.8]), + (2.5, [4.0, -3.5, -1.0]), + (10.0, [1.8, 0.5, 3.0]), + ]; + + for (p, x_init) in cases { + let mut x = x_init; + let ball = BallP::new(Some(¢er), radius, p, 1e-14, 400); + ball.project(&mut x); + let norm_p = x + .iter() + .zip(center.iter()) + .map(|(xi, ci)| (xi - ci).abs().powf(p)) + .sum::() + .powf(1.0 / p); + unit_test_utils::assert_nearly_equal( + radius, + norm_p, + 1e-9, + 1e-11, + "translated lp projection should lie on the boundary", + ); + } +} + +#[test] +fn t_halfspace_projection_is_idempotent() { + let normal_vector = [1.0, 2.0]; + let halfspace = Halfspace::new(&normal_vector, 
1.0); + assert_projection_idempotent( + &halfspace, + &[-1.0, 3.0], + "halfspace projection not idempotent", + ); +} + +#[test] +fn t_rectangle_projection_is_idempotent() { + let xmin = [-1.0, 0.0, -2.0]; + let xmax = [1.0, 2.0, 0.5]; + let rectangle = Rectangle::new(Some(&xmin), Some(&xmax)); + assert_projection_idempotent( + &rectangle, + &[-10.0, 1.5, 3.0], + "rectangle projection not idempotent", + ); +} + +#[test] +fn t_ball2_projection_is_idempotent() { + let center = [0.5, -1.0]; + let ball = Ball2::new(Some(¢er), 0.8); + assert_projection_idempotent(&ball, &[3.0, 2.0], "ball2 projection not idempotent"); +} + +#[test] +fn t_ball_inf_projection_is_idempotent() { + let center = [2.0, -3.0]; + let ball_inf = BallInf::new(Some(¢er), 1.2); + assert_projection_idempotent(&ball_inf, &[10.0, 1.0], "ballinf projection not idempotent"); +} + +#[test] +fn t_affine_space_projection_is_idempotent() { + let a = vec![1.0, 1.0, 0.0, 1.0, -1.0, 2.0]; + let b = vec![1.0, 0.5]; + let affine_set = AffineSpace::new(a, b); + assert_projection_idempotent( + &affine_set, + &[3.0, -2.0, 4.0], + "affine-space projection not idempotent", + ); +} + +#[test] +fn t_sphere2_projection_is_idempotent() { + let center = [1.0, 1.0, -1.0]; + let sphere = Sphere2::new(Some(¢er), 2.0); + assert_projection_idempotent( + &sphere, + &[4.0, -2.0, 3.0], + "sphere projection not idempotent", + ); +} + +#[test] +fn t_ballp_projection_is_idempotent() { + let center = [0.0, 1.0, -1.0]; + let ball = BallP::new(Some(¢er), 0.75, 3.0, 1e-14, 300); + assert_projection_idempotent(&ball, &[2.0, -3.0, 1.5], "ballp projection not idempotent"); +} + #[test] fn t_ballp_at_origin_x_already_inside() { let radius = 1.5; From 23b61fef20f02e0e158439ed9ffed17dfe1866c6 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 17:26:44 +0000 Subject: [PATCH 004/133] Lipschitz estimator in Rust - Support for generic float types - update unit tests --- src/lipschitz_estimator.rs | 89 
++++++++++++++++++++++++++------------ 1 file changed, 61 insertions(+), 28 deletions(-) diff --git a/src/lipschitz_estimator.rs b/src/lipschitz_estimator.rs index d8657062..af158bb8 100644 --- a/src/lipschitz_estimator.rs +++ b/src/lipschitz_estimator.rs @@ -37,37 +37,49 @@ //! ``` //! -use crate::{matrix_operations, SolverError}; +use crate::SolverError; +use num::Float; -const DEFAULT_DELTA: f64 = 1e-6; -const DEFAULT_EPSILON: f64 = 1e-6; +fn default_delta() -> T { + T::from(1e-6).expect("1e-6 must be representable") +} + +fn default_epsilon() -> T { + T::from(1e-6).expect("1e-6 must be representable") +} + +fn norm2(a: &[T]) -> T { + a.iter().fold(T::zero(), |sum, &x| sum + x * x).sqrt() +} /// Structure for the computation of estimates of the Lipschitz constant of mappings -pub struct LipschitzEstimator<'a, F> +pub struct LipschitzEstimator<'a, T, F> where - F: Fn(&[f64], &mut [f64]) -> Result<(), SolverError>, + T: Float, + F: Fn(&[T], &mut [T]) -> Result<(), SolverError>, { /// `u_decision_var` is the point where the Lipschitz constant is estimated - u_decision_var: &'a mut [f64], + u_decision_var: &'a mut [T], /// internally allocated workspace memory - workspace: Vec, + workspace: Vec, /// `function_value_at_u` a vector which is updated with the /// value of the given function, `F`, at `u`; the provided value /// of `function_value_at_u_p` is not used - function_value_at_u: &'a mut [f64], + function_value_at_u: &'a mut [T], /// /// Function whose Lipschitz constant is to be approximated /// /// For example, in optimization, this is the gradient (Jacobian matrix) /// of the cost function (this is a closure) function: &'a F, - epsilon_lip: f64, - delta_lip: f64, + epsilon_lip: T, + delta_lip: T, } -impl<'a, F> LipschitzEstimator<'a, F> +impl<'a, T, F> LipschitzEstimator<'a, T, F> where - F: Fn(&[f64], &mut [f64]) -> Result<(), SolverError>, + T: Float, + F: Fn(&[T], &mut [T]) -> Result<(), SolverError>, { /// Creates a new instance of this structure /// @@ 
-88,18 +100,18 @@ where /// /// pub fn new( - u_: &'a mut [f64], + u_: &'a mut [T], f_: &'a F, - function_value_: &'a mut [f64], - ) -> LipschitzEstimator<'a, F> { + function_value_: &'a mut [T], + ) -> LipschitzEstimator<'a, T, F> { let n: usize = u_.len(); LipschitzEstimator { u_decision_var: u_, - workspace: vec![0.0_f64; n], + workspace: vec![T::zero(); n], function_value_at_u: function_value_, function: f_, - epsilon_lip: DEFAULT_EPSILON, - delta_lip: DEFAULT_DELTA, + epsilon_lip: default_epsilon(), + delta_lip: default_delta(), } } @@ -113,8 +125,8 @@ where /// # Panics /// The method will panic if `delta` is non positive /// - pub fn with_delta(mut self, delta: f64) -> Self { - assert!(delta > 0.0); + pub fn with_delta(mut self, delta: T) -> Self { + assert!(delta > T::zero()); self.delta_lip = delta; self } @@ -129,8 +141,8 @@ where /// # Panics /// The method will panic if `epsilon` is non positive /// - pub fn with_epsilon(mut self, epsilon: f64) -> Self { - assert!(epsilon > 0.0); + pub fn with_epsilon(mut self, epsilon: T) -> Self { + assert!(epsilon > T::zero()); self.epsilon_lip = epsilon; self } @@ -143,7 +155,7 @@ where /// /// If `estimate_local_lipschitz` has not been computed, the result /// will point to a zero vector. - pub fn get_function_value(&self) -> &[f64] { + pub fn get_function_value(&self) -> &[T] { self.function_value_at_u } @@ -180,7 +192,7 @@ where /// No rust-side panics, unless the C function which is called via this interface /// fails. 
/// - pub fn estimate_local_lipschitz(&mut self) -> Result { + pub fn estimate_local_lipschitz(&mut self) -> Result { // function_value = gradient(u, p) (self.function)(self.u_decision_var, self.function_value_at_u)?; let epsilon_lip = self.epsilon_lip; @@ -197,14 +209,14 @@ where delta_lip } }); - let norm_h = matrix_operations::norm2(&self.workspace); + let norm_h = norm2(&self.workspace); // u += workspace // u = u + h self.u_decision_var .iter_mut() .zip(self.workspace.iter()) - .for_each(|(out, a)| *out += *a); + .for_each(|(out, a)| *out = *out + *a); // workspace = F(u + h) (self.function)(self.u_decision_var, &mut self.workspace)?; @@ -213,9 +225,9 @@ where self.workspace .iter_mut() .zip(self.function_value_at_u.iter()) - .for_each(|(out, a)| *out -= *a); + .for_each(|(out, a)| *out = *out - *a); - let norm_workspace = matrix_operations::norm2(&self.workspace); + let norm_workspace = norm2(&self.workspace); Ok(norm_workspace / norm_h) } } @@ -333,4 +345,25 @@ mod tests { "computed/actual gradient", ); } + + #[test] + fn t_test_lip_estimator_f32() { + let mut u = [1.0_f32, 2.0, 3.0]; + let mut function_value = [0.0_f32; 3]; + + let f = |u: &[f32], g: &mut [f32]| -> Result<(), SolverError> { + g[0] = 3.0 * u[0]; + g[1] = 2.0 * u[1]; + g[2] = 4.5; + Ok(()) + }; + + let mut lip_estimator = LipschitzEstimator::new(&mut u, &f, &mut function_value) + .with_delta(1e-4_f32) + .with_epsilon(1e-4_f32); + let lip = lip_estimator.estimate_local_lipschitz().unwrap(); + + let expected = 5.0_f32 / 14.0_f32.sqrt(); + assert!((lip - expected).abs() < 1e-4); + } } From 2fa950d62030b281ffcc8c8c159035aa2da16000 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 17:27:23 +0000 Subject: [PATCH 005/133] update changelog --- CHANGELOG.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3933a30..65ef37b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,17 @@ and this project adheres to [Semantic 
Versioning](http://semver.org/). Note: This is the main Changelog file for the Rust solver. The Changelog file for the Python interface (`opengen`) can be found in [/open-codegen/CHANGELOG.md](open-codegen/CHANGELOG.md) + +## [v0.11.2] - Unreleased + + +### Fixed + +- Rust solver supports generic float types + + From 0e9a9cc731fc0ffb349a96fa0fcc6a7cc5054414 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 17:31:40 +0000 Subject: [PATCH 006/133] make sure Cholesky factorizer works with f32 --- CHANGELOG.md | 2 +- src/cholesky_factorizer.rs | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 65ef37b7..c8506dd5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ Note: This is the main Changelog file for the Rust solver. The Changelog file fo ## [v0.11.2] - Unreleased diff --git a/src/cholesky_factorizer.rs b/src/cholesky_factorizer.rs index 12cad81c..e18233f0 100644 --- a/src/cholesky_factorizer.rs +++ b/src/cholesky_factorizer.rs @@ -260,6 +260,26 @@ mod tests { unit_test_utils::nearly_equal_array(&expected_sol, &x, 1e-10, 1e-12); } + #[test] + fn t_cholesky_f32() { + let a = vec![4.0_f32, 12.0, -16.0, 12.0, 37.0, -43.0, -16.0, -43.0, 98.0]; + let mut factorizer = CholeskyFactorizer::new(3); + factorizer.factorize(&a).unwrap(); + + let expected_l = [2.0_f32, 0.0, 0.0, 6.0, 1.0, 0.0, -8.0, 5.0, 3.0]; + unit_test_utils::nearly_equal_array( + &expected_l, + factorizer.cholesky_factor(), + 1e-5, + 1e-6, + ); + + let rhs = vec![-5.0_f32, 2.0, -3.0]; + let x = factorizer.solve(&rhs).unwrap(); + let expected_sol = [-280.25_f32, 77.0, -12.0]; + unit_test_utils::nearly_equal_array(&expected_sol, &x, 1e-4, 1e-5); + } + #[test] fn t_cholesky_not_square_matrix() { let a = vec![1.0_f64, 2., 7., 5., 9.]; From c76527eb7d427f3ea00f94b6d3c7e6a15225904a Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 17:36:39 +0000 Subject: [PATCH 007/133] Lipschitz estimator 
API docs --- src/lipschitz_estimator.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/src/lipschitz_estimator.rs b/src/lipschitz_estimator.rs index af158bb8..1906ddc1 100644 --- a/src/lipschitz_estimator.rs +++ b/src/lipschitz_estimator.rs @@ -5,6 +5,10 @@ //! //! Functions are provided as closures. //! +//! The estimator is generic over a scalar type `T` satisfying [`num::Float`]. +//! In practice this means it can be used with floating-point slices such as +//! `&[f64]` or `&[f32]`. The examples below use `f64` for simplicity. +//! //! # Method //! //! This method computes a numerical approximation of the norm of the directional @@ -52,7 +56,10 @@ fn norm2(a: &[T]) -> T { a.iter().fold(T::zero(), |sum, &x| sum + x * x).sqrt() } -/// Structure for the computation of estimates of the Lipschitz constant of mappings +/// Structure for the computation of estimates of the Lipschitz constant of mappings. +/// +/// The scalar type `T` is generic and must implement [`num::Float`]. This allows +/// the estimator to operate on either `f64`, `f32`, or another compatible float type. pub struct LipschitzEstimator<'a, T, F> where T: Float, @@ -83,6 +90,11 @@ where { /// Creates a new instance of this structure /// + /// The type parameter `T` is inferred from `u_`, `f_`, and `function_value_`. + /// For example, if those use `f64`, then this constructs a + /// `LipschitzEstimator<'_, f64, _>`; if they use `f32`, it constructs a + /// `LipschitzEstimator<'_, f32, _>`. 
+ /// /// # Arguments /// /// - `u_` On entry: point where the Lipschitz constant is estimated, @@ -120,7 +132,8 @@ where /// /// # Arguments /// - /// - `delta`: parameter delta (the default value is `1e-6`) + /// - `delta`: parameter delta of type `T` (the default value is `1e-6` + /// converted to `T`) /// /// # Panics /// The method will panic if `delta` is non positive @@ -136,7 +149,8 @@ where /// /// # Arguments /// - /// - `epsilon`: parameter epsilon (the default value is `1e-6`) + /// - `epsilon`: parameter epsilon of type `T` (the default value is `1e-6` + /// converted to `T`) /// /// # Panics /// The method will panic if `epsilon` is non positive @@ -151,7 +165,7 @@ where /// /// During the computation of the local lipschitz constant at `u`, /// the value of the given function at `u` is computed and stored - /// internally. This method returns a pointer to that vector. + /// internally. This method returns a pointer to that vector as a slice of `T`. /// /// If `estimate_local_lipschitz` has not been computed, the result /// will point to a zero vector. @@ -163,6 +177,7 @@ where /// Evaluates a local Lipschitz constant of a given function /// /// Functions are closures of type `F` as shown here. + /// The returned estimate has the same scalar type `T` as the input data. 
/// /// # Returns /// From a6db7a76aa0ef01e052f342dcfc5f7635b91da6b Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 17:37:01 +0000 Subject: [PATCH 008/133] panoc_cache: generic float types --- src/core/panoc/panoc_cache.rs | 179 ++++++++++++++++++++-------------- 1 file changed, 107 insertions(+), 72 deletions(-) diff --git a/src/core/panoc/panoc_cache.rs b/src/core/panoc/panoc_cache.rs index 7711a349..7aa4b69c 100644 --- a/src/core/panoc/panoc_cache.rs +++ b/src/core/panoc/panoc_cache.rs @@ -1,6 +1,18 @@ -const DEFAULT_SY_EPSILON: f64 = 1e-10; -const DEFAULT_CBFGS_EPSILON: f64 = 1e-8; -const DEFAULT_CBFGS_ALPHA: f64 = 1.0; +use lbfgs::LbfgsPrecision; +use num::Float; +use std::iter::Sum; + +fn default_sy_epsilon() -> T { + T::from(1e-10).expect("1e-10 must be representable") +} + +fn default_cbfgs_epsilon() -> T { + T::from(1e-8).expect("1e-8 must be representable") +} + +fn default_cbfgs_alpha() -> T { + T::one() +} /// Cache for PANOC /// @@ -12,38 +24,44 @@ const DEFAULT_CBFGS_ALPHA: f64 = 1.0; /// Subsequently, a `PANOCEngine` is used to construct an instance of `PANOCAlgorithm` /// #[derive(Debug)] -pub struct PANOCCache { - pub(crate) lbfgs: lbfgs::Lbfgs, - pub(crate) gradient_u: Vec, +pub struct PANOCCache +where + T: Float + LbfgsPrecision + Sum, +{ + pub(crate) lbfgs: lbfgs::Lbfgs, + pub(crate) gradient_u: Vec, /// Stores the gradient of the cost at the previous iteration. 
This is /// an optional field because it is used (and needs to be allocated) /// only if we need to check the AKKT-specific termination conditions - pub(crate) gradient_u_previous: Option>, - pub(crate) u_half_step: Vec, + pub(crate) gradient_u_previous: Option>, + pub(crate) u_half_step: Vec, /// Keeps track of best point so far - pub(crate) best_u_half_step: Vec, - pub(crate) gradient_step: Vec, - pub(crate) direction_lbfgs: Vec, - pub(crate) u_plus: Vec, - pub(crate) rhs_ls: f64, - pub(crate) lhs_ls: f64, - pub(crate) gamma_fpr: Vec, - pub(crate) gamma: f64, - pub(crate) tolerance: f64, - pub(crate) norm_gamma_fpr: f64, + pub(crate) best_u_half_step: Vec, + pub(crate) gradient_step: Vec, + pub(crate) direction_lbfgs: Vec, + pub(crate) u_plus: Vec, + pub(crate) rhs_ls: T, + pub(crate) lhs_ls: T, + pub(crate) gamma_fpr: Vec, + pub(crate) gamma: T, + pub(crate) tolerance: T, + pub(crate) norm_gamma_fpr: T, /// Keeps track of best FPR so far - pub(crate) best_norm_gamma_fpr: f64, - pub(crate) gradient_u_norm_sq: f64, - pub(crate) gradient_step_u_half_step_diff_norm_sq: f64, - pub(crate) tau: f64, - pub(crate) lipschitz_constant: f64, - pub(crate) sigma: f64, - pub(crate) cost_value: f64, + pub(crate) best_norm_gamma_fpr: T, + pub(crate) gradient_u_norm_sq: T, + pub(crate) gradient_step_u_half_step_diff_norm_sq: T, + pub(crate) tau: T, + pub(crate) lipschitz_constant: T, + pub(crate) sigma: T, + pub(crate) cost_value: T, pub(crate) iteration: usize, - pub(crate) akkt_tolerance: Option, + pub(crate) akkt_tolerance: Option, } -impl PANOCCache { +impl PANOCCache +where + T: Float + LbfgsPrecision + Sum, +{ /// Construct a new instance of `PANOCCache` /// /// ## Arguments @@ -63,36 +81,36 @@ impl PANOCCache { /// /// This constructor allocated memory using `vec!`. 
/// - /// It allocates a total of `8*problem_size + 2*lbfgs_memory_size*problem_size + 2*lbfgs_memory_size + 11` floats (`f64`) + /// It allocates a total of `8*problem_size + 2*lbfgs_memory_size*problem_size + 2*lbfgs_memory_size + 11` floats of type `T` /// - pub fn new(problem_size: usize, tolerance: f64, lbfgs_memory_size: usize) -> PANOCCache { - assert!(tolerance > 0., "tolerance must be positive"); + pub fn new(problem_size: usize, tolerance: T, lbfgs_memory_size: usize) -> PANOCCache { + assert!(tolerance > T::zero(), "tolerance must be positive"); PANOCCache { - gradient_u: vec![0.0; problem_size], + gradient_u: vec![T::zero(); problem_size], gradient_u_previous: None, - u_half_step: vec![0.0; problem_size], - best_u_half_step: vec![0.0; problem_size], - gamma_fpr: vec![0.0; problem_size], - direction_lbfgs: vec![0.0; problem_size], - gradient_step: vec![0.0; problem_size], - u_plus: vec![0.0; problem_size], - gamma: 0.0, + u_half_step: vec![T::zero(); problem_size], + best_u_half_step: vec![T::zero(); problem_size], + gamma_fpr: vec![T::zero(); problem_size], + direction_lbfgs: vec![T::zero(); problem_size], + gradient_step: vec![T::zero(); problem_size], + u_plus: vec![T::zero(); problem_size], + gamma: T::zero(), tolerance, - norm_gamma_fpr: f64::INFINITY, - best_norm_gamma_fpr: f64::INFINITY, - gradient_u_norm_sq: 0.0, - gradient_step_u_half_step_diff_norm_sq: 0.0, + norm_gamma_fpr: T::infinity(), + best_norm_gamma_fpr: T::infinity(), + gradient_u_norm_sq: T::zero(), + gradient_step_u_half_step_diff_norm_sq: T::zero(), lbfgs: lbfgs::Lbfgs::new(problem_size, lbfgs_memory_size) - .with_cbfgs_alpha(DEFAULT_CBFGS_ALPHA) - .with_cbfgs_epsilon(DEFAULT_CBFGS_EPSILON) - .with_sy_epsilon(DEFAULT_SY_EPSILON), - lhs_ls: 0.0, - rhs_ls: 0.0, - tau: 1.0, - lipschitz_constant: 0.0, - sigma: 0.0, - cost_value: 0.0, + .with_cbfgs_alpha(default_cbfgs_alpha()) + .with_cbfgs_epsilon(default_cbfgs_epsilon()) + .with_sy_epsilon(default_sy_epsilon()), + lhs_ls: T::zero(), + 
rhs_ls: T::zero(), + tau: T::one(), + lipschitz_constant: T::zero(), + sigma: T::zero(), + cost_value: T::zero(), iteration: 0, akkt_tolerance: None, } @@ -109,10 +127,10 @@ impl PANOCCache { /// /// The method panics if `akkt_tolerance` is nonpositive /// - pub fn set_akkt_tolerance(&mut self, akkt_tolerance: f64) { - assert!(akkt_tolerance > 0.0, "akkt_tolerance must be positive"); + pub fn set_akkt_tolerance(&mut self, akkt_tolerance: T) { + assert!(akkt_tolerance > T::zero(), "akkt_tolerance must be positive"); self.akkt_tolerance = Some(akkt_tolerance); - self.gradient_u_previous = Some(vec![0.0; self.gradient_step.len()]); + self.gradient_u_previous = Some(vec![T::zero(); self.gradient_step.len()]); } /// Copies the value of the current cost gradient to `gradient_u_previous`, @@ -127,8 +145,8 @@ impl PANOCCache { } /// Computes the AKKT residual which is defined as `||gamma*(fpr + df - df_previous)||` - fn akkt_residual(&self) -> f64 { - let mut r = 0.0; + fn akkt_residual(&self) -> T { + let mut r = T::zero(); if let Some(df_previous) = &self.gradient_u_previous { // Notation: gamma_fpr_i is the i-th element of gamma_fpr = gamma * fpr, // df_i is the i-th element of the gradient of the cost function at the @@ -139,9 +157,8 @@ impl PANOCCache { .iter() .zip(self.gradient_u.iter()) .zip(df_previous.iter()) - .fold(0.0, |mut sum, ((&gamma_fpr_i, &df_i), &dfp_i)| { - sum += (gamma_fpr_i + self.gamma * (df_i - dfp_i)).powi(2); - sum + .fold(T::zero(), |sum, ((&gamma_fpr_i, &df_i), &dfp_i)| { + sum + (gamma_fpr_i + self.gamma * (df_i - dfp_i)).powi(2) }) .sqrt(); } @@ -185,19 +202,19 @@ impl PANOCCache { /// and `gamma` to 0.0 pub fn reset(&mut self) { self.lbfgs.reset(); - self.best_u_half_step.fill(0.0); - self.best_norm_gamma_fpr = f64::INFINITY; - self.norm_gamma_fpr = f64::INFINITY; - self.gradient_u_norm_sq = 0.0; - self.gradient_step_u_half_step_diff_norm_sq = 0.0; - self.lhs_ls = 0.0; - self.rhs_ls = 0.0; - self.tau = 1.0; - self.lipschitz_constant = 0.0; 
- self.sigma = 0.0; - self.cost_value = 0.0; + self.best_u_half_step.fill(T::zero()); + self.best_norm_gamma_fpr = T::infinity(); + self.norm_gamma_fpr = T::infinity(); + self.gradient_u_norm_sq = T::zero(); + self.gradient_step_u_half_step_diff_norm_sq = T::zero(); + self.lhs_ls = T::zero(); + self.rhs_ls = T::zero(); + self.tau = T::one(); + self.lipschitz_constant = T::zero(); + self.sigma = T::zero(); + self.cost_value = T::zero(); self.iteration = 0; - self.gamma = 0.0; + self.gamma = T::zero(); } /// Store the current half step if it improves the best fixed-point residual so far. @@ -225,7 +242,7 @@ impl PANOCCache { /// The method panics if alpha or epsilon are nonpositive and if sy_epsilon /// is negative. /// - pub fn with_cbfgs_parameters(mut self, alpha: f64, epsilon: f64, sy_epsilon: f64) -> Self { + pub fn with_cbfgs_parameters(mut self, alpha: T, epsilon: T, sy_epsilon: T) -> Self { self.lbfgs = self .lbfgs .with_cbfgs_alpha(alpha) @@ -264,4 +281,22 @@ mod tests { assert_eq!(2.0, cache.best_norm_gamma_fpr); assert_eq!(&[-1.0, -2.0], &cache.best_u_half_step[..]); } + + #[test] + fn t_cache_best_half_step_f32() { + let mut cache = PANOCCache::::new(2, 1e-6_f32, 3); + + cache.u_half_step.copy_from_slice(&[1.0_f32, 2.0]); + cache.norm_gamma_fpr = 3.0_f32; + cache.cache_best_half_step(); + + assert_eq!(3.0_f32, cache.best_norm_gamma_fpr); + assert_eq!(&[1.0_f32, 2.0], &cache.best_u_half_step[..]); + + cache.reset(); + assert!(cache.best_norm_gamma_fpr.is_infinite()); + assert!(cache.norm_gamma_fpr.is_infinite()); + assert_eq!(0.0_f32, cache.gamma); + assert_eq!(1.0_f32, cache.tau); + } } From 93fa1b65f96f4899742a907d086723621e14426c Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 17:55:44 +0000 Subject: [PATCH 009/133] Rust constraints support generic float types --- src/constraints/affine_space.rs | 29 +++++--- src/constraints/ball1.rs | 38 ++++++---- src/constraints/ball2.rs | 27 ++++--- src/constraints/ballinf.rs | 19 ++--- 
src/constraints/ballp.rs | 91 +++++++++++++----------- src/constraints/cartesian_product.rs | 12 ++-- src/constraints/epigraph_squared_norm.rs | 46 +++++++----- src/constraints/finite.rs | 31 +++++--- src/constraints/halfspace.rs | 26 ++++--- src/constraints/hyperplane.rs | 28 +++++--- src/constraints/mod.rs | 4 +- src/constraints/no_constraints.rs | 4 +- src/constraints/rectangle.rs | 15 ++-- src/constraints/simplex.rs | 34 ++++----- src/constraints/soc.rs | 26 ++++--- src/constraints/sphere2.rs | 38 ++++++---- src/constraints/tests.rs | 16 +++-- src/constraints/zero.rs | 7 +- 18 files changed, 292 insertions(+), 199 deletions(-) diff --git a/src/constraints/affine_space.rs b/src/constraints/affine_space.rs index 2f25ceba..fe705340 100644 --- a/src/constraints/affine_space.rs +++ b/src/constraints/affine_space.rs @@ -2,21 +2,25 @@ use super::Constraint; use crate::matrix_operations; use crate::CholeskyFactorizer; -use ndarray::{ArrayView1, ArrayView2}; +use ndarray::{ArrayView1, ArrayView2, LinalgScalar}; +use num::Float; #[derive(Clone)] /// An affine space here is defined as the set of solutions of a linear equation, $Ax = b$, /// that is, $E=\\{x\in\mathbb{R}^n: Ax = b\\}$, which is an affine space. It is assumed that /// the matrix $AA^\intercal$ is full-rank. 
-pub struct AffineSpace { - a_mat: Vec, - b_vec: Vec, - factorizer: CholeskyFactorizer, +pub struct AffineSpace { + a_mat: Vec, + b_vec: Vec, + factorizer: CholeskyFactorizer, n_rows: usize, n_cols: usize, } -impl AffineSpace { +impl AffineSpace +where + T: Float + LinalgScalar + 'static, +{ /// Construct a new affine space given the matrix $A\in\mathbb{R}^{m\times n}$ and /// the vector $b\in\mathbb{R}^m$ /// @@ -28,7 +32,7 @@ impl AffineSpace { /// ## Returns /// New Affine Space structure /// - pub fn new(a: Vec, b: Vec) -> Self { + pub fn new(a: Vec, b: Vec) -> Self { let n_rows = b.len(); let n_elements_a = a.len(); assert!(n_rows > 0, "b must not be empty"); @@ -50,7 +54,10 @@ impl AffineSpace { } } -impl Constraint for AffineSpace { +impl Constraint for AffineSpace +where + T: Float + LinalgScalar + 'static, +{ /// Projection onto the set $E = \\{x: Ax = b\\}$, which is computed by /// $$P_E(x) = x - A^\intercal z(x),$$ /// where $z$ is the solution of the linear system @@ -82,7 +89,7 @@ impl Constraint for AffineSpace { /// ``` /// /// The result is stored in `x` and it can be verified that $Ax = b$. 
- fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) { let n = self.n_cols; assert!(x.len() == n, "x has wrong dimension"); @@ -92,7 +99,7 @@ impl Constraint for AffineSpace { let x_view = ArrayView1::from(&x[..]); let b = ArrayView1::from(&self.b_vec[..]); let e = a.dot(&x_view) - b; - let e_slice: &[f64] = e.as_slice().unwrap(); + let e_slice: &[T] = e.as_slice().unwrap(); // Step 2: Solve AA' z = e and compute z let z = self.factorizer.solve(e_slice).unwrap(); @@ -100,7 +107,7 @@ impl Constraint for AffineSpace { // Step 3: Compute x = x - A'z let at_z = a.t().dot(&ArrayView1::from(&z[..])); for (xi, corr) in x.iter_mut().zip(at_z.iter()) { - *xi -= *corr; + *xi = *xi - *corr; } } diff --git a/src/constraints/ball1.rs b/src/constraints/ball1.rs index eb1ee77f..3f605229 100644 --- a/src/constraints/ball1.rs +++ b/src/constraints/ball1.rs @@ -1,20 +1,22 @@ use super::Constraint; use super::Simplex; +use num::Float; +use std::iter::Sum; #[derive(Copy, Clone)] /// A norm-1 ball, that is, a set given by $B_1^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert_1 \leq r\\}$ /// or a ball-1 centered at a point $x_c$, that is, $B_1^{x_c, r} = \\{x \in \mathbb{R}^n {}:{} \Vert{}x-x_c{}\Vert_1 \leq r\\}$ -pub struct Ball1<'a> { - center: Option<&'a [f64]>, - radius: f64, - simplex: Simplex, +pub struct Ball1<'a, T = f64> { + center: Option<&'a [T]>, + radius: T, + simplex: Simplex, } -impl<'a> Ball1<'a> { +impl<'a, T: Float> Ball1<'a, T> { /// Construct a new ball-1 with given center and radius. 
/// If no `center` is given, then it is assumed to be in the origin - pub fn new(center: Option<&'a [f64]>, radius: f64) -> Self { - assert!(radius > 0.0); + pub fn new(center: Option<&'a [T]>, radius: T) -> Self { + assert!(radius > T::zero()); let simplex = Simplex::new(radius); Ball1 { center, @@ -23,24 +25,30 @@ impl<'a> Ball1<'a> { } } - fn project_on_ball1_centered_at_origin(&self, x: &mut [f64]) { + fn project_on_ball1_centered_at_origin(&self, x: &mut [T]) + where + T: Sum, + { if crate::matrix_operations::norm1(x) > self.radius { // u = |x| (copied) - let mut u = vec![0.0; x.len()]; + let mut u = vec![T::zero(); x.len()]; u.iter_mut() .zip(x.iter()) - .for_each(|(ui, &xi)| *ui = f64::abs(xi)); + .for_each(|(ui, &xi)| *ui = xi.abs()); // u = P_simplex(u) self.simplex.project(&mut u); x.iter_mut() .zip(u.iter()) - .for_each(|(xi, &ui)| *xi = f64::signum(*xi) * ui); + .for_each(|(xi, &ui)| *xi = xi.signum() * ui); } } } -impl<'a> Constraint for Ball1<'a> { - fn project(&self, x: &mut [f64]) { +impl<'a, T> Constraint for Ball1<'a, T> +where + T: Float + Sum, +{ + fn project(&self, x: &mut [T]) { if let Some(center) = &self.center { assert_eq!( x.len(), @@ -49,11 +57,11 @@ impl<'a> Constraint for Ball1<'a> { ); x.iter_mut() .zip(center.iter()) - .for_each(|(xi, &ci)| *xi -= ci); + .for_each(|(xi, &ci)| *xi = *xi - ci); self.project_on_ball1_centered_at_origin(x); x.iter_mut() .zip(center.iter()) - .for_each(|(xi, &ci)| *xi += ci); + .for_each(|(xi, &ci)| *xi = *xi + ci); } else { self.project_on_ball1_centered_at_origin(x); } diff --git a/src/constraints/ball2.rs b/src/constraints/ball2.rs index c4475cde..c360f681 100644 --- a/src/constraints/ball2.rs +++ b/src/constraints/ball2.rs @@ -1,35 +1,40 @@ use super::Constraint; +use num::Float; +use std::iter::Sum; #[derive(Copy, Clone)] /// A Euclidean ball, that is, a set given by $B_2^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert \leq r\\}$ /// or a Euclidean ball centered at a point $x_c$, that is, $B_2^{x_c, 
r} = \\{x \in \mathbb{R}^n {}:{} \Vert{}x-x_c{}\Vert \leq r\\}$ -pub struct Ball2<'a> { - center: Option<&'a [f64]>, - radius: f64, +pub struct Ball2<'a, T = f64> { + center: Option<&'a [T]>, + radius: T, } -impl<'a> Ball2<'a> { +impl<'a, T: Float> Ball2<'a, T> { /// Construct a new Euclidean ball with given center and radius /// If no `center` is given, then it is assumed to be in the origin - pub fn new(center: Option<&'a [f64]>, radius: f64) -> Self { - assert!(radius > 0.0); + pub fn new(center: Option<&'a [T]>, radius: T) -> Self { + assert!(radius > T::zero()); Ball2 { center, radius } } } -impl<'a> Constraint for Ball2<'a> { - fn project(&self, x: &mut [f64]) { +impl<'a, T> Constraint for Ball2<'a, T> +where + T: Float + Sum, +{ + fn project(&self, x: &mut [T]) { if let Some(center) = &self.center { assert_eq!( x.len(), center.len(), "x and xc have incompatible dimensions" ); - let mut norm_difference = 0.0; + let mut norm_difference = T::zero(); x.iter().zip(center.iter()).for_each(|(a, b)| { let diff_ = *a - *b; - norm_difference += diff_ * diff_ + norm_difference = norm_difference + diff_ * diff_ }); norm_difference = norm_difference.sqrt(); @@ -43,7 +48,7 @@ impl<'a> Constraint for Ball2<'a> { let norm_x = crate::matrix_operations::norm2(x); if norm_x > self.radius { let norm_over_radius = norm_x / self.radius; - x.iter_mut().for_each(|x_| *x_ /= norm_over_radius); + x.iter_mut().for_each(|x_| *x_ = *x_ / norm_over_radius); } } } diff --git a/src/constraints/ballinf.rs b/src/constraints/ballinf.rs index 8b87c688..4b528764 100644 --- a/src/constraints/ballinf.rs +++ b/src/constraints/ballinf.rs @@ -1,26 +1,27 @@ use super::Constraint; +use num::Float; #[derive(Copy, Clone)] /// An infinity ball defined as $B_\infty^r = \\{x\in\mathbb{R}^n {}:{} \Vert{}x{}\Vert_{\infty} \leq r\\}$, /// where $\Vert{}\cdot{}\Vert_{\infty}$ is the infinity norm. 
The infinity ball centered at a point /// $x_c$ is defined as $B_\infty^{x_c,r} = \\{x\in\mathbb{R}^n {}:{} \Vert{}x-x_c{}\Vert_{\infty} \leq r\\}$. /// -pub struct BallInf<'a> { - center: Option<&'a [f64]>, - radius: f64, +pub struct BallInf<'a, T = f64> { + center: Option<&'a [T]>, + radius: T, } -impl<'a> BallInf<'a> { +impl<'a, T: Float> BallInf<'a, T> { /// Construct a new infinity-norm ball with given center and radius /// If no `center` is given, then it is assumed to be in the origin /// - pub fn new(center: Option<&'a [f64]>, radius: f64) -> Self { - assert!(radius > 0.0); + pub fn new(center: Option<&'a [T]>, radius: T) -> Self { + assert!(radius > T::zero()); BallInf { center, radius } } } -impl<'a> Constraint for BallInf<'a> { +impl<'a, T: Float> Constraint for BallInf<'a, T> { /// Computes the projection of a given vector `x` on the current infinity ball. /// /// @@ -42,7 +43,7 @@ impl<'a> Constraint for BallInf<'a> { /// /// for all $i=1,\ldots, n$. /// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) { if let Some(center) = &self.center { assert_eq!( x.len(), @@ -52,7 +53,7 @@ impl<'a> Constraint for BallInf<'a> { x.iter_mut() .zip(center.iter()) .filter(|(&mut xi, &ci)| (xi - ci).abs() > self.radius) - .for_each(|(xi, ci)| *xi = ci + (*xi - ci).signum() * self.radius); + .for_each(|(xi, ci)| *xi = *ci + (*xi - *ci).signum() * self.radius); } else { x.iter_mut() .filter(|xi| xi.abs() > self.radius) diff --git a/src/constraints/ballp.rs b/src/constraints/ballp.rs index 3a0d893b..de964c45 100644 --- a/src/constraints/ballp.rs +++ b/src/constraints/ballp.rs @@ -1,4 +1,5 @@ use super::Constraint; +use num::Float; #[derive(Copy, Clone)] /// An $\\ell_p$ ball, that is, @@ -89,33 +90,33 @@ use super::Constraint; /// in [`Ball2`](crate::constraints::Ball2) is more efficient /// - The quality and speed of the computation depend on the chosen numerical /// tolerance and iteration limit. 
-pub struct BallP<'a> { +pub struct BallP<'a, T = f64> { /// Optional center of the ball. /// /// If `None`, the ball is centered at the origin. /// If `Some(center)`, the ball is centered at `center`. - center: Option<&'a [f64]>, + center: Option<&'a [T]>, /// Radius of the ball. /// /// Must be strictly positive. - radius: f64, + radius: T, /// Exponent of the norm. /// /// Must satisfy `p > 1.0` and be finite. - p: f64, + p: T, /// Numerical tolerance used by the outer bisection on the Lagrange /// multiplier and by the inner Newton solver. - tolerance: f64, + tolerance: T, /// Maximum number of iterations used by the outer bisection and /// the inner Newton solver. max_iter: usize, } -impl<'a> BallP<'a> { +impl<'a, T: Float> BallP<'a, T> { /// Construct a new l_p ball with given center, radius, and exponent. /// /// - `center`: if `None`, the ball is centered at the origin @@ -124,15 +125,15 @@ impl<'a> BallP<'a> { /// - `tolerance`: tolerance for the numerical solvers /// - `max_iter`: maximum number of iterations for the numerical solvers pub fn new( - center: Option<&'a [f64]>, - radius: f64, - p: f64, - tolerance: f64, + center: Option<&'a [T]>, + radius: T, + p: T, + tolerance: T, max_iter: usize, ) -> Self { - assert!(radius > 0.0); - assert!(p > 1.0 && p.is_finite()); - assert!(tolerance > 0.0); + assert!(radius > T::zero()); + assert!(p > T::one() && p.is_finite()); + assert!(tolerance > T::zero()); assert!(max_iter > 0); BallP { @@ -150,14 +151,14 @@ impl<'a> BallP<'a> { /// The $p$-norm of a vector $x\in \mathbb{R}^n$ is given by /// $$\Vert x \Vert_p = \left(\sum_{i=1}^{n} |x_i|^p\right)^{1/p},$$ /// for $p > 1$. 
- fn lp_norm(&self, x: &[f64]) -> f64 { + fn lp_norm(&self, x: &[T]) -> T { x.iter() .map(|xi| xi.abs().powf(self.p)) - .sum::() - .powf(1.0 / self.p) + .fold(T::zero(), |sum, xi| sum + xi) + .powf(T::one() / self.p) } - fn project_lp_ball(&self, x: &mut [f64]) { + fn project_lp_ball(&self, x: &mut [T]) { let p = self.p; let r = self.radius; let tol = self.tolerance; @@ -168,32 +169,32 @@ impl<'a> BallP<'a> { return; } - let abs_x: Vec = x.iter().map(|xi| xi.abs()).collect(); + let abs_x: Vec = x.iter().map(|xi| xi.abs()).collect(); let target = r.powf(p); - let radius_error = |lambda: f64| -> f64 { + let radius_error = |lambda: T| -> T { abs_x .iter() .map(|&a| { let u = Self::solve_coordinate_newton(a, lambda, p, tol, max_iter); u.powf(p) }) - .sum::() + .fold(T::zero(), |sum, ui| sum + ui) - target }; - let mut lambda_lo = 0.0_f64; - let mut lambda_hi = 1.0_f64; + let mut lambda_lo = T::zero(); + let mut lambda_hi = T::one(); - while radius_error(lambda_hi) > 0.0 { - lambda_hi *= 2.0; - if lambda_hi > 1e20 { + while radius_error(lambda_hi) > T::zero() { + lambda_hi = lambda_hi * T::from(2.0).expect("2.0 must be representable"); + if lambda_hi > T::from(1e20).expect("1e20 must be representable") { panic!("Failed to bracket the Lagrange multiplier"); } } for _ in 0..max_iter { - let lambda_mid = 0.5 * (lambda_lo + lambda_hi); + let lambda_mid = T::from(0.5).expect("0.5 must be representable") * (lambda_lo + lambda_hi); let err = radius_error(lambda_mid); if err.abs() <= tol { @@ -202,14 +203,14 @@ impl<'a> BallP<'a> { break; } - if err > 0.0 { + if err > T::zero() { lambda_lo = lambda_mid; } else { lambda_hi = lambda_mid; } } - let lambda_star = 0.5 * (lambda_lo + lambda_hi); + let lambda_star = T::from(0.5).expect("0.5 must be representable") * (lambda_lo + lambda_hi); x.iter_mut().zip(abs_x.iter()).for_each(|(xi, &a)| { let u = Self::solve_coordinate_newton(a, lambda_star, p, tol, max_iter); @@ -222,56 +223,60 @@ impl<'a> BallP<'a> { /// /// The solution always 
belongs to [0, a], so Newton is combined with /// bracketing and a bisection fallback. - fn solve_coordinate_newton(a: f64, lambda: f64, p: f64, tol: f64, max_iter: usize) -> f64 { - if a == 0.0 { - return 0.0; + fn solve_coordinate_newton(a: T, lambda: T, p: T, tol: T, max_iter: usize) -> T { + if a == T::zero() { + return T::zero(); } - if lambda == 0.0 { + if lambda == T::zero() { return a; } - let mut lo = 0.0_f64; + let mut lo = T::zero(); let mut hi = a; // Heuristic initial guess: // exact when p = 2, and usually in the right scale for general p. - let mut u = (a / (1.0 + lambda * p)).clamp(lo, hi); + let mut u = (a / (T::one() + lambda * p)).clamp(lo, hi); for _ in 0..max_iter { - let upm1 = u.powf(p - 1.0); + let upm1 = u.powf(p - T::one()); let f = u + lambda * p * upm1 - a; if f.abs() <= tol { return u; } - if f > 0.0 { + if f > T::zero() { hi = u; } else { lo = u; } - let df = 1.0 + lambda * p * (p - 1.0) * u.powf(p - 2.0); + let df = T::one() + + lambda + * p + * (p - T::one()) + * u.powf(p - T::from(2.0).expect("2.0 must be representable")); let mut candidate = u - f / df; if !candidate.is_finite() || candidate <= lo || candidate >= hi { - candidate = 0.5 * (lo + hi); + candidate = T::from(0.5).expect("0.5 must be representable") * (lo + hi); } - if (candidate - u).abs() <= tol * (1.0 + u.abs()) { + if (candidate - u).abs() <= tol * (T::one() + u.abs()) { return candidate; } u = candidate; } - 0.5 * (lo + hi) + T::from(0.5).expect("0.5 must be representable") * (lo + hi) } } -impl<'a> Constraint for BallP<'a> { - fn project(&self, x: &mut [f64]) { +impl<'a, T: Float> Constraint for BallP<'a, T> { + fn project(&self, x: &mut [T]) { if let Some(center) = &self.center { assert_eq!( x.len(), @@ -279,7 +284,7 @@ impl<'a> Constraint for BallP<'a> { "x and xc have incompatible dimensions" ); - let mut shifted = vec![0.0; x.len()]; + let mut shifted = vec![T::zero(); x.len()]; shifted .iter_mut() .zip(x.iter().zip(center.iter())) diff --git 
a/src/constraints/cartesian_product.rs b/src/constraints/cartesian_product.rs index edf15b5f..bbca4f33 100644 --- a/src/constraints/cartesian_product.rs +++ b/src/constraints/cartesian_product.rs @@ -22,12 +22,12 @@ use super::Constraint; /// for all $i=0,\ldots, n-1$. /// #[derive(Default)] -pub struct CartesianProduct<'a> { +pub struct CartesianProduct<'a, T = f64> { idx: Vec, - constraints: Vec>, + constraints: Vec + 'a>>, } -impl<'a> CartesianProduct<'a> { +impl<'a, T> CartesianProduct<'a, T> { /// Construct a new Cartesian product of constraints. /// /// # Note @@ -123,7 +123,7 @@ impl<'a> CartesianProduct<'a> { /// ``` /// The method will panic if any of the associated projections panics. /// - pub fn add_constraint(mut self, ni: usize, constraint: impl Constraint + 'a) -> Self { + pub fn add_constraint(mut self, ni: usize, constraint: impl Constraint + 'a) -> Self { assert!( self.dimension() < ni, "provided index is smaller than or equal to previous index, or zero" @@ -134,7 +134,7 @@ impl<'a> CartesianProduct<'a> { } } -impl<'a> Constraint for CartesianProduct<'a> { +impl<'a, T> Constraint for CartesianProduct<'a, T> { /// Project onto the Cartesian product of constraints. 
/// /// The given vector `x` is updated with the projection on the set @@ -143,7 +143,7 @@ impl<'a> Constraint for CartesianProduct<'a> { /// /// The method will panic if the dimension of `x` is not equal to the /// dimension of the Cartesian product (see `dimension()`) - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) { assert!(x.len() == self.dimension(), "x has wrong size"); let mut j = 0; self.idx diff --git a/src/constraints/epigraph_squared_norm.rs b/src/constraints/epigraph_squared_norm.rs index f91617c8..1ed9874e 100644 --- a/src/constraints/epigraph_squared_norm.rs +++ b/src/constraints/epigraph_squared_norm.rs @@ -1,6 +1,13 @@ use crate::matrix_operations; use super::Constraint; +use num::Float; +use roots::FloatType; +use std::iter::Sum; + +fn cast(value: f64) -> T { + T::from(value).expect("constant must be representable") +} #[derive(Copy, Clone, Default)] /// The epigraph of the squared Euclidean norm, that is, @@ -22,7 +29,10 @@ impl EpigraphSquaredNorm { } } -impl Constraint for EpigraphSquaredNorm { +impl Constraint for EpigraphSquaredNorm +where + T: Float + FloatType + Sum, +{ /// Project on the epigraph of the squared Euclidean norm. 
/// /// Let the input be represented as $(z,t)$, where `z` is the vector formed @@ -63,7 +73,7 @@ impl Constraint for EpigraphSquaredNorm { /// /// epi.project(&mut x); /// ``` - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) { assert!( x.len() >= 2, "EpigraphSquaredNorm::project requires x.len() >= 2" @@ -81,25 +91,25 @@ impl Constraint for EpigraphSquaredNorm { // Cubic: // 4 r^3 + 4 theta r^2 + theta^2 r - ||z||^2 = 0 - let theta = 1.0 - 2.0 * t; - let a3 = 4.0; - let a2 = 4.0 * theta; + let theta = cast::(1.0) - cast::(2.0) * t; + let a3 = cast::(4.0); + let a2 = cast::(4.0) * theta; let a1 = theta * theta; let a0 = -norm_z_sq; let cubic_poly_roots = roots::find_roots_cubic(a3, a2, a1, a0); - let root_tol = 1e-6; - let mut right_root: Option = None; + let root_tol = cast::(1e-6); + let mut right_root: Option = None; // Pick the first admissible real root for &ri in cubic_poly_roots.as_ref().iter() { - let denom = 1.0 + 2.0 * (ri - t); + let denom = cast::(1.0) + cast::(2.0) * (ri - t); // We need a valid scaling and consistency with ||z_proj||^2 = ri - if denom > 0.0 { + if denom > cast::(0.0) { let candidate_norm_sq = norm_z_sq / (denom * denom); - if (candidate_norm_sq - ri).abs() <= root_tol { + if num::Float::abs(candidate_norm_sq - ri) <= root_tol { right_root = Some(ri); break; } @@ -111,37 +121,37 @@ impl Constraint for EpigraphSquaredNorm { // Newton refinement let newton_max_iters: usize = 5; - let newton_eps = 1e-14; + let newton_eps = cast::(1e-14); for _ in 0..newton_max_iters { let zsol_sq = zsol * zsol; let zsol_cb = zsol_sq * zsol; let p_z = a3 * zsol_cb + a2 * zsol_sq + a1 * zsol + a0; - if p_z.abs() <= newton_eps { + if num::Float::abs(p_z) <= newton_eps { break; } - let dp_z = 3.0 * a3 * zsol_sq + 2.0 * a2 * zsol + a1; + let dp_z = cast::(3.0) * a3 * zsol_sq + cast::(2.0) * a2 * zsol + a1; assert!( - dp_z.abs() > 1e-15, + num::Float::abs(dp_z) > cast::(1e-15), "EpigraphSquaredNorm::project: Newton derivative too small" ); 
- zsol -= p_z / dp_z; + zsol = zsol - p_z / dp_z; } let right_root = zsol; - let scaling = 1.0 + 2.0 * (right_root - t); + let scaling = cast::(1.0) + cast::(2.0) * (right_root - t); assert!( - scaling.abs() > 1e-15, + num::Float::abs(scaling) > cast::(1e-15), "EpigraphSquaredNorm::project: scaling factor too small" ); // Projection for xi in x.iter_mut().take(nx) { - *xi /= scaling; + *xi = *xi / scaling; } x[nx] = right_root; } diff --git a/src/constraints/finite.rs b/src/constraints/finite.rs index 3f9393cd..2c76ede8 100644 --- a/src/constraints/finite.rs +++ b/src/constraints/finite.rs @@ -1,17 +1,29 @@ use super::Constraint; +use num::Float; +use std::iter::Sum; + +fn norm2_squared_diff(a: &[T], b: &[T]) -> T { + assert_eq!(a.len(), b.len()); + a.iter() + .zip(b.iter()) + .fold(T::zero(), |sum, (&x, &y)| sum + (x - y) * (x - y)) +} /// /// A finite set, $X = \\{x_1, x_2, \ldots, x_n\\}\subseteq\mathbb{R}^n$, given vectors /// $x_i\in\mathbb{R}^n$ /// #[derive(Clone, Copy)] -pub struct FiniteSet<'a> { +pub struct FiniteSet<'a, T = f64> { /// The data is stored in a Vec-of-Vec datatype, that is, a vector /// of vectors - data: &'a [&'a [f64]], + data: &'a [&'a [T]], } -impl<'a> FiniteSet<'a> { +impl<'a, T> FiniteSet<'a, T> +where + T: Float + Sum, +{ /// Construct a finite set, $X = \\{x_1, x_2, \ldots, x_n\\}$, given vectors /// $x_i\in\mathbb{R}^n$ /// @@ -41,7 +53,7 @@ impl<'a> FiniteSet<'a> { /// This method will panic if the given vector of data is empty, /// or if the given vectors have unequal dimensions. /// - pub fn new(data: &'a [&'a [f64]]) -> Self { + pub fn new(data: &'a [&'a [T]]) -> Self { // Do a sanity check... 
assert!(!data.is_empty(), "empty data not allowed"); let n = data[0].len(); @@ -52,7 +64,10 @@ impl<'a> FiniteSet<'a> { } } -impl<'a> Constraint for FiniteSet<'a> { +impl<'a, T> Constraint for FiniteSet<'a, T> +where + T: Float + Sum, +{ /// /// Projection on the current finite set /// @@ -85,12 +100,12 @@ impl<'a> Constraint for FiniteSet<'a> { /// This method panics if the dimension of `x` is not equal to the /// dimension of the points in the finite set. /// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) { assert_eq!(x.len(), self.data[0].len(), "x has incompatible dimension"); let mut idx: usize = 0; - let mut best_distance: f64 = num::Float::infinity(); + let mut best_distance = T::infinity(); for (i, v) in self.data.iter().enumerate() { - let dist = crate::matrix_operations::norm2_squared_diff(v, x); + let dist = norm2_squared_diff(v, x); if dist < best_distance { idx = i; best_distance = dist; diff --git a/src/constraints/halfspace.rs b/src/constraints/halfspace.rs index 5442ca54..40ed4104 100644 --- a/src/constraints/halfspace.rs +++ b/src/constraints/halfspace.rs @@ -1,18 +1,23 @@ use super::Constraint; use crate::matrix_operations; +use num::Float; +use std::iter::Sum; #[derive(Clone)] /// A halfspace is a set given by $H = \\{x \in \mathbb{R}^n {}:{} \langle c, x\rangle \leq b\\}$. -pub struct Halfspace<'a> { +pub struct Halfspace<'a, T = f64> { /// normal vector - normal_vector: &'a [f64], + normal_vector: &'a [T], /// offset - offset: f64, + offset: T, /// squared Euclidean norm of the normal vector (computed once upon construction) - normal_vector_squared_norm: f64, + normal_vector_squared_norm: T, } -impl<'a> Halfspace<'a> { +impl<'a, T> Halfspace<'a, T> +where + T: Float + Sum, +{ /// A halfspace is a set given by $H = \\{x \in \mathbb{R}^n {}:{} \langle c, x\rangle \leq b\\}$, /// where $c$ is the normal vector of the halfspace and $b$ is an offset. 
/// @@ -45,7 +50,7 @@ impl<'a> Halfspace<'a> { /// halfspace.project(&mut x); /// ``` /// - pub fn new(normal_vector: &'a [f64], offset: f64) -> Self { + pub fn new(normal_vector: &'a [T], offset: T) -> Self { let normal_vector_squared_norm = matrix_operations::norm2_squared(normal_vector); Halfspace { normal_vector, @@ -55,7 +60,10 @@ impl<'a> Halfspace<'a> { } } -impl<'a> Constraint for Halfspace<'a> { +impl<'a, T> Constraint for Halfspace<'a, T> +where + T: Float + Sum, +{ /// Projects on halfspace using the following formula: /// /// $$\begin{aligned} @@ -79,13 +87,13 @@ impl<'a> Constraint for Halfspace<'a> { /// This method panics if the length of `x` is not equal to the dimension /// of the halfspace. /// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) { let inner_product = matrix_operations::inner_product(x, self.normal_vector); if inner_product > self.offset { let factor = (inner_product - self.offset) / self.normal_vector_squared_norm; x.iter_mut() .zip(self.normal_vector.iter()) - .for_each(|(x, normal_vector_i)| *x -= factor * normal_vector_i); + .for_each(|(x, normal_vector_i)| *x = *x - factor * *normal_vector_i); } } diff --git a/src/constraints/hyperplane.rs b/src/constraints/hyperplane.rs index 886fd494..1400fbad 100644 --- a/src/constraints/hyperplane.rs +++ b/src/constraints/hyperplane.rs @@ -1,18 +1,23 @@ use super::Constraint; use crate::matrix_operations; +use num::Float; +use std::iter::Sum; #[derive(Clone)] /// A hyperplane is a set given by $H = \\{x \in \mathbb{R}^n {}:{} \langle c, x\rangle = b\\}$. 
-pub struct Hyperplane<'a> { +pub struct Hyperplane<'a, T = f64> { /// normal vector - normal_vector: &'a [f64], + normal_vector: &'a [T], /// offset - offset: f64, + offset: T, /// squared Euclidean norm of the normal vector (computed once upon construction) - normal_vector_squared_norm: f64, + normal_vector_squared_norm: T, } -impl<'a> Hyperplane<'a> { +impl<'a, T> Hyperplane<'a, T> +where + T: Float + Sum, +{ /// A hyperplane is a set given by $H = \\{x \in \mathbb{R}^n {}:{} \langle c, x\rangle = b\\}$, /// where $c$ is the normal vector of the hyperplane and $b$ is an offset. /// @@ -44,10 +49,10 @@ impl<'a> Hyperplane<'a> { /// hyperplane.project(&mut x); /// ``` /// - pub fn new(normal_vector: &'a [f64], offset: f64) -> Self { + pub fn new(normal_vector: &'a [T], offset: T) -> Self { let normal_vector_squared_norm = matrix_operations::norm2_squared(normal_vector); assert!( - normal_vector_squared_norm > 0.0, + normal_vector_squared_norm > T::zero(), "normal_vector must have positive norm" ); Hyperplane { @@ -58,7 +63,10 @@ impl<'a> Hyperplane<'a> { } } -impl<'a> Constraint for Hyperplane<'a> { +impl<'a, T> Constraint for Hyperplane<'a, T> +where + T: Float + Sum, +{ /// Projects on the hyperplane using the formula: /// /// $$\begin{aligned} @@ -79,13 +87,13 @@ impl<'a> Constraint for Hyperplane<'a> { /// This method panics if the length of `x` is not equal to the dimension /// of the hyperplane. 
/// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) { assert_eq!(x.len(), self.normal_vector.len(), "x has wrong dimension"); let inner_product = matrix_operations::inner_product(x, self.normal_vector); let factor = (inner_product - self.offset) / self.normal_vector_squared_norm; x.iter_mut() .zip(self.normal_vector.iter()) - .for_each(|(x, nrm_vct)| *x -= factor * nrm_vct); + .for_each(|(x, nrm_vct)| *x = *x - factor * *nrm_vct); } /// Hyperplanes are convex sets diff --git a/src/constraints/mod.rs b/src/constraints/mod.rs index d4ce2462..31586257 100644 --- a/src/constraints/mod.rs +++ b/src/constraints/mod.rs @@ -46,7 +46,7 @@ pub use zero::Zero; /// /// This trait defines an abstract function that allows to compute projections /// on sets; this is implemented by a series of structures (see below for details) -pub trait Constraint { +pub trait Constraint<T = f64> { /// Projection onto the set, that is, /// /// $$ @@ -57,7 +57,7 @@ pub trait Constraint { /// /// - `x`: The given vector $x$ is updated with the projection on the set /// - fn project(&self, x: &mut [f64]); + fn project(&self, x: &mut [T]); /// Returns true if and only if the set is convex fn is_convex(&self) -> bool; diff --git a/src/constraints/no_constraints.rs b/src/constraints/no_constraints.rs index 88df8845..53188676 100644 --- a/src/constraints/no_constraints.rs +++ b/src/constraints/no_constraints.rs @@ -12,8 +12,8 @@ impl NoConstraints { } } -impl Constraint for NoConstraints { - fn project(&self, _x: &mut [f64]) {} +impl<T> Constraint<T> for NoConstraints { + fn project(&self, _x: &mut [T]) {} fn is_convex(&self) -> bool { true diff --git a/src/constraints/rectangle.rs b/src/constraints/rectangle.rs index 76b52ceb..e71c9395 100644 --- a/src/constraints/rectangle.rs +++ b/src/constraints/rectangle.rs @@ -1,4 +1,5 @@ use super::Constraint; +use num::Float; #[derive(Clone, Copy)] /// @@ -7,12 +8,12 @@ use super::Constraint; /// A set of the form $\\{x \in \mathbb{R}^n {}:{} x_{\min}
{}\leq{} x {}\leq{} x_{\max}\\}$, /// where $\leq$ is meant in the element-wise sense and either of $x_{\min}$ and $x_{\max}$ can /// be equal to infinity. -pub struct Rectangle<'a> { - xmin: Option<&'a [f64]>, - xmax: Option<&'a [f64]>, +pub struct Rectangle<'a, T = f64> { + xmin: Option<&'a [T]>, + xmax: Option<&'a [T]>, } -impl<'a> Rectangle<'a> { +impl<'a, T: Float> Rectangle<'a, T> { /// Construct a new rectangle with given $x_{\min}$ and $x_{\max}$ /// /// # Arguments @@ -34,7 +35,7 @@ impl<'a> Rectangle<'a> { /// - Both `xmin` and `xmax` have been provided, but they have incompatible /// dimensions /// - pub fn new(xmin: Option<&'a [f64]>, xmax: Option<&'a [f64]>) -> Self { + pub fn new(xmin: Option<&'a [T]>, xmax: Option<&'a [T]>) -> Self { assert!(xmin.is_some() || xmax.is_some()); // xmin or xmax must be Some assert!( xmin.is_none() || xmax.is_none() || xmin.unwrap().len() == xmax.unwrap().len(), @@ -53,8 +54,8 @@ impl<'a> Rectangle<'a> { } } -impl<'a> Constraint for Rectangle<'a> { - fn project(&self, x: &mut [f64]) { +impl<'a, T: Float> Constraint for Rectangle<'a, T> { + fn project(&self, x: &mut [T]) { if let Some(xmin) = &self.xmin { assert_eq!( x.len(), diff --git a/src/constraints/simplex.rs b/src/constraints/simplex.rs index 061b390e..2d11e897 100644 --- a/src/constraints/simplex.rs +++ b/src/constraints/simplex.rs @@ -1,50 +1,51 @@ use super::Constraint; +use num::Float; #[derive(Copy, Clone)] /// A simplex with level $\alpha$ is a set of the form /// $\Delta_\alpha^n = \\{x \in \mathbb{R}^n {}:{} x \geq 0, \sum_i x_i = \alpha\\}$, /// where $\alpha$ is a positive constant. -pub struct Simplex { +pub struct Simplex { /// Simplex level - alpha: f64, + alpha: T, } -impl Simplex { +impl Simplex { /// Construct a new simplex with given (positive) $\alpha$. The user does not need /// to specify the dimension of the simplex. 
- pub fn new(alpha: f64) -> Self { - assert!(alpha > 0.0, "alpha is nonpositive"); + pub fn new(alpha: T) -> Self { + assert!(alpha > T::zero(), "alpha is nonpositive"); Simplex { alpha } } } -impl Constraint for Simplex { +impl<T: Float> Constraint<T> for Simplex<T> { /// Project onto $\Delta_\alpha^n$ using Condat's fast projection algorithm. /// /// See: Laurent Condat. Fast Projection onto the Simplex and the $\ell_1$ Ball. /// Mathematical Programming, Series A, Springer, 2016, 158 (1), pp.575-585. /// ⟨10.1007/s10107-015-0946-6⟩. - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) { assert!(!x.is_empty(), "x must be nonempty"); let a = &self.alpha; // ---- step 1 - let mut v = Vec::<f64>::with_capacity(x.len()); // vector containing x[0] + let mut v = Vec::<T>::with_capacity(x.len()); // vector containing x[0] v.push(x[0]); let mut v_size_old: i64 = -1; // 64 bit signed int - let mut v_tilde: Vec<f64> = Vec::new(); // empty vector of f64 - let mut rho: f64 = x[0] - a; // 64 bit float + let mut v_tilde: Vec<T> = Vec::new(); + let mut rho: T = x[0] - *a; // ---- step 2 x.iter().skip(1).for_each(|x_n| { if *x_n > rho { - rho += (*x_n - rho) / ((v.len() + 1) as f64); - if rho > *x_n - a { + rho = rho + (*x_n - rho) / T::from(v.len() + 1).expect("usize must fit in T"); + if rho > *x_n - *a { v.push(*x_n); } else { v_tilde.extend(&v); v = vec![*x_n]; - rho = *x_n - a; + rho = *x_n - *a; } } }); @@ -54,7 +55,7 @@ impl Constraint for Simplex { v_tilde.iter().for_each(|v_t_n| { if *v_t_n > rho { v.push(*v_t_n); - rho += (*v_t_n - rho) / (v.len() as f64); + rho = rho + (*v_t_n - rho) / T::from(v.len()).expect("usize must fit in T"); } }); } @@ -68,7 +69,8 @@ impl Constraint for Simplex { if *v_n <= rho { hit_list.push(n); current_len_v -= 1; - rho += (rho - *v_n) / (current_len_v as f64); + rho = rho + + (rho - *v_n) / T::from(current_len_v).expect("i64 must fit in T"); } }); hit_list.iter().rev().for_each(|target| { @@ -80,7 +82,7 @@ impl Constraint for Simplex { } // ---- step 6 -
let zero: f64 = 0.0; + let zero = T::zero(); x.iter_mut().for_each(|x_n| *x_n = zero.max(*x_n - rho)); } diff --git a/src/constraints/soc.rs b/src/constraints/soc.rs index 8ff0759b..6ca51dfc 100644 --- a/src/constraints/soc.rs +++ b/src/constraints/soc.rs @@ -1,5 +1,7 @@ use super::Constraint; use crate::matrix_operations; +use num::Float; +use std::iter::Sum; #[derive(Clone, Copy)] /// @@ -17,11 +19,11 @@ use crate::matrix_operations; /// 1996 doctoral dissertation: Projection Algorithms and Monotone Operators /// (p. 40, Theorem 3.3.6). /// -pub struct SecondOrderCone { - alpha: f64, +pub struct SecondOrderCone { + alpha: T, } -impl SecondOrderCone { +impl SecondOrderCone { /// Construct a new instance of `SecondOrderCone` with parameter `alpha`. /// /// A second-order cone with parameter `alpha` is the set @@ -38,13 +40,16 @@ impl SecondOrderCone { /// # Panics /// /// The method panics if the given parameter `alpha` is nonpositive. - pub fn new(alpha: f64) -> SecondOrderCone { - assert!(alpha > 0.0); // alpha must be positive + pub fn new(alpha: T) -> SecondOrderCone { + assert!(alpha > T::zero()); // alpha must be positive SecondOrderCone { alpha } } } -impl Constraint for SecondOrderCone { +impl Constraint for SecondOrderCone +where + T: Float + Sum, +{ /// Project onto the second-order cone. /// /// # Arguments @@ -56,7 +61,7 @@ impl Constraint for SecondOrderCone { /// /// This method panics if the length of `x` is less than 2. 
/// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) { // x = (z, r) let n = x.len(); assert!(n >= 2, "x must be of dimension at least 2"); @@ -64,12 +69,13 @@ impl Constraint for SecondOrderCone { let r = x[n - 1]; let norm_z = matrix_operations::norm2(z); if self.alpha * norm_z <= -r { - x.iter_mut().for_each(|v| *v = 0.0); + x.iter_mut().for_each(|v| *v = T::zero()); } else if norm_z > self.alpha * r { - let beta = (self.alpha * norm_z + r) / (self.alpha.powi(2) + 1.0); + let beta = + (self.alpha * norm_z + r) / (self.alpha.powi(2) + T::one()); x[..n - 1] .iter_mut() - .for_each(|v| *v *= self.alpha * beta / norm_z); + .for_each(|v| *v = *v * self.alpha * beta / norm_z); x[n - 1] = beta; } } diff --git a/src/constraints/sphere2.rs b/src/constraints/sphere2.rs index 86433855..790450a4 100644 --- a/src/constraints/sphere2.rs +++ b/src/constraints/sphere2.rs @@ -1,23 +1,35 @@ use super::Constraint; +use num::Float; +use std::iter::Sum; + +fn norm2_squared_diff(a: &[T], b: &[T]) -> T { + assert_eq!(a.len(), b.len()); + a.iter() + .zip(b.iter()) + .fold(T::zero(), |sum, (&x, &y)| sum + (x - y) * (x - y)) +} #[derive(Copy, Clone)] /// A Euclidean sphere, that is, a set given by $S_2^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert = r\\}$ /// or a Euclidean sphere centered at a point $x_c$, that is, $S_2^{x_c, r} = \\{x \in \mathbb{R}^n {}:{} \Vert{}x-x_c{}\Vert = r\\}$ -pub struct Sphere2<'a> { - center: Option<&'a [f64]>, - radius: f64, +pub struct Sphere2<'a, T = f64> { + center: Option<&'a [T]>, + radius: T, } -impl<'a> Sphere2<'a> { +impl<'a, T: Float> Sphere2<'a, T> { /// Construct a new Euclidean sphere with given center and radius /// If no `center` is given, then it is assumed to be in the origin - pub fn new(center: Option<&'a [f64]>, radius: f64) -> Self { - assert!(radius > 0.0); + pub fn new(center: Option<&'a [T]>, radius: T) -> Self { + assert!(radius > T::zero()); Sphere2 { center, radius } } } -impl<'a> Constraint for Sphere2<'a> { 
+impl<'a, T> Constraint for Sphere2<'a, T> +where + T: Float + Sum, +{ /// Projection onto the sphere, $S_{r, c}$ with radius $r$ and center $c$. /// If $x\neq c$, the projection is uniquely defined by /// @@ -38,8 +50,8 @@ impl<'a> Constraint for Sphere2<'a> { /// Panics if `x` is empty or, when a center is provided, if `x` and /// `center` have incompatible dimensions. /// - fn project(&self, x: &mut [f64]) { - let epsilon = 1e-12; + fn project(&self, x: &mut [T]) { + let epsilon = T::from(1e-12).expect("1e-12 must be representable"); assert!(!x.is_empty(), "x must be nonempty"); if let Some(center) = &self.center { assert_eq!( @@ -47,10 +59,10 @@ impl<'a> Constraint for Sphere2<'a> { center.len(), "x and center have incompatible dimensions" ); - let norm_difference = crate::matrix_operations::norm2_squared_diff(x, center).sqrt(); + let norm_difference = norm2_squared_diff(x, center).sqrt(); if norm_difference <= epsilon { x.copy_from_slice(center); - x[0] += self.radius; + x[0] = x[0] + self.radius; return; } x.iter_mut().zip(center.iter()).for_each(|(x, c)| { @@ -59,11 +71,11 @@ impl<'a> Constraint for Sphere2<'a> { } else { let norm_x = crate::matrix_operations::norm2(x); if norm_x <= epsilon { - x[0] += self.radius; + x[0] = x[0] + self.radius; return; } let norm_over_radius = self.radius / norm_x; - x.iter_mut().for_each(|x_| *x_ *= norm_over_radius); + x.iter_mut().for_each(|x_| *x_ = *x_ * norm_over_radius); } } diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index 58f88f25..7a54f2f5 100644 --- a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -308,7 +308,8 @@ fn t_ball2_at_center_different_radius_outside() { let center = [-0.8, -1.1]; let ball = Ball2::new(Some(¢er), radius); ball.project(&mut x); - let norm_x_minus_c = crate::matrix_operations::norm2_squared_diff(&x, ¢er).sqrt(); + let norm_sq_x_minus_c: f64 = crate::matrix_operations::norm2_squared_diff(&x, ¢er); + let norm_x_minus_c = norm_sq_x_minus_c.sqrt(); 
unit_test_utils::assert_nearly_equal(radius, norm_x_minus_c, 1e-10, 1e-12, "wrong norm"); } @@ -470,7 +471,7 @@ fn t_second_order_cone_case_iii() { let mut x = vec![1.0, 1.0, 0.1]; soc.project(&mut x); // make sure the new `x` is in the cone - let norm_z = crate::matrix_operations::norm2(&x[..=1]); + let norm_z: f64 = crate::matrix_operations::norm2(&x[..=1]); assert!(norm_z <= alpha * x[2]); // in fact the projection should be on the boundary of the cone assert!((norm_z - alpha * x[2]).abs() <= 1e-7); @@ -650,7 +651,7 @@ fn t_is_convex_soc() { #[test] fn t_is_convex_zero() { let zero = Zero::new(); - assert!(zero.is_convex()); + assert!(<Zero as Constraint<f64>>::is_convex(&zero)); } #[test] @@ -1231,11 +1232,14 @@ fn t_ballp_at_xc_projection() { let ball = BallP::new(Some(&x_center), radius, p, tol, max_iters); ball.project(&mut x); - let nrm = (x + let nrm: f64 = (x .iter() .zip(x_center.iter()) - .fold(0.0, |s, (x, y)| (*x - *y).abs().powf(p) + s)) - .powf(1. / p); + .fold(0.0_f64, |s, (x, y)| { + let diff: f64 = *x - *y; + diff.abs().powf(p) + s + })) + .powf(1.0_f64 / p); unit_test_utils::assert_nearly_equal(radius, nrm, 1e-10, 1e-12, "wrong distance to lp-ball"); let proj_expected = [0.5178727276722618, 2.2277981662325224]; diff --git a/src/constraints/zero.rs b/src/constraints/zero.rs index 81064d33..f1539cfd 100644 --- a/src/constraints/zero.rs +++ b/src/constraints/zero.rs @@ -1,4 +1,5 @@ use super::Constraint; +use num::Float; #[derive(Clone, Copy, Default)] /// Set Zero, $\\{0\\}$ @@ -11,11 +12,11 @@ impl Zero { } } -impl Constraint for Zero { +impl<T: Float> Constraint<T> for Zero { /// Computes the projection on $\\{0\\}$, that is, $\Pi_{\\{0\\}}(x) = 0$ /// for all $x$ - fn project(&self, x: &mut [f64]) { - x.iter_mut().for_each(|xi| *xi = 0.0); + fn project(&self, x: &mut [T]) { + x.iter_mut().for_each(|xi| *xi = T::zero()); } fn is_convex(&self) -> bool { From 1964e0d571cb5b4e5cd00f36a5f12654631723e0 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 17:56:14
+0000 Subject: [PATCH 010/133] panoc supports generic float types --- CHANGELOG.md | 6 +- src/core/mod.rs | 6 +- src/core/panoc/panoc_engine.rs | 202 ++++++++++++++++++++++----------- src/core/problem.rs | 22 ++-- 4 files changed, 153 insertions(+), 83 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c8506dd5..3cdb7b6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,12 +9,12 @@ Note: This is the main Changelog file for the Rust solver. The Changelog file fo -## [v0.11.2] - Unreleased +## [v0.12.0] - Unreleased -### Fixed +### Changed - Rust solver supports generic float types diff --git a/src/core/mod.rs b/src/core/mod.rs index 46099e1a..6e59fe05 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -46,10 +46,10 @@ pub trait Optimizer { /// It defines what the algorithm does at every step (see `step`) and whether /// the specified termination criterion is satisfied /// -pub trait AlgorithmEngine { +pub trait AlgorithmEngine { /// Take a step of the algorithm and return `Ok(true)` only if the iterations should continue - fn step(&mut self, u: &mut [f64]) -> Result; + fn step(&mut self, u: &mut [T]) -> Result; /// Initializes the algorithm - fn init(&mut self, u: &mut [f64]) -> FunctionCallResult; + fn init(&mut self, u: &mut [T]) -> FunctionCallResult; } diff --git a/src/core/panoc/panoc_engine.rs b/src/core/panoc/panoc_engine.rs index 63ad862d..b74e877e 100644 --- a/src/core/panoc/panoc_engine.rs +++ b/src/core/panoc/panoc_engine.rs @@ -3,50 +3,68 @@ use crate::{ core::{panoc::PANOCCache, AlgorithmEngine, Problem}, matrix_operations, FunctionCallResult, SolverError, }; +use lbfgs::LbfgsPrecision; +use num::Float; +use std::iter::Sum; -/// Mimum estimated Lipschitz constant (initial estimate) -const MIN_L_ESTIMATE: f64 = 1e-10; +fn min_l_estimate() -> T { + T::from(1e-10).expect("1e-10 must be representable") +} -/// gamma = GAMMA_L_COEFF/L -const GAMMA_L_COEFF: f64 = 0.95; +fn gamma_l_coeff() -> T { + T::from(0.95).expect("0.95 must be 
representable") +} //const SIGMA_COEFF: f64 = 0.49; -/// Delta in the estimation of the initial Lipschitz constant -const DELTA_LIPSCHITZ: f64 = 1e-12; +fn delta_lipschitz() -> T { + T::from(1e-12).expect("1e-12 must be representable") +} -/// Epsilon in the estimation of the initial Lipschitz constant -const EPSILON_LIPSCHITZ: f64 = 1e-6; +fn epsilon_lipschitz() -> T { + T::from(1e-6).expect("1e-6 must be representable") +} -/// Safety parameter used to check a strict inequality in the update of the Lipschitz constant -const LIPSCHITZ_UPDATE_EPSILON: f64 = 1e-6; +fn lipschitz_update_epsilon() -> T { + T::from(1e-6).expect("1e-6 must be representable") +} /// Maximum iterations of updating the Lipschitz constant const MAX_LIPSCHITZ_UPDATE_ITERATIONS: usize = 10; -/// Maximum possible Lipschitz constant -const MAX_LIPSCHITZ_CONSTANT: f64 = 1e9; +fn max_lipschitz_constant() -> T { + T::from(1e9).expect("1e9 must be representable") +} + +fn norm2_squared_diff(a: &[T], b: &[T]) -> T { + assert_eq!(a.len(), b.len()); + a.iter() + .zip(b.iter()) + .fold(T::zero(), |sum, (&x, &y)| sum + (x - y) * (x - y)) +} /// Maximum number of linesearch iterations const MAX_LINESEARCH_ITERATIONS: u32 = 10; /// Engine for PANOC algorithm -pub struct PANOCEngine<'a, GradientType, ConstraintType, CostType> +pub struct PANOCEngine<'a, GradientType, ConstraintType, CostType, T = f64> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float + LbfgsPrecision + Sum, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { - problem: Problem<'a, GradientType, ConstraintType, CostType>, - pub(crate) cache: &'a mut PANOCCache, + problem: Problem<'a, GradientType, ConstraintType, CostType, T>, + pub(crate) cache: &'a mut PANOCCache, } -impl<'a, GradientType, ConstraintType, 
CostType> - PANOCEngine<'a, GradientType, ConstraintType, CostType> +impl<'a, GradientType, ConstraintType, CostType, T> + PANOCEngine<'a, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float + LbfgsPrecision + Sum, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { /// Construct a new Engine for PANOC /// @@ -60,28 +78,28 @@ where /// /// pub fn new( - problem: Problem<'a, GradientType, ConstraintType, CostType>, - cache: &'a mut PANOCCache, - ) -> PANOCEngine<'a, GradientType, ConstraintType, CostType> { + problem: Problem<'a, GradientType, ConstraintType, CostType, T>, + cache: &'a mut PANOCCache, + ) -> PANOCEngine<'a, GradientType, ConstraintType, CostType, T> { PANOCEngine { problem, cache } } /// Estimate the local Lipschitz constant at `u` - fn estimate_loc_lip(&mut self, u: &mut [f64]) -> FunctionCallResult { + fn estimate_loc_lip(&mut self, u: &mut [T]) -> FunctionCallResult { let mut lipest = crate::lipschitz_estimator::LipschitzEstimator::new( u, &self.problem.gradf, &mut self.cache.gradient_u, ) - .with_delta(DELTA_LIPSCHITZ) - .with_epsilon(EPSILON_LIPSCHITZ); + .with_delta(delta_lipschitz()) + .with_epsilon(epsilon_lipschitz()); self.cache.lipschitz_constant = lipest.estimate_local_lipschitz()?; Ok(()) } /// Computes the FPR and its norm - fn compute_fpr(&mut self, u_current: &[f64]) { + fn compute_fpr(&mut self, u_current: &[T]) { // compute the FPR: // fpr ← u - u_half_step let cache = &mut self.cache; @@ -90,19 +108,19 @@ where .iter_mut() .zip(u_current.iter()) .zip(cache.u_half_step.iter()) - .for_each(|((fpr, u), uhalf)| *fpr = u - uhalf); + .for_each(|((fpr, u), uhalf)| *fpr = *u - *uhalf); // compute the norm of FPR cache.norm_gamma_fpr = 
matrix_operations::norm2(&cache.gamma_fpr); } /// Score the current feasible half step and cache it if it is the best so far. - pub(crate) fn cache_best_half_step(&mut self, u_current: &[f64]) { + pub(crate) fn cache_best_half_step(&mut self, u_current: &[T]) { self.compute_fpr(u_current); self.cache.cache_best_half_step(); } /// Computes a gradient step; does not compute the gradient - fn gradient_step(&mut self, u_current: &[f64]) { + fn gradient_step(&mut self, u_current: &[T]) { // take a gradient step: // gradient_step ← u_current - gamma * gradient let cache = &mut self.cache; @@ -141,11 +159,11 @@ where cache.u_half_step.copy_from_slice(&cache.gradient_step); self.problem.constraints.project(&mut cache.u_half_step); cache.gradient_step_u_half_step_diff_norm_sq = - matrix_operations::norm2_squared_diff(&cache.gradient_step, &cache.u_half_step); + norm2_squared_diff(&cache.gradient_step, &cache.u_half_step); } /// Computes an LBFGS direction; updates `cache.direction_lbfgs` - fn lbfgs_direction(&mut self, u_current: &[f64]) { + fn lbfgs_direction(&mut self, u_current: &[T]) { let cache = &mut self.cache; // update the LBFGS buffer cache.lbfgs.update_hessian(&cache.gamma_fpr, u_current); @@ -160,7 +178,7 @@ where /// Returns the RHS of the Lipschitz update /// Computes rhs = cost + LIP_EPS * |f| - gamma * + (L/2/gamma) ||gamma * fpr||^2 - fn lipschitz_check_rhs(&mut self) -> f64 { + fn lipschitz_check_rhs(&mut self) -> T { let cache = &mut self.cache; let gamma = cache.gamma; let cost_value = cache.cost_value; @@ -169,13 +187,15 @@ where matrix_operations::inner_product(&cache.gradient_u, &cache.gamma_fpr); // rhs ← cost + LIP_EPS * |f| - + (L/2/gamma) ||gamma_fpr||^2 - cost_value + LIPSCHITZ_UPDATE_EPSILON * cost_value.abs() - inner_prod_grad_fpr - + (GAMMA_L_COEFF / (2.0 * gamma)) * cache.norm_gamma_fpr * cache.norm_gamma_fpr + cost_value + lipschitz_update_epsilon::() * cost_value.abs() - inner_prod_grad_fpr + + (gamma_l_coeff::() / (T::from(2.0).expect("2.0 
must be representable") * gamma)) + * cache.norm_gamma_fpr + * cache.norm_gamma_fpr } /// Updates the estimate of the Lipscthiz constant - fn update_lipschitz_constant(&mut self, u_current: &[f64]) -> FunctionCallResult { - let mut cost_u_half_step = 0.0; + fn update_lipschitz_constant(&mut self, u_current: &[T]) -> FunctionCallResult { + let mut cost_u_half_step = T::zero(); // Compute the cost at the half step (self.problem.cost)(&self.cache.u_half_step, &mut cost_u_half_step)?; @@ -185,13 +205,15 @@ where while cost_u_half_step > self.lipschitz_check_rhs() && it_lipschitz_search < MAX_LIPSCHITZ_UPDATE_ITERATIONS - && self.cache.lipschitz_constant < MAX_LIPSCHITZ_CONSTANT + && self.cache.lipschitz_constant < max_lipschitz_constant() { self.cache.lbfgs.reset(); // invalidate the L-BFGS buffer // update L, sigma and gamma... - self.cache.lipschitz_constant *= 2.; - self.cache.gamma /= 2.; + self.cache.lipschitz_constant = + self.cache.lipschitz_constant * T::from(2.0).expect("2.0 must be representable"); + self.cache.gamma = + self.cache.gamma / T::from(2.0).expect("2.0 must be representable"); // recompute the half step... 
self.gradient_step(u_current); // updates self.cache.gradient_step @@ -205,17 +227,18 @@ where self.compute_fpr(u_current); it_lipschitz_search += 1; } - self.cache.sigma = (1.0 - GAMMA_L_COEFF) / (4.0 * self.cache.gamma); + self.cache.sigma = (T::one() - gamma_l_coeff::()) + / (T::from(4.0).expect("4.0 must be representable") * self.cache.gamma); Ok(()) } /// Computes u_plus ← u - gamma * (1-tau) * fpr - tau * dir, - fn compute_u_plus(&mut self, u: &[f64]) { + fn compute_u_plus(&mut self, u: &[T]) { let cache = &mut self.cache; let _gamma = cache.gamma; let tau = cache.tau; - let temp_ = 1.0 - tau; + let temp_ = T::one() - tau; cache .u_plus .iter_mut() @@ -230,21 +253,23 @@ where /// Computes the RHS of the linesearch condition fn compute_rhs_ls(&mut self) { let cache = &mut self.cache; + let half = T::from(0.5).expect("0.5 must be representable"); // dist squared ← norm(gradient step - u half step)^2 // rhs_ls ← f - (gamma/2) * norm(gradf)^2 // + 0.5 * dist squared / gamma // - sigma * norm_gamma_fpr^2 - let fbe = cache.cost_value - 0.5 * cache.gamma * cache.gradient_u_norm_sq - + 0.5 * cache.gradient_step_u_half_step_diff_norm_sq / cache.gamma; + let fbe = cache.cost_value - half * cache.gamma * cache.gradient_u_norm_sq + + half * cache.gradient_step_u_half_step_diff_norm_sq / cache.gamma; let sigma_fpr_sq = cache.sigma * cache.norm_gamma_fpr * cache.norm_gamma_fpr; cache.rhs_ls = fbe - sigma_fpr_sq; } /// Computes the left hand side of the line search condition and compares it with the RHS; /// returns `true` if and only if lhs > rhs (when the line search should continue) - fn line_search_condition(&mut self, u: &[f64]) -> Result { + fn line_search_condition(&mut self, u: &[T]) -> Result { let gamma = self.cache.gamma; + let half = T::from(0.5).expect("0.5 must be representable"); // u_plus ← u - (1-tau)*gamma_fpr + tau*direction self.compute_u_plus(u); @@ -260,14 +285,14 @@ where self.half_step(); // u_half_step ← project(gradient_step) // Update the LHS of 
the line search condition - self.cache.lhs_ls = self.cache.cost_value - 0.5 * gamma * self.cache.gradient_u_norm_sq - + 0.5 * self.cache.gradient_step_u_half_step_diff_norm_sq / self.cache.gamma; + self.cache.lhs_ls = self.cache.cost_value - half * gamma * self.cache.gradient_u_norm_sq + + half * self.cache.gradient_step_u_half_step_diff_norm_sq / self.cache.gamma; Ok(self.cache.lhs_ls > self.cache.rhs_ls) } /// Update without performing a line search; this is executed at the first iteration - fn update_no_linesearch(&mut self, u_current: &mut [f64]) -> FunctionCallResult { + fn update_no_linesearch(&mut self, u_current: &mut [T]) -> FunctionCallResult { u_current.copy_from_slice(&self.cache.u_half_step); // set u_current ← u_half_step (self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value (self.problem.gradf)(u_current, &mut self.cache.gradient_u)?; // compute gradient @@ -279,17 +304,17 @@ where } /// Performs a line search to select tau - fn linesearch(&mut self, u_current: &mut [f64]) -> FunctionCallResult { + fn linesearch(&mut self, u_current: &mut [T]) -> FunctionCallResult { // perform line search self.compute_rhs_ls(); // compute the right hand side of the line search - self.cache.tau = 1.0; // initialise tau ← 1.0 + self.cache.tau = T::one(); // initialise tau ← 1.0 let mut num_ls_iters = 0; while self.line_search_condition(u_current)? && num_ls_iters < MAX_LINESEARCH_ITERATIONS { - self.cache.tau /= 2.0; + self.cache.tau = self.cache.tau / T::from(2.0).expect("2.0 must be representable"); num_ls_iters += 1; } if num_ls_iters == MAX_LINESEARCH_ITERATIONS { - self.cache.tau = 0.; + self.cache.tau = T::zero(); u_current.copy_from_slice(&self.cache.u_half_step); } // Sets `u_current` to `u_plus` (u_current ← u_plus) @@ -299,20 +324,21 @@ where } /// Compute the cost value at the best cached feasible half step. 
- pub(crate) fn cost_value_at_best_half_step(&mut self) -> Result { - let mut cost = 0.0; + pub(crate) fn cost_value_at_best_half_step(&mut self) -> Result { + let mut cost = T::zero(); (self.problem.cost)(&self.cache.best_u_half_step, &mut cost)?; Ok(cost) } } /// Implementation of the `step` and `init` methods of [trait.AlgorithmEngine.html] -impl<'a, GradientType, ConstraintType, CostType> AlgorithmEngine - for PANOCEngine<'a, GradientType, ConstraintType, CostType> +impl<'a, GradientType, ConstraintType, CostType, T> AlgorithmEngine + for PANOCEngine<'a, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float + LbfgsPrecision + Sum, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { /// PANOC step /// @@ -324,7 +350,7 @@ where /// iterate of PANOC /// /// - fn step(&mut self, u_current: &mut [f64]) -> Result { + fn step(&mut self, u_current: &mut [T]) -> Result { // caches the previous gradient vector (copies df to df_previous) self.cache.cache_previous_gradient(); @@ -358,13 +384,14 @@ where /// gradient of the cost at the initial point, initial estimates for `gamma` and `sigma`, /// a gradient step and a half step (projected gradient step) /// - fn init(&mut self, u_current: &mut [f64]) -> FunctionCallResult { + fn init(&mut self, u_current: &mut [T]) -> FunctionCallResult { self.cache.reset(); (self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value self.estimate_loc_lip(u_current)?; // computes the gradient as well! 
(self.cache.gradient_u) self.cache_gradient_norm(); - self.cache.gamma = GAMMA_L_COEFF / f64::max(self.cache.lipschitz_constant, MIN_L_ESTIMATE); - self.cache.sigma = (1.0 - GAMMA_L_COEFF) / (4.0 * self.cache.gamma); + self.cache.gamma = gamma_l_coeff::() / self.cache.lipschitz_constant.max(min_l_estimate()); + self.cache.sigma = (T::one() - gamma_l_coeff::()) + / (T::from(4.0).expect("4.0 must be representable") * self.cache.gamma); self.gradient_step(u_current); // updated self.cache.gradient_step self.half_step(); // updates self.cache.u_half_step @@ -382,7 +409,7 @@ mod tests { use crate::constraints; use crate::core::panoc::panoc_engine::PANOCEngine; use crate::core::panoc::*; - use crate::core::Problem; + use crate::core::{AlgorithmEngine, Problem}; use crate::mocks; use crate::FunctionCallResult; @@ -607,4 +634,43 @@ mod tests { "update_lipschitz_constant should only evaluate the half-step cost" ); } + + #[test] + fn t_panoc_init_f32() { + let bounds = constraints::NoConstraints::new(); + let problem = Problem::new( + &bounds, + |u: &[f32], grad: &mut [f32]| -> FunctionCallResult { + grad.copy_from_slice(u); + Ok(()) + }, + |u: &[f32], c: &mut f32| -> FunctionCallResult { + *c = 0.5_f32 * (u[0] * u[0] + u[1] * u[1]); + Ok(()) + }, + ); + let mut panoc_cache = PANOCCache::::new(2, 1e-6_f32, 3); + let mut panoc_engine = PANOCEngine::new(problem, &mut panoc_cache); + let mut u = [1_000.0_f32, 2_000.0_f32]; + + panoc_engine.init(&mut u).unwrap(); + + assert!(panoc_engine.cache.lipschitz_constant.is_finite()); + assert!(panoc_engine.cache.lipschitz_constant > 0.0_f32); + let expected_gamma = 0.95_f32 / panoc_engine.cache.lipschitz_constant; + assert!((panoc_engine.cache.gamma - expected_gamma).abs() < 1e-6); + unit_test_utils::assert_nearly_equal_array( + &[1_000.0_f32, 2_000.0_f32], + &panoc_engine.cache.gradient_u, + 1e-5, + 1e-6, + "gradient at u", + ); + let expected_half_step = [ + (1.0_f32 - panoc_engine.cache.gamma) * 1_000.0_f32, + (1.0_f32 - 
panoc_engine.cache.gamma) * 2_000.0_f32, + ]; + assert!((panoc_engine.cache.u_half_step[0] - expected_half_step[0]).abs() < 5e-3); + assert!((panoc_engine.cache.u_half_step[1] - expected_half_step[1]).abs() < 5e-3); + } } diff --git a/src/core/problem.rs b/src/core/problem.rs index c6e61dae..c2ca372f 100644 --- a/src/core/problem.rs +++ b/src/core/problem.rs @@ -7,6 +7,7 @@ //! C (and then invoked from Rust via an interface such as icasadi). //! use crate::{constraints, FunctionCallResult}; +use std::marker::PhantomData; /// Definition of an optimisation problem /// @@ -15,11 +16,11 @@ use crate::{constraints, FunctionCallResult}; /// - the cost function /// - the set of constraints, which is described by implementations of /// [Constraint](../../panoc_rs/constraints/trait.Constraint.html) -pub struct Problem<'a, GradientType, ConstraintType, CostType> +pub struct Problem<'a, GradientType, ConstraintType, CostType, T = f64> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { /// constraints pub(crate) constraints: &'a ConstraintType, @@ -27,13 +28,15 @@ where pub(crate) gradf: GradientType, /// cost function pub(crate) cost: CostType, + marker: PhantomData, } -impl<'a, GradientType, ConstraintType, CostType> Problem<'a, GradientType, ConstraintType, CostType> +impl<'a, GradientType, ConstraintType, CostType, T> + Problem<'a, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { /// Construct a 
new instance of an optimisation problem /// @@ -50,11 +53,12 @@ where constraints: &'a ConstraintType, cost_gradient: GradientType, cost: CostType, - ) -> Problem<'a, GradientType, ConstraintType, CostType> { + ) -> Problem<'a, GradientType, ConstraintType, CostType, T> { Problem { constraints, gradf: cost_gradient, cost, + marker: PhantomData, } } } From a3ac25af7912bf6271a25a2787033a9624c79b7e Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 17:59:27 +0000 Subject: [PATCH 011/133] constraints with f32 are better tested --- src/constraints/tests.rs | 73 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index 7a54f2f5..c688b9c1 100644 --- a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -20,6 +20,14 @@ fn t_zero_set() { ); } +#[test] +fn t_zero_set_f32() { + let zero = Zero::new(); + let mut x = [1.0_f32, -2.0, 3.5]; + zero.project(&mut x); + assert_eq!([0.0_f32, 0.0, 0.0], x); +} + #[test] fn t_hyperplane() { let normal_vector = [1.0, 2.0, 3.0]; @@ -149,6 +157,15 @@ fn t_finite_set() { ); } +#[test] +fn t_finite_set_f32() { + let data: &[&[f32]] = &[&[0.0_f32, 0.0], &[1.0, 1.0], &[0.0, 1.0], &[1.0, 0.0]]; + let finite_set = FiniteSet::new(data); + let mut x = [0.7_f32, 0.2]; + finite_set.project(&mut x); + assert_eq!([1.0_f32, 0.0], x); +} + #[test] #[should_panic] fn t_finite_set_project_wrong_dimension() { @@ -176,6 +193,18 @@ fn t_rectangle_bounded() { ); } +#[test] +fn t_rectangle_bounded_f32() { + let xmin = vec![2.0_f32; 3]; + let xmax = vec![4.5_f32; 3]; + let rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..])); + let mut x = [1.0_f32, 3.0, 5.0]; + + rectangle.project(&mut x); + + assert_eq!([2.0_f32, 3.0, 4.5], x); +} + #[test] fn t_rectangle_infinite_bounds() { let xmin = [-1.0, 2.0, f64::NEG_INFINITY]; @@ -282,6 +311,19 @@ fn t_ball2_at_origin() { ); } +#[test] +fn t_ball2_at_origin_f32() { + let radius = 1.0_f32; + let 
mut x = [1.0_f32, 1.0]; + let ball = Ball2::new(None, radius); + + ball.project(&mut x); + + let expected = std::f32::consts::FRAC_1_SQRT_2; + assert!((x[0] - expected).abs() < 1e-6); + assert!((x[1] - expected).abs() < 1e-6); +} + #[test] fn t_ball2_at_origin_different_radius_outside() { let radius = 0.8; @@ -352,6 +394,16 @@ fn t_no_constraints() { unit_test_utils::assert_nearly_equal_array(&[1., 2., 3.], &x, 1e-10, 1e-15, "x is wrong"); } +#[test] +fn t_no_constraints_f32() { + let mut x = [1.0_f32, 2.0, 3.0]; + let whole_space = NoConstraints::new(); + + whole_space.project(&mut x); + + assert_eq!([1.0_f32, 2.0, 3.0], x); +} + #[test] #[should_panic] fn t_cartesian_product_constraints_incoherent_indices() { @@ -701,6 +753,18 @@ fn t_simplex_projection() { ); } +#[test] +fn t_simplex_projection_f32() { + let mut x = [1.0_f32, 2.0, 3.0]; + let alpha = 3.0_f32; + let simplex = Simplex::new(alpha); + simplex.project(&mut x); + + let sum = x[0] + x[1] + x[2]; + assert!((sum - alpha).abs() < 1e-5); + assert!(x.iter().all(|&xi| xi >= -1e-6)); +} + #[test] fn t_simplex_projection_random_spam() { let n = 10; @@ -995,6 +1059,15 @@ fn t_epigraph_squared_norm_correctness() { ); } +#[test] +fn t_epigraph_squared_norm_f32() { + let epi = EpigraphSquaredNorm::new(); + let mut x = [1.0_f32, 0.0, 0.0]; + epi.project(&mut x); + let err = (matrix_operations::norm2_squared(&x[..2]) - x[2]).abs(); + assert!(err < 1e-4); +} + #[test] fn t_affine_space() { let a = vec![ From a5ee7a8873a56c1d993a8c6ea92638c9e5608d88 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 18:00:30 +0000 Subject: [PATCH 012/133] cargo fmt --- src/cholesky_factorizer.rs | 7 +------ src/constraints/ballp.rs | 14 +++++--------- src/constraints/simplex.rs | 3 +-- src/constraints/soc.rs | 3 +-- src/constraints/tests.rs | 11 ++++------- src/core/panoc/panoc_cache.rs | 5 ++++- src/core/panoc/panoc_engine.rs | 6 +++--- 7 files changed, 19 insertions(+), 30 deletions(-) diff --git 
a/src/cholesky_factorizer.rs b/src/cholesky_factorizer.rs index e18233f0..78c91757 100644 --- a/src/cholesky_factorizer.rs +++ b/src/cholesky_factorizer.rs @@ -267,12 +267,7 @@ mod tests { factorizer.factorize(&a).unwrap(); let expected_l = [2.0_f32, 0.0, 0.0, 6.0, 1.0, 0.0, -8.0, 5.0, 3.0]; - unit_test_utils::nearly_equal_array( - &expected_l, - factorizer.cholesky_factor(), - 1e-5, - 1e-6, - ); + unit_test_utils::nearly_equal_array(&expected_l, factorizer.cholesky_factor(), 1e-5, 1e-6); let rhs = vec![-5.0_f32, 2.0, -3.0]; let x = factorizer.solve(&rhs).unwrap(); diff --git a/src/constraints/ballp.rs b/src/constraints/ballp.rs index de964c45..7428f0f3 100644 --- a/src/constraints/ballp.rs +++ b/src/constraints/ballp.rs @@ -124,13 +124,7 @@ impl<'a, T: Float> BallP<'a, T> { /// - `p`: norm exponent, must satisfy `p > 1.0` and be finite /// - `tolerance`: tolerance for the numerical solvers /// - `max_iter`: maximum number of iterations for the numerical solvers - pub fn new( - center: Option<&'a [T]>, - radius: T, - p: T, - tolerance: T, - max_iter: usize, - ) -> Self { + pub fn new(center: Option<&'a [T]>, radius: T, p: T, tolerance: T, max_iter: usize) -> Self { assert!(radius > T::zero()); assert!(p > T::one() && p.is_finite()); assert!(tolerance > T::zero()); @@ -194,7 +188,8 @@ impl<'a, T: Float> BallP<'a, T> { } for _ in 0..max_iter { - let lambda_mid = T::from(0.5).expect("0.5 must be representable") * (lambda_lo + lambda_hi); + let lambda_mid = + T::from(0.5).expect("0.5 must be representable") * (lambda_lo + lambda_hi); let err = radius_error(lambda_mid); if err.abs() <= tol { @@ -210,7 +205,8 @@ impl<'a, T: Float> BallP<'a, T> { } } - let lambda_star = T::from(0.5).expect("0.5 must be representable") * (lambda_lo + lambda_hi); + let lambda_star = + T::from(0.5).expect("0.5 must be representable") * (lambda_lo + lambda_hi); x.iter_mut().zip(abs_x.iter()).for_each(|(xi, &a)| { let u = Self::solve_coordinate_newton(a, lambda_star, p, tol, max_iter); diff 
--git a/src/constraints/simplex.rs b/src/constraints/simplex.rs index 2d11e897..4cd9c0f1 100644 --- a/src/constraints/simplex.rs +++ b/src/constraints/simplex.rs @@ -69,8 +69,7 @@ impl Constraint for Simplex { if *v_n <= rho { hit_list.push(n); current_len_v -= 1; - rho = rho - + (rho - *v_n) / T::from(current_len_v).expect("i64 must fit in T"); + rho = rho + (rho - *v_n) / T::from(current_len_v).expect("i64 must fit in T"); } }); hit_list.iter().rev().for_each(|target| { diff --git a/src/constraints/soc.rs b/src/constraints/soc.rs index 6ca51dfc..d2418543 100644 --- a/src/constraints/soc.rs +++ b/src/constraints/soc.rs @@ -71,8 +71,7 @@ where if self.alpha * norm_z <= -r { x.iter_mut().for_each(|v| *v = T::zero()); } else if norm_z > self.alpha * r { - let beta = - (self.alpha * norm_z + r) / (self.alpha.powi(2) + T::one()); + let beta = (self.alpha * norm_z + r) / (self.alpha.powi(2) + T::one()); x[..n - 1] .iter_mut() .for_each(|v| *v = *v * self.alpha * beta / norm_z); diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index c688b9c1..0db9549c 100644 --- a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -1305,13 +1305,10 @@ fn t_ballp_at_xc_projection() { let ball = BallP::new(Some(&x_center), radius, p, tol, max_iters); ball.project(&mut x); - let nrm: f64 = (x - .iter() - .zip(x_center.iter()) - .fold(0.0_f64, |s, (x, y)| { - let diff: f64 = *x - *y; - diff.abs().powf(p) + s - })) + let nrm: f64 = (x.iter().zip(x_center.iter()).fold(0.0_f64, |s, (x, y)| { + let diff: f64 = *x - *y; + diff.abs().powf(p) + s + })) .powf(1.0_f64 / p); unit_test_utils::assert_nearly_equal(radius, nrm, 1e-10, 1e-12, "wrong distance to lp-ball"); diff --git a/src/core/panoc/panoc_cache.rs b/src/core/panoc/panoc_cache.rs index 7aa4b69c..b491ae43 100644 --- a/src/core/panoc/panoc_cache.rs +++ b/src/core/panoc/panoc_cache.rs @@ -128,7 +128,10 @@ where /// The method panics if `akkt_tolerance` is nonpositive /// pub fn set_akkt_tolerance(&mut self, 
akkt_tolerance: T) { - assert!(akkt_tolerance > T::zero(), "akkt_tolerance must be positive"); + assert!( + akkt_tolerance > T::zero(), + "akkt_tolerance must be positive" + ); self.akkt_tolerance = Some(akkt_tolerance); self.gradient_u_previous = Some(vec![T::zero(); self.gradient_step.len()]); } diff --git a/src/core/panoc/panoc_engine.rs b/src/core/panoc/panoc_engine.rs index b74e877e..eb4780c5 100644 --- a/src/core/panoc/panoc_engine.rs +++ b/src/core/panoc/panoc_engine.rs @@ -212,8 +212,7 @@ where // update L, sigma and gamma... self.cache.lipschitz_constant = self.cache.lipschitz_constant * T::from(2.0).expect("2.0 must be representable"); - self.cache.gamma = - self.cache.gamma / T::from(2.0).expect("2.0 must be representable"); + self.cache.gamma = self.cache.gamma / T::from(2.0).expect("2.0 must be representable"); // recompute the half step... self.gradient_step(u_current); // updates self.cache.gradient_step @@ -389,7 +388,8 @@ where (self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value self.estimate_loc_lip(u_current)?; // computes the gradient as well! 
(self.cache.gradient_u) self.cache_gradient_norm(); - self.cache.gamma = gamma_l_coeff::() / self.cache.lipschitz_constant.max(min_l_estimate()); + self.cache.gamma = + gamma_l_coeff::() / self.cache.lipschitz_constant.max(min_l_estimate()); self.cache.sigma = (T::one() - gamma_l_coeff::()) / (T::from(4.0).expect("4.0 must be representable") * self.cache.gamma); self.gradient_step(u_current); // updated self.cache.gradient_step From 7f7611de0804c22818ad75498d2f550e7b55ca99 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 18:06:27 +0000 Subject: [PATCH 013/133] fbs supports generic floats --- src/core/fbs/fbs_cache.rs | 26 +++++++----- src/core/fbs/fbs_engine.rs | 52 ++++++++++++----------- src/core/fbs/fbs_optimizer.rs | 80 ++++++++++++++++++++--------------- src/core/fbs/tests.rs | 58 +++++++++++++++++++++++++ 4 files changed, 148 insertions(+), 68 deletions(-) diff --git a/src/core/fbs/fbs_cache.rs b/src/core/fbs/fbs_cache.rs index 83dd61bb..546c2dc6 100644 --- a/src/core/fbs/fbs_cache.rs +++ b/src/core/fbs/fbs_cache.rs @@ -1,19 +1,23 @@ //! FBS Cache //! 
+use num::Float; use std::num::NonZeroUsize; /// Cache for the forward-backward splitting (FBS), or projected gradient, algorithm /// /// This struct allocates memory needed for the FBS algorithm -pub struct FBSCache { - pub(crate) work_gradient_u: Vec, - pub(crate) work_u_previous: Vec, - pub(crate) gamma: f64, - pub(crate) tolerance: f64, - pub(crate) norm_fpr: f64, +pub struct FBSCache +where + T: Float, +{ + pub(crate) work_gradient_u: Vec, + pub(crate) work_u_previous: Vec, + pub(crate) gamma: T, + pub(crate) tolerance: T, + pub(crate) norm_fpr: T, } -impl FBSCache { +impl FBSCache { /// Construct a new instance of `FBSCache` /// /// ## Arguments @@ -37,13 +41,13 @@ impl FBSCache { /// This method will panic if there is no available memory for the required allocation /// (capacity overflow) /// - pub fn new(n: NonZeroUsize, gamma: f64, tolerance: f64) -> FBSCache { + pub fn new(n: NonZeroUsize, gamma: T, tolerance: T) -> FBSCache { FBSCache { - work_gradient_u: vec![0.0; n.get()], - work_u_previous: vec![0.0; n.get()], + work_gradient_u: vec![T::zero(); n.get()], + work_u_previous: vec![T::zero(); n.get()], gamma, tolerance, - norm_fpr: f64::INFINITY, + norm_fpr: T::infinity(), } } } diff --git a/src/core/fbs/fbs_engine.rs b/src/core/fbs/fbs_engine.rs index 717e70c6..c13ae870 100644 --- a/src/core/fbs/fbs_engine.rs +++ b/src/core/fbs/fbs_engine.rs @@ -5,25 +5,28 @@ use crate::{ core::{fbs::FBSCache, AlgorithmEngine, Problem}, matrix_operations, FunctionCallResult, SolverError, }; +use num::Float; /// The FBE Engine defines the steps of the FBE algorithm and the termination criterion /// -pub struct FBSEngine<'a, GradientType, ConstraintType, CostType> +pub struct FBSEngine<'a, GradientType, ConstraintType, CostType, T = f64> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + 
CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { - pub(crate) problem: Problem<'a, GradientType, ConstraintType, CostType>, - pub(crate) cache: &'a mut FBSCache, + pub(crate) problem: Problem<'a, GradientType, ConstraintType, CostType, T>, + pub(crate) cache: &'a mut FBSCache, } -impl<'a, GradientType, ConstraintType, CostType> - FBSEngine<'a, GradientType, ConstraintType, CostType> +impl<'a, GradientType, ConstraintType, CostType, T> + FBSEngine<'a, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { /// Constructor for instances of `FBSEngine` /// @@ -36,13 +39,13 @@ where /// /// An new instance of `FBSEngine` pub fn new( - problem: Problem<'a, GradientType, ConstraintType, CostType>, - cache: &'a mut FBSCache, - ) -> FBSEngine<'a, GradientType, ConstraintType, CostType> { + problem: Problem<'a, GradientType, ConstraintType, CostType, T>, + cache: &'a mut FBSCache, + ) -> FBSEngine<'a, GradientType, ConstraintType, CostType, T> { FBSEngine { problem, cache } } - fn gradient_step(&mut self, u_current: &mut [f64]) { + fn gradient_step(&mut self, u_current: &mut [T]) { assert_eq!( Ok(()), (self.problem.gradf)(u_current, &mut self.cache.work_gradient_u), @@ -53,20 +56,21 @@ where u_current .iter_mut() .zip(self.cache.work_gradient_u.iter()) - .for_each(|(u, w)| *u -= self.cache.gamma * *w); + .for_each(|(u, w)| *u = *u - self.cache.gamma * *w); } - fn projection_step(&mut self, u_current: &mut [f64]) { + fn projection_step(&mut self, u_current: &mut [T]) { self.problem.constraints.project(u_current); } } -impl<'a, GradientType, ConstraintType, CostType> AlgorithmEngine - for FBSEngine<'a, GradientType, 
ConstraintType, CostType> +impl<'a, GradientType, ConstraintType, CostType, T> AlgorithmEngine + for FBSEngine<'a, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult + 'a, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult + 'a, - ConstraintType: constraints::Constraint + 'a, + T: Float, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult + 'a, + CostType: Fn(&[T], &mut T) -> FunctionCallResult + 'a, + ConstraintType: constraints::Constraint + 'a, { /// Take a forward-backward step and check whether the algorithm should terminate /// @@ -83,7 +87,7 @@ where /// /// The method may panick if the computation of the gradient of the cost function /// or the cost function panics. - fn step(&mut self, u_current: &mut [f64]) -> Result { + fn step(&mut self, u_current: &mut [T]) -> Result { self.cache.work_u_previous.copy_from_slice(u_current); // cache the previous step self.gradient_step(u_current); // compute the gradient self.projection_step(u_current); // project @@ -93,7 +97,7 @@ where Ok(self.cache.norm_fpr > self.cache.tolerance) } - fn init(&mut self, _u_current: &mut [f64]) -> FunctionCallResult { + fn init(&mut self, _u_current: &mut [T]) -> FunctionCallResult { Ok(()) } } diff --git a/src/core/fbs/fbs_optimizer.rs b/src/core/fbs/fbs_optimizer.rs index d714ab67..284cb911 100644 --- a/src/core/fbs/fbs_optimizer.rs +++ b/src/core/fbs/fbs_optimizer.rs @@ -8,6 +8,7 @@ use crate::{ }, matrix_operations, FunctionCallResult, SolverError, }; +use num::Float; use std::time; const MAX_ITER: usize = 100_usize; @@ -22,23 +23,25 @@ const MAX_ITER: usize = 100_usize; /// a different optimization problem. 
/// /// -pub struct FBSOptimizer<'a, GradientType, ConstraintType, CostType> +pub struct FBSOptimizer<'a, GradientType, ConstraintType, CostType, T = f64> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { - fbs_engine: FBSEngine<'a, GradientType, ConstraintType, CostType>, + fbs_engine: FBSEngine<'a, GradientType, ConstraintType, CostType, T>, max_iter: usize, max_duration: Option, } -impl<'a, GradientType, ConstraintType, CostType> - FBSOptimizer<'a, GradientType, ConstraintType, CostType> +impl<'a, GradientType, ConstraintType, CostType, T> + FBSOptimizer<'a, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult + 'a, + CostType: Fn(&[T], &mut T) -> FunctionCallResult + 'a, + ConstraintType: constraints::Constraint + 'a, { /// Constructs a new instance of `FBSOptimizer` /// @@ -47,8 +50,8 @@ where /// - `problem`: problem definition /// - `cache`: instance of `FBSCache` pub fn new( - problem: Problem<'a, GradientType, ConstraintType, CostType>, - cache: &'a mut FBSCache, + problem: Problem<'a, GradientType, ConstraintType, CostType, T>, + cache: &'a mut FBSCache, ) -> Self { FBSOptimizer { fbs_engine: FBSEngine::new(problem, cache), @@ -64,9 +67,9 @@ where /// The method panics if the specified tolerance is not positive pub fn with_tolerance( self, - tolerance: f64, - ) -> FBSOptimizer<'a, GradientType, ConstraintType, CostType> { - assert!(tolerance > 0.0); + tolerance: T, + ) -> FBSOptimizer<'a, GradientType, ConstraintType, CostType, T> { + assert!(tolerance > 
T::zero()); self.fbs_engine.cache.tolerance = tolerance; self @@ -76,7 +79,7 @@ where pub fn with_max_iter( mut self, max_iter: usize, - ) -> FBSOptimizer<'a, GradientType, ConstraintType, CostType> { + ) -> FBSOptimizer<'a, GradientType, ConstraintType, CostType, T> { self.max_iter = max_iter; self } @@ -85,30 +88,25 @@ where pub fn with_max_duration( mut self, max_duration: time::Duration, - ) -> FBSOptimizer<'a, GradientType, ConstraintType, CostType> { + ) -> FBSOptimizer<'a, GradientType, ConstraintType, CostType, T> { self.max_duration = Some(max_duration); self } -} -impl<'life, GradientType, ConstraintType, CostType> Optimizer - for FBSOptimizer<'life, GradientType, ConstraintType, CostType> -where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult + 'life, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult + 'life, - ConstraintType: constraints::Constraint + 'life, -{ - fn solve(&mut self, u: &mut [f64]) -> Result { + /// Solves the optimization problem for decision variables of scalar type `T`. + /// + /// The returned [`SolverStatus`] stores the reported norm of the fixed-point + /// residual and cost value as `f64`, so these values are converted from `T`. + pub fn solve(&mut self, u: &mut [T]) -> Result { let now = instant::Instant::now(); - // Initialize - propagate error upstream, if any self.fbs_engine.init(u)?; let mut num_iter: usize = 0; let mut step_flag = self.fbs_engine.step(u)?; if let Some(dur) = self.max_duration { - while step_flag && num_iter < self.max_iter && dur <= now.elapsed() { + while step_flag && num_iter < self.max_iter && now.elapsed() <= dur { num_iter += 1; step_flag = self.fbs_engine.step(u)? 
} @@ -119,15 +117,13 @@ where } } - // cost at the solution [propagate error upstream] - let mut cost_value: f64 = 0.0; + let mut cost_value = T::zero(); (self.fbs_engine.problem.cost)(u, &mut cost_value)?; if !matrix_operations::is_finite(u) || !cost_value.is_finite() { return Err(SolverError::NotFiniteComputation); } - // export solution status Ok(SolverStatus::new( if num_iter < self.max_iter { ExitStatus::Converged @@ -136,8 +132,26 @@ where }, num_iter, now.elapsed(), - self.fbs_engine.cache.norm_fpr, - cost_value, + self.fbs_engine + .cache + .norm_fpr + .to_f64() + .expect("norm_fpr must be representable as f64"), + cost_value + .to_f64() + .expect("cost value must be representable as f64"), )) } } + +impl<'life, GradientType, ConstraintType, CostType> Optimizer + for FBSOptimizer<'life, GradientType, ConstraintType, CostType, f64> +where + GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult + 'life, + CostType: Fn(&[f64], &mut f64) -> FunctionCallResult + 'life, + ConstraintType: constraints::Constraint + 'life, +{ + fn solve(&mut self, u: &mut [f64]) -> Result { + FBSOptimizer::solve(self, u) + } +} diff --git a/src/core/fbs/tests.rs b/src/core/fbs/tests.rs index b94425b1..28aef53c 100644 --- a/src/core/fbs/tests.rs +++ b/src/core/fbs/tests.rs @@ -147,3 +147,61 @@ fn t_solve_fbs_many_times() { assert!(status.norm_fpr() < tolerance); } } + +#[test] +fn t_fbs_step_no_constraints_f32() { + let no_constraints = constraints::NoConstraints::new(); + let problem = Problem::new( + &no_constraints, + |u: &[f32], grad: &mut [f32]| -> FunctionCallResult { + grad[0] = u[0] + u[1] + 1.0; + grad[1] = u[0] + 2.0 * u[1] - 1.0; + Ok(()) + }, + |u: &[f32], cost: &mut f32| -> FunctionCallResult { + *cost = u[0] * u[0] + 2.0 * u[1] * u[1] + u[0] - u[1] + 3.0; + Ok(()) + }, + ); + let gamma = 0.1_f32; + let tolerance = 1e-6_f32; + + let mut fbs_cache = FBSCache::::new(NonZeroUsize::new(N_DIM).unwrap(), gamma, tolerance); + let mut fbs_engine = FBSEngine::new(problem, 
&mut fbs_cache); + let mut u = [1.0_f32, 3.0_f32]; + + assert!(fbs_engine.step(&mut u).unwrap()); + assert!((u[0] - 0.5_f32).abs() < 1e-6); + assert!((u[1] - 2.4_f32).abs() < 1e-6); +} + +#[test] +fn t_solve_fbs_f32() { + let radius = 0.2_f32; + let box_constraints = constraints::Ball2::new(None, radius); + let problem = Problem::new( + &box_constraints, + |u: &[f32], grad: &mut [f32]| -> FunctionCallResult { + grad[0] = u[0] + u[1] + 1.0; + grad[1] = u[0] + 2.0 * u[1] - 1.0; + Ok(()) + }, + |u: &[f32], cost: &mut f32| -> FunctionCallResult { + *cost = u[0] * u[0] + 2.0 * u[1] * u[1] + u[0] - u[1] + 3.0; + Ok(()) + }, + ); + let gamma = 0.1_f32; + let tolerance = 1e-6_f32; + + let mut fbs_cache = FBSCache::::new(NonZeroUsize::new(N_DIM).unwrap(), gamma, tolerance); + let mut u = [0.0_f32; N_DIM]; + let mut optimizer = FBSOptimizer::new(problem, &mut fbs_cache); + + let status = optimizer.solve(&mut u).unwrap(); + + assert!(status.has_converged()); + assert!(status.norm_fpr() < tolerance as f64); + assert!((u[0] - crate::mocks::SOLUTION_A[0] as f32).abs() < 1e-4); + assert!((u[1] - crate::mocks::SOLUTION_A[1] as f32).abs() < 1e-4); +} From afea1dc60ef725bb883a1da41dc342b1ae442bee Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 18:17:53 +0000 Subject: [PATCH 014/133] alm/pm support generic float types --- src/alm/alm_cache.rs | 79 ++++++++----- src/alm/alm_factory.rs | 91 +++++++++------ src/alm/alm_optimizer.rs | 180 ++++++++++++++++-------------- src/alm/alm_optimizer_status.rs | 75 +++++++------ src/alm/alm_problem.rs | 37 +++--- src/alm/mod.rs | 22 +++- src/alm/tests.rs | 78 +++++++++++++ src/core/panoc/panoc_optimizer.rs | 76 ++++++++----- 8 files changed, 412 insertions(+), 226 deletions(-) diff --git a/src/alm/alm_cache.rs b/src/alm/alm_cache.rs index 2f5211bf..bae8bb12 100644 --- a/src/alm/alm_cache.rs +++ b/src/alm/alm_cache.rs @@ -1,6 +1,11 @@ use crate::panoc::PANOCCache; +use lbfgs::LbfgsPrecision; +use num::Float; +use 
std::iter::Sum; -const DEFAULT_INITIAL_PENALTY: f64 = 10.0; +fn default_initial_penalty() -> T { + T::from(10.0).expect("10.0 must be representable") +} /// Cache for `AlmOptimizer` (to be allocated once) /// @@ -12,32 +17,35 @@ const DEFAULT_INITIAL_PENALTY: f64 = 10.0; /// of `AlmProblem` /// #[derive(Debug)] -pub struct AlmCache { +pub struct AlmCache +where + T: Float + LbfgsPrecision + Sum, +{ /// PANOC cache for inner problems - pub(crate) panoc_cache: PANOCCache, + pub(crate) panoc_cache: PANOCCache, /// Lagrange multipliers (next) - pub(crate) y_plus: Option>, + pub(crate) y_plus: Option>, /// Vector $\xi^\nu = (c^\nu, y^\nu)$ - pub(crate) xi: Option>, + pub(crate) xi: Option>, /// Infeasibility related to ALM-type constraints - pub(crate) delta_y_norm: f64, + pub(crate) delta_y_norm: T, /// Delta y at iteration `nu+1` - pub(crate) delta_y_norm_plus: f64, + pub(crate) delta_y_norm_plus: T, /// Value $\Vert F_2(u^\nu) \Vert$ - pub(crate) f2_norm: f64, + pub(crate) f2_norm: T, /// Value $\Vert F_2(u^{\nu+1}) \Vert$ - pub(crate) f2_norm_plus: f64, + pub(crate) f2_norm_plus: T, /// Auxiliary variable `w` - pub(crate) w_alm_aux: Option>, + pub(crate) w_alm_aux: Option>, /// Infeasibility related to PM-type constraints, `w_pm = F2(u)` - pub(crate) w_pm: Option>, + pub(crate) w_pm: Option>, /// (Outer) iteration count pub(crate) iteration: usize, /// Counter for inner iterations pub(crate) inner_iteration_count: usize, /// Value of the norm of the fixed-point residual for the last /// solved inner problem - pub(crate) last_inner_problem_norm_fpr: f64, + pub(crate) last_inner_problem_norm_fpr: T, /// Available time left for ALM/PM computations (the value `None` /// corresponds to an unspecified available time, i.e., there are /// no bounds on the maximum time). 
The maximum time is specified, @@ -45,7 +53,10 @@ pub struct AlmCache { pub(crate) available_time: Option, } -impl AlmCache { +impl AlmCache +where + T: Float + LbfgsPrecision + Sum, +{ /// Construct a new instance of `AlmCache` /// /// # Arguments @@ -58,30 +69,42 @@ impl AlmCache { /// /// Does not panic /// - pub fn new(panoc_cache: PANOCCache, n1: usize, n2: usize) -> Self { + pub fn new(panoc_cache: PANOCCache, n1: usize, n2: usize) -> Self { AlmCache { panoc_cache, - y_plus: if n1 > 0 { Some(vec![0.0; n1]) } else { None }, + y_plus: if n1 > 0 { + Some(vec![T::zero(); n1]) + } else { + None + }, // Allocate memory for xi = (c, y) if either n1 or n2 is nonzero, // otherwise, xi is None xi: if n1 + n2 > 0 { - let mut xi_init = vec![DEFAULT_INITIAL_PENALTY; 1]; - xi_init.append(&mut vec![0.0; n1]); + let mut xi_init = vec![default_initial_penalty(); 1]; + xi_init.append(&mut vec![T::zero(); n1]); Some(xi_init) } else { None }, // w_alm_aux should be allocated only if n1 > 0 - w_alm_aux: if n1 > 0 { Some(vec![0.0; n1]) } else { None }, + w_alm_aux: if n1 > 0 { + Some(vec![T::zero(); n1]) + } else { + None + }, // w_pm is needed only if n2 > 0 - w_pm: if n2 > 0 { Some(vec![0.0; n2]) } else { None }, + w_pm: if n2 > 0 { + Some(vec![T::zero(); n2]) + } else { + None + }, iteration: 0, - delta_y_norm: 0.0, - delta_y_norm_plus: f64::INFINITY, - f2_norm: 0.0, - f2_norm_plus: f64::INFINITY, + delta_y_norm: T::zero(), + delta_y_norm_plus: T::infinity(), + f2_norm: T::zero(), + f2_norm_plus: T::infinity(), inner_iteration_count: 0, - last_inner_problem_norm_fpr: -1.0, + last_inner_problem_norm_fpr: -T::one(), available_time: None, } } @@ -92,10 +115,10 @@ impl AlmCache { pub fn reset(&mut self) { self.panoc_cache.reset(); self.iteration = 0; - self.f2_norm = 0.0; - self.f2_norm_plus = 0.0; - self.delta_y_norm = 0.0; - self.delta_y_norm_plus = 0.0; + self.f2_norm = T::zero(); + self.f2_norm_plus = T::zero(); + self.delta_y_norm = T::zero(); + self.delta_y_norm_plus = 
T::zero(); self.inner_iteration_count = 0; } } diff --git a/src/alm/alm_factory.rs b/src/alm/alm_factory.rs index 21776453..91ae3a1e 100644 --- a/src/alm/alm_factory.rs +++ b/src/alm/alm_factory.rs @@ -6,6 +6,13 @@ /* ---------------------------------------------------------------------------- */ use crate::{constraints::Constraint, matrix_operations, FunctionCallResult}; +use num::Float; +use std::marker::PhantomData; +use std::{iter::Sum, ops::AddAssign}; + +fn half() -> T { + T::from(0.5).expect("0.5 must be representable") +} /// Prepares function $\psi$ and its gradient given the problem data: $f$, $\nabla{}f$, /// and optionally $F_1$, $JF_1$, $C$ and $F_2$ @@ -72,14 +79,16 @@ pub struct AlmFactory< Cost, CostGradient, SetC, + T = f64, > where - Cost: Fn(&[f64], &mut f64) -> FunctionCallResult, // f(u, result) - CostGradient: Fn(&[f64], &mut [f64]) -> FunctionCallResult, // df(u, result) - MappingF1: Fn(&[f64], &mut [f64]) -> FunctionCallResult, // f1(u, result) - JacobianMappingF1Trans: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, // jf1(u, d, result) - MappingF2: Fn(&[f64], &mut [f64]) -> FunctionCallResult, // f2(u, result) - JacobianMappingF2Trans: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, // jf2(u, d, result) - SetC: Constraint, + T: Float + Sum + AddAssign, + Cost: Fn(&[T], &mut T) -> FunctionCallResult, // f(u, result) + CostGradient: Fn(&[T], &mut [T]) -> FunctionCallResult, // df(u, result) + MappingF1: Fn(&[T], &mut [T]) -> FunctionCallResult, // f1(u, result) + JacobianMappingF1Trans: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, // jf1(u, d, result) + MappingF2: Fn(&[T], &mut [T]) -> FunctionCallResult, // f2(u, result) + JacobianMappingF2Trans: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, // jf2(u, d, result) + SetC: Constraint, { f: Cost, df: CostGradient, @@ -89,6 +98,7 @@ pub struct AlmFactory< jacobian_mapping_f2_trans: Option, set_c: Option, n2: usize, + marker: PhantomData, } impl< @@ -99,6 +109,7 @@ impl< Cost, 
CostGradient, SetC, + T, > AlmFactory< MappingF1, @@ -108,15 +119,17 @@ impl< Cost, CostGradient, SetC, + T, > where - Cost: Fn(&[f64], &mut f64) -> FunctionCallResult, // f(u, result) - CostGradient: Fn(&[f64], &mut [f64]) -> FunctionCallResult, // df(u, result) - MappingF1: Fn(&[f64], &mut [f64]) -> FunctionCallResult, // f1(u, result) - JacobianMappingF1Trans: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, // jf1(u, d, result) - MappingF2: Fn(&[f64], &mut [f64]) -> FunctionCallResult, // f2(u, result) - JacobianMappingF2Trans: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, // jf2(u, d, result) - SetC: Constraint, + T: Float + Sum + AddAssign, + Cost: Fn(&[T], &mut T) -> FunctionCallResult, // f(u, result) + CostGradient: Fn(&[T], &mut [T]) -> FunctionCallResult, // df(u, result) + MappingF1: Fn(&[T], &mut [T]) -> FunctionCallResult, // f1(u, result) + JacobianMappingF1Trans: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, // jf1(u, d, result) + MappingF2: Fn(&[T], &mut [T]) -> FunctionCallResult, // f2(u, result) + JacobianMappingF2Trans: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, // jf2(u, d, result) + SetC: Constraint, { /// Construct a new instance of `MockFactory` /// @@ -190,6 +203,7 @@ where jacobian_mapping_f2_trans, set_c, n2, + marker: PhantomData, } } @@ -215,15 +229,20 @@ where /// This method returns `Ok(())` if the computation is successful or an appropriate /// `SolverError` otherwise. 
/// - pub fn psi(&self, u: &[f64], xi: &[f64], cost: &mut f64) -> FunctionCallResult { + pub fn psi(&self, u: &[T], xi: &[T], cost: &mut T) -> FunctionCallResult { (self.f)(u, cost)?; let ny = if !xi.is_empty() { xi.len() - 1 } else { 0 }; - let mut f1_u_plus_y_over_c = vec![0.0; ny]; - let mut s = vec![0.0; ny]; + let mut f1_u_plus_y_over_c = vec![T::zero(); ny]; + let mut s = vec![T::zero(); ny]; if let (Some(set_c), Some(mapping_f1)) = (&self.set_c, &self.mapping_f1) { let penalty_parameter = xi[0]; mapping_f1(u, &mut f1_u_plus_y_over_c)?; // f1_u = F1(u) let y_lagrange_mult = &xi[1..]; + let penalty_scale = if penalty_parameter > T::one() { + penalty_parameter + } else { + T::one() + }; // Note: In the first term below, we divide by 'max(c, 1)', instead of // just 'c'. The reason is that this allows to set c=0 and // retrieve the value of the original cost function @@ -231,18 +250,20 @@ where f1_u_plus_y_over_c .iter_mut() .zip(y_lagrange_mult.iter()) - .for_each(|(ti, yi)| *ti += yi / f64::max(penalty_parameter, 1.0)); + .for_each(|(ti, yi)| *ti = *ti + *yi / penalty_scale); s.copy_from_slice(&f1_u_plus_y_over_c); set_c.project(&mut s); - *cost += 0.5 - * penalty_parameter - * matrix_operations::norm2_squared_diff(&f1_u_plus_y_over_c, &s); + let dist_sq: T = matrix_operations::norm2_squared_diff(&f1_u_plus_y_over_c, &s); + let scaling: T = half::() * penalty_parameter; + *cost = *cost + scaling * dist_sq; } if let Some(f2) = &self.mapping_f2 { let c = xi[0]; - let mut z = vec![0.0; self.n2]; + let mut z = vec![T::zero(); self.n2]; f2(u, &mut z)?; - *cost += 0.5 * c * matrix_operations::norm2_squared(&z); + let norm_sq: T = matrix_operations::norm2_squared(&z); + let scaling: T = half::() * c; + *cost = *cost + scaling * norm_sq; } Ok(()) } @@ -267,7 +288,7 @@ where /// This method returns `Ok(())` if the computation is successful or an appropriate /// `SolverError` otherwise. 
/// - pub fn d_psi(&self, u: &[f64], xi: &[f64], grad: &mut [f64]) -> FunctionCallResult { + pub fn d_psi(&self, u: &[T], xi: &[T], grad: &mut [T]) -> FunctionCallResult { let nu = u.len(); // The following statement is needed to account for the case where n1=n2=0 @@ -285,16 +306,16 @@ where &self.jacobian_mapping_f1_trans, ) { let c_penalty_parameter = xi[0]; - let mut f1_u_plus_y_over_c = vec![0.0; ny]; - let mut s_aux_var = vec![0.0; ny]; // auxiliary variable `s` + let mut f1_u_plus_y_over_c = vec![T::zero(); ny]; + let mut s_aux_var = vec![T::zero(); ny]; // auxiliary variable `s` let y_lagrange_mult = &xi[1..]; - let mut jac_prod = vec![0.0; nu]; + let mut jac_prod = vec![T::zero(); nu]; mapping_f1(u, &mut f1_u_plus_y_over_c)?; // f1_u_plus_y_over_c = F1(u) // f1_u_plus_y_over_c = F1(u) + y/c f1_u_plus_y_over_c .iter_mut() .zip(y_lagrange_mult.iter()) - .for_each(|(ti, yi)| *ti += yi / c_penalty_parameter); + .for_each(|(ti, yi)| *ti = *ti + *yi / c_penalty_parameter); s_aux_var.copy_from_slice(&f1_u_plus_y_over_c); // s = t set_c.project(&mut s_aux_var); // s = Proj_C(F1(u) + y/c) @@ -302,29 +323,29 @@ where f1_u_plus_y_over_c .iter_mut() .zip(s_aux_var.iter()) - .for_each(|(ti, si)| *ti -= si); + .for_each(|(ti, si)| *ti = *ti - *si); jf1t(u, &f1_u_plus_y_over_c, &mut jac_prod)?; // grad += c*t grad.iter_mut() .zip(jac_prod.iter()) - .for_each(|(gradi, jac_prodi)| *gradi += c_penalty_parameter * jac_prodi); + .for_each(|(gradi, jac_prodi)| *gradi = *gradi + c_penalty_parameter * *jac_prodi); } // Compute second part: JF2(u)'*F2(u) if let (Some(f2), Some(jf2)) = (&self.mapping_f2, &self.jacobian_mapping_f2_trans) { let c = xi[0]; - let mut f2u_aux = vec![0.0; self.n2]; - let mut jf2u_times_f2u_aux = vec![0.0; nu]; + let mut f2u_aux = vec![T::zero(); self.n2]; + let mut jf2u_times_f2u_aux = vec![T::zero(); nu]; f2(u, &mut f2u_aux)?; // f2u_aux = F2(u) jf2(u, &f2u_aux, &mut jf2u_times_f2u_aux)?; // jf2u_times_f2u_aux = JF2(u)'*f2u_aux // = JF2(u)'*F2(u) // 
grad += c * jf2u_times_f2u_aux - grad.iter_mut() - .zip(jf2u_times_f2u_aux.iter()) - .for_each(|(gradi, jf2u_times_f2u_aux_i)| *gradi += c * jf2u_times_f2u_aux_i); + grad.iter_mut().zip(jf2u_times_f2u_aux.iter()).for_each( + |(gradi, jf2u_times_f2u_aux_i)| *gradi = *gradi + c * *jf2u_times_f2u_aux_i, + ); } Ok(()) } diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs index c7d9ec08..65533c4d 100644 --- a/src/alm/alm_optimizer.rs +++ b/src/alm/alm_optimizer.rs @@ -1,19 +1,19 @@ use crate::{ alm::*, constraints, - core::{panoc::PANOCOptimizer, ExitStatus, Optimizer, Problem, SolverStatus}, + core::{panoc::PANOCOptimizer, ExitStatus, Problem, SolverStatus}, matrix_operations, FunctionCallResult, SolverError, }; +use lbfgs::LbfgsPrecision; +use num::Float; +use std::{iter::Sum, ops::AddAssign}; const DEFAULT_MAX_OUTER_ITERATIONS: usize = 50; const DEFAULT_MAX_INNER_ITERATIONS: usize = 5000; -const DEFAULT_EPSILON_TOLERANCE: f64 = 1e-6; -const DEFAULT_DELTA_TOLERANCE: f64 = 1e-4; -const DEFAULT_PENALTY_UPDATE_FACTOR: f64 = 5.0; -const DEFAULT_EPSILON_UPDATE_FACTOR: f64 = 0.1; -const DEFAULT_INFEAS_SUFFICIENT_DECREASE_FACTOR: f64 = 0.1; -const DEFAULT_INITIAL_TOLERANCE: f64 = 0.1; -const SMALL_EPSILON: f64 = f64::EPSILON; + +fn float(value: f64) -> T { + T::from(value).expect("floating-point constant must be representable") +} /// Internal/private structure used by method AlmOptimizer.step /// to return some minimal information about the inner problem @@ -124,17 +124,19 @@ pub struct AlmOptimizer< ConstraintsType, AlmSetC, LagrangeSetY, + T = f64, > where - MappingAlm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - MappingPm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - ParametricGradientType: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, - ParametricCostType: Fn(&[f64], &[f64], &mut f64) -> FunctionCallResult, - ConstraintsType: constraints::Constraint, - AlmSetC: constraints::Constraint, - LagrangeSetY: constraints::Constraint, + T: Float + 
LbfgsPrecision + Sum + AddAssign, + MappingAlm: Fn(&[T], &mut [T]) -> FunctionCallResult, + MappingPm: Fn(&[T], &mut [T]) -> FunctionCallResult, + ParametricGradientType: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, + ParametricCostType: Fn(&[T], &[T], &mut T) -> FunctionCallResult, + ConstraintsType: constraints::Constraint, + AlmSetC: constraints::Constraint, + LagrangeSetY: constraints::Constraint, { /// ALM cache (borrowed) - alm_cache: &'life mut AlmCache, + alm_cache: &'life mut AlmCache, /// ALM problem definition (oracle) alm_problem: AlmProblem< MappingAlm, @@ -144,6 +146,7 @@ pub struct AlmOptimizer< ConstraintsType, AlmSetC, LagrangeSetY, + T, >, /// Maximum number of outer iterations max_outer_iterations: usize, @@ -152,19 +155,19 @@ pub struct AlmOptimizer< /// Maximum duration max_duration: Option, /// epsilon for inner AKKT condition - epsilon_tolerance: f64, + epsilon_tolerance: T, /// delta for outer AKKT condition - delta_tolerance: f64, + delta_tolerance: T, /// At every outer iteration, c is multiplied by this scalar - penalty_update_factor: f64, + penalty_update_factor: T, /// The epsilon-tolerance is multiplied by this factor until /// it reaches its target value - epsilon_update_factor: f64, + epsilon_update_factor: T, /// If current_infeasibility <= sufficient_decrease_coeff * previous_infeasibility, /// then the penalty parameter is kept constant - sufficient_decrease_coeff: f64, + sufficient_decrease_coeff: T, // Initial tolerance (for the inner problem) - epsilon_inner_initial: f64, + epsilon_inner_initial: T, } impl< @@ -176,6 +179,7 @@ impl< ConstraintsType, AlmSetC, LagrangeSetY, + T, > AlmOptimizer< 'life, @@ -186,15 +190,17 @@ impl< ConstraintsType, AlmSetC, LagrangeSetY, + T, > where - MappingAlm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - MappingPm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - ParametricGradientType: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, - ParametricCostType: Fn(&[f64], &[f64], &mut f64) 
-> FunctionCallResult, - ConstraintsType: constraints::Constraint, - AlmSetC: constraints::Constraint, - LagrangeSetY: constraints::Constraint, + T: Float + LbfgsPrecision + Sum + AddAssign, + MappingAlm: Fn(&[T], &mut [T]) -> FunctionCallResult, + MappingPm: Fn(&[T], &mut [T]) -> FunctionCallResult, + ParametricGradientType: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, + ParametricCostType: Fn(&[T], &[T], &mut T) -> FunctionCallResult, + ConstraintsType: constraints::Constraint, + AlmSetC: constraints::Constraint, + LagrangeSetY: constraints::Constraint, { /* ---------------------------------------------------------------------------- */ /* CONSTRUCTOR */ @@ -250,7 +256,7 @@ where ///``` /// pub fn new( - alm_cache: &'life mut AlmCache, + alm_cache: &'life mut AlmCache, alm_problem: AlmProblem< MappingAlm, MappingPm, @@ -259,26 +265,25 @@ where ConstraintsType, AlmSetC, LagrangeSetY, + T, >, ) -> Self { // set the initial value of the inner tolerance; this step is // not necessary, however, because we set the initial tolerance // in #solve (see below) - alm_cache - .panoc_cache - .set_akkt_tolerance(DEFAULT_INITIAL_TOLERANCE); + alm_cache.panoc_cache.set_akkt_tolerance(float(0.1)); AlmOptimizer { alm_cache, alm_problem, max_outer_iterations: DEFAULT_MAX_OUTER_ITERATIONS, max_inner_iterations: DEFAULT_MAX_INNER_ITERATIONS, max_duration: None, - epsilon_tolerance: DEFAULT_EPSILON_TOLERANCE, - delta_tolerance: DEFAULT_DELTA_TOLERANCE, - penalty_update_factor: DEFAULT_PENALTY_UPDATE_FACTOR, - epsilon_update_factor: DEFAULT_EPSILON_UPDATE_FACTOR, - sufficient_decrease_coeff: DEFAULT_INFEAS_SUFFICIENT_DECREASE_FACTOR, - epsilon_inner_initial: DEFAULT_INITIAL_TOLERANCE, + epsilon_tolerance: float(1e-6), + delta_tolerance: float(1e-4), + penalty_update_factor: float(5.0), + epsilon_update_factor: float(0.1), + sufficient_decrease_coeff: float(0.1), + epsilon_inner_initial: float(0.1), } } @@ -367,8 +372,11 @@ where /// /// The method panics if the specified tolerance 
is not positive /// - pub fn with_delta_tolerance(mut self, delta_tolerance: f64) -> Self { - assert!(delta_tolerance > 0.0, "delta_tolerance must be positive"); + pub fn with_delta_tolerance(mut self, delta_tolerance: T) -> Self { + assert!( + delta_tolerance > T::zero(), + "delta_tolerance must be positive" + ); self.delta_tolerance = delta_tolerance; self } @@ -387,9 +395,9 @@ where /// /// The method panics if the specified tolerance is not positive /// - pub fn with_epsilon_tolerance(mut self, epsilon_tolerance: f64) -> Self { + pub fn with_epsilon_tolerance(mut self, epsilon_tolerance: T) -> Self { assert!( - epsilon_tolerance > 0.0, + epsilon_tolerance > T::zero(), "epsilon_tolerance must be positive" ); self.epsilon_tolerance = epsilon_tolerance; @@ -415,9 +423,9 @@ where /// The method panics if the update factor is not larger than `1.0 + f64::EPSILON` /// /// - pub fn with_penalty_update_factor(mut self, penalty_update_factor: f64) -> Self { + pub fn with_penalty_update_factor(mut self, penalty_update_factor: T) -> Self { assert!( - penalty_update_factor > 1.0 + SMALL_EPSILON, + penalty_update_factor > T::one() + T::epsilon(), "`penalty_update_factor` must be larger than 1.0 + f64::EPSILON" ); self.penalty_update_factor = penalty_update_factor; @@ -444,13 +452,10 @@ where /// The method panics if the specified tolerance update factor is not in the /// interval from `f64::EPSILON` to `1.0 - f64::EPSILON`. 
/// - pub fn with_inner_tolerance_update_factor( - mut self, - inner_tolerance_update_factor: f64, - ) -> Self { + pub fn with_inner_tolerance_update_factor(mut self, inner_tolerance_update_factor: T) -> Self { assert!( - inner_tolerance_update_factor > SMALL_EPSILON - && inner_tolerance_update_factor < 1.0 - SMALL_EPSILON, + inner_tolerance_update_factor > T::epsilon() + && inner_tolerance_update_factor < T::one() - T::epsilon(), "the tolerance update factor needs to be in (f64::EPSILON, 1)" ); self.epsilon_update_factor = inner_tolerance_update_factor; @@ -480,7 +485,7 @@ where /// `with_inner_tolerance` to do so before invoking `with_initial_inner_tolerance`. /// /// - pub fn with_initial_inner_tolerance(mut self, initial_inner_tolerance: f64) -> Self { + pub fn with_initial_inner_tolerance(mut self, initial_inner_tolerance: T) -> Self { assert!( initial_inner_tolerance >= self.epsilon_tolerance, "the initial tolerance should be no less than the target tolerance" @@ -514,11 +519,11 @@ where /// pub fn with_sufficient_decrease_coefficient( mut self, - sufficient_decrease_coefficient: f64, + sufficient_decrease_coefficient: T, ) -> Self { assert!( - sufficient_decrease_coefficient < 1.0 - SMALL_EPSILON - && sufficient_decrease_coefficient > SMALL_EPSILON, + sufficient_decrease_coefficient < T::one() - T::epsilon() + && sufficient_decrease_coefficient > T::epsilon(), "sufficient_decrease_coefficient must be in (f64::EPSILON, 1.0 - f64::EPSILON)" ); self.sufficient_decrease_coeff = sufficient_decrease_coefficient; @@ -540,7 +545,7 @@ where /// /// The method will panic if the length of `y_init` is not equal to `n1` /// - pub fn with_initial_lagrange_multipliers(mut self, y_init: &[f64]) -> Self { + pub fn with_initial_lagrange_multipliers(mut self, y_init: &[T]) -> Self { let cache = &mut self.alm_cache; assert!( y_init.len() == self.alm_problem.n1, @@ -570,9 +575,9 @@ where /// The method panics if the specified initial penalty parameter is not /// larger than 
`f64::EPSILON` /// - pub fn with_initial_penalty(self, c0: f64) -> Self { + pub fn with_initial_penalty(self, c0: T) -> Self { assert!( - c0 > SMALL_EPSILON, + c0 > T::epsilon(), "the initial penalty must be larger than f64::EPSILON" ); if let Some(xi_in_cache) = &mut self.alm_cache.xi { @@ -596,7 +601,7 @@ where } /// Computes PM infeasibility, that is, ||F2(u)|| - fn compute_pm_infeasibility(&mut self, u: &[f64]) -> FunctionCallResult { + fn compute_pm_infeasibility(&mut self, u: &[T]) -> FunctionCallResult { let problem = &self.alm_problem; // ALM problem let cache = &mut self.alm_cache; // ALM cache @@ -613,7 +618,7 @@ where /// /// `y_plus <-- y + c*[F1(u_plus) - Proj_C(F1(u_plus) + y/c)]` /// - fn update_lagrange_multipliers(&mut self, u: &[f64]) -> FunctionCallResult { + fn update_lagrange_multipliers(&mut self, u: &[T]) -> FunctionCallResult { let problem = &self.alm_problem; // ALM problem let cache = &mut self.alm_cache; // ALM cache @@ -647,7 +652,7 @@ where .iter_mut() .zip(y.iter()) .zip(w_alm_aux.iter()) - .for_each(|((y_plus_i, y_i), w_alm_aux_i)| *y_plus_i = w_alm_aux_i + y_i / c); + .for_each(|((y_plus_i, y_i), w_alm_aux_i)| *y_plus_i = *w_alm_aux_i + *y_i / c); // Step #3: y_plus := Proj_C(y_plus) alm_set_c.project(y_plus); @@ -659,7 +664,7 @@ where .zip(w_alm_aux.iter()) .for_each(|((y_plus_i, y_i), w_alm_aux_i)| { // y_plus := y + c * (w_alm_aux - y_plus) - *y_plus_i = y_i + c * (w_alm_aux_i - *y_plus_i) + *y_plus_i = *y_i + c * (*w_alm_aux_i - *y_plus_i) }); } @@ -696,7 +701,7 @@ where /// error in solving the inner problem. /// /// - fn solve_inner_problem(&mut self, u: &mut [f64]) -> Result { + fn solve_inner_problem(&mut self, u: &mut [T]) -> Result { let alm_problem = &self.alm_problem; // Problem let alm_cache = &mut self.alm_cache; // ALM cache @@ -704,7 +709,7 @@ where // empty vector, otherwise. 
We do that becaues the user has the option // to not use any ALM/PM constraints; in that case, `alm_cache.xi` is // `None` - let xi_empty = Vec::new(); + let xi_empty = Vec::::new(); let xi = if let Some(xi_cached) = &alm_cache.xi { xi_cached } else { @@ -713,11 +718,11 @@ where // Construct psi and psi_grad (as functions of `u` alone); it is // psi(u) = psi(u; xi) and psi_grad(u) = phi_grad(u; xi) // psi: R^nu --> R - let psi = |u: &[f64], psi_val: &mut f64| -> FunctionCallResult { + let psi = |u: &[T], psi_val: &mut T| -> FunctionCallResult { (alm_problem.parametric_cost)(u, xi, psi_val) }; // psi_grad: R^nu --> R^nu - let psi_grad = |u: &[f64], psi_grad: &mut [f64]| -> FunctionCallResult { + let psi_grad = |u: &[T], psi_grad: &mut [T]| -> FunctionCallResult { (alm_problem.parametric_gradient)(u, xi, psi_grad) }; // define the inner problem @@ -750,7 +755,7 @@ where || if let Some(xi) = &cache.xi { let c = xi[0]; cache.iteration > 0 - && cache.delta_y_norm_plus <= c * self.delta_tolerance + SMALL_EPSILON + && cache.delta_y_norm_plus <= c * self.delta_tolerance + T::epsilon() } else { true }; @@ -758,13 +763,13 @@ where // If n2 = 0, there are no PM-type constraints, so this // criterion is automatically satisfied let criterion_2 = - problem.n2 == 0 || cache.f2_norm_plus <= self.delta_tolerance + SMALL_EPSILON; + problem.n2 == 0 || cache.f2_norm_plus <= self.delta_tolerance + T::epsilon(); // Criterion 3: epsilon_nu <= epsilon // This function will panic is there is no akkt_tolerance // This should never happen because we set the AKKT tolerance // in the constructor and can never become `None` again let criterion_3 = - cache.panoc_cache.akkt_tolerance.unwrap() <= self.epsilon_tolerance + SMALL_EPSILON; + cache.panoc_cache.akkt_tolerance.unwrap() <= self.epsilon_tolerance + T::epsilon(); criterion_1 && criterion_2 && criterion_3 } @@ -781,9 +786,9 @@ where let is_alm = problem.n1 > 0; let is_pm = problem.n2 > 0; let criterion_alm = cache.delta_y_norm_plus - <= 
self.sufficient_decrease_coeff * cache.delta_y_norm + SMALL_EPSILON; + <= self.sufficient_decrease_coeff * cache.delta_y_norm + T::epsilon(); let criterion_pm = - cache.f2_norm_plus <= self.sufficient_decrease_coeff * cache.f2_norm + SMALL_EPSILON; + cache.f2_norm_plus <= self.sufficient_decrease_coeff * cache.f2_norm + T::epsilon(); if is_alm && !is_pm { return criterion_alm; } else if !is_alm && is_pm { @@ -798,17 +803,21 @@ where fn update_penalty_parameter(&mut self) { let cache = &mut self.alm_cache; if let Some(xi) = &mut cache.xi { - xi[0] *= self.penalty_update_factor; + xi[0] = xi[0] * self.penalty_update_factor; } } fn update_inner_akkt_tolerance(&mut self) { let cache = &mut self.alm_cache; // epsilon_{nu+1} := max(epsilon, beta*epsilon_nu) - cache.panoc_cache.set_akkt_tolerance(f64::max( - cache.panoc_cache.akkt_tolerance.unwrap() * self.epsilon_update_factor, - self.epsilon_tolerance, - )); + let next_tolerance = cache.panoc_cache.akkt_tolerance.unwrap() * self.epsilon_update_factor; + cache + .panoc_cache + .set_akkt_tolerance(if next_tolerance > self.epsilon_tolerance { + next_tolerance + } else { + self.epsilon_tolerance + }); } fn final_cache_update(&mut self) { @@ -837,7 +846,7 @@ where /// - Shrinks the inner tolerance and /// - Updates the ALM cache /// - fn step(&mut self, u: &mut [f64]) -> Result { + fn step(&mut self, u: &mut [T]) -> Result { // store the exit status of the inner problem in this problem // (we'll need to return it within `InnerProblemStatus`) let mut inner_exit_status: ExitStatus = ExitStatus::Converged; @@ -850,7 +859,8 @@ where // we should keep solving. 
self.solve_inner_problem(u).map(|status: SolverStatus| { let inner_iters = status.iterations(); - self.alm_cache.last_inner_problem_norm_fpr = status.norm_fpr(); + self.alm_cache.last_inner_problem_norm_fpr = + T::from(status.norm_fpr()).expect("inner problem norm FPR must fit in T"); self.alm_cache.inner_iteration_count += inner_iters; inner_exit_status = status.exit_status(); })?; @@ -885,18 +895,18 @@ where Ok(InnerProblemStatus::new(true, inner_exit_status)) // `true` means do continue the outer iterations } - fn compute_cost_at_solution(&mut self, u: &mut [f64]) -> Result { + fn compute_cost_at_solution(&mut self, u: &mut [T]) -> Result { /* WORK IN PROGRESS */ let alm_problem = &self.alm_problem; // Problem let alm_cache = &mut self.alm_cache; // ALM Cache let mut empty_vec = std::vec::Vec::new(); // Empty vector - let xi: &mut std::vec::Vec = alm_cache.xi.as_mut().unwrap_or(&mut empty_vec); - let mut __c: f64 = 0.0; + let xi: &mut std::vec::Vec = alm_cache.xi.as_mut().unwrap_or(&mut empty_vec); + let mut __c = T::zero(); if !xi.is_empty() { __c = xi[0]; - xi[0] = 0.0; + xi[0] = T::zero(); } - let mut cost_value: f64 = 0.0; + let mut cost_value = T::zero(); (alm_problem.parametric_cost)(u, xi, &mut cost_value)?; if !xi.is_empty() { xi[0] = __c; @@ -911,7 +921,7 @@ where /// Solve the specified ALM problem /// /// - pub fn solve(&mut self, u: &mut [f64]) -> Result { + pub fn solve(&mut self, u: &mut [T]) -> Result, SolverError> { let mut num_outer_iterations = 0; // let tic = std::time::Instant::now(); let tic = instant::Instant::now(); @@ -965,7 +975,7 @@ where let c = if let Some(xi) = &self.alm_cache.xi { xi[0] } else { - 0.0 + T::zero() }; let cost = self.compute_cost_at_solution(u)?; diff --git a/src/alm/alm_optimizer_status.rs b/src/alm/alm_optimizer_status.rs index 5c10e477..a0249aa0 100644 --- a/src/alm/alm_optimizer_status.rs +++ b/src/alm/alm_optimizer_status.rs @@ -1,4 +1,5 @@ use crate::core::ExitStatus; +use num::Float; /// Solution statistics for 
`AlmOptimizer` /// @@ -7,7 +8,10 @@ use crate::core::ExitStatus; /// `AlmOptimizerStatus` instances. /// #[derive(Debug)] -pub struct AlmOptimizerStatus { +pub struct AlmOptimizerStatus +where + T: Float, +{ /// Exit status exit_status: ExitStatus, /// Number of outer iterations @@ -18,23 +22,23 @@ pub struct AlmOptimizerStatus { /// inner solvers num_inner_iterations: usize, /// Norm of the fixed-point residual of the the problem - last_problem_norm_fpr: f64, + last_problem_norm_fpr: T, /// Lagrange multipliers vector - lagrange_multipliers: Option>, + lagrange_multipliers: Option>, /// Total solve time solve_time: std::time::Duration, /// Last value of penalty parameter - penalty: f64, + penalty: T, /// A measure of infeasibility of constraints F1(u; p) in C - delta_y_norm: f64, + delta_y_norm: T, /// Norm of F2 at the solution, which is a measure of infeasibility /// of constraints F2(u; p) = 0 - f2_norm: f64, + f2_norm: T, /// Value of cost function at optimal solution (optimal cost) - cost: f64, + cost: T, } -impl AlmOptimizerStatus { +impl AlmOptimizerStatus { /// Constructor for instances of `AlmOptimizerStatus` /// /// This method is only accessibly within this crate. 
@@ -60,13 +64,13 @@ impl AlmOptimizerStatus { exit_status, num_outer_iterations: 0, num_inner_iterations: 0, - last_problem_norm_fpr: -1.0, + last_problem_norm_fpr: -T::one(), lagrange_multipliers: None, solve_time: std::time::Duration::from_nanos(0), - penalty: 0.0, - delta_y_norm: 0.0, - f2_norm: 0.0, - cost: 0.0, + penalty: T::zero(), + delta_y_norm: T::zero(), + f2_norm: T::zero(), + cost: T::zero(), } } @@ -129,7 +133,7 @@ impl AlmOptimizerStatus { /// Does not panic; it is the responsibility of the caller to provide a vector of /// Lagrange multipliers of correct length /// - pub(crate) fn with_lagrange_multipliers(mut self, lagrange_multipliers: &[f64]) -> Self { + pub(crate) fn with_lagrange_multipliers(mut self, lagrange_multipliers: &[T]) -> Self { self.lagrange_multipliers = Some(vec![]); if let Some(y) = &mut self.lagrange_multipliers { y.extend_from_slice(lagrange_multipliers); @@ -144,9 +148,9 @@ impl AlmOptimizerStatus { /// /// The method panics if the provided penalty parameter is negative /// - pub(crate) fn with_penalty(mut self, penalty: f64) -> Self { + pub(crate) fn with_penalty(mut self, penalty: T) -> Self { assert!( - penalty >= 0.0, + penalty >= T::zero(), "the penalty parameter should not be negative" ); self.penalty = penalty; @@ -161,28 +165,31 @@ impl AlmOptimizerStatus { /// The method panics if the provided norm of the fixed-point residual is /// negative /// - pub(crate) fn with_last_problem_norm_fpr(mut self, last_problem_norm_fpr: f64) -> Self { + pub(crate) fn with_last_problem_norm_fpr(mut self, last_problem_norm_fpr: T) -> Self { assert!( - last_problem_norm_fpr >= 0.0, + last_problem_norm_fpr >= T::zero(), "last_problem_norm_fpr should not be negative" ); self.last_problem_norm_fpr = last_problem_norm_fpr; self } - pub(crate) fn with_delta_y_norm(mut self, delta_y_norm: f64) -> Self { - assert!(delta_y_norm >= 0.0, "delta_y_norm must be nonnegative"); + pub(crate) fn with_delta_y_norm(mut self, delta_y_norm: T) -> Self { + 
assert!( + delta_y_norm >= T::zero(), + "delta_y_norm must be nonnegative" + ); self.delta_y_norm = delta_y_norm; self } - pub(crate) fn with_f2_norm(mut self, f2_norm: f64) -> Self { - assert!(f2_norm >= 0.0, "f2_norm must be nonnegative"); + pub(crate) fn with_f2_norm(mut self, f2_norm: T) -> Self { + assert!(f2_norm >= T::zero(), "f2_norm must be nonnegative"); self.f2_norm = f2_norm; self } - pub(crate) fn with_cost(mut self, cost: f64) -> Self { + pub(crate) fn with_cost(mut self, cost: T) -> Self { self.cost = cost; self } @@ -192,17 +199,17 @@ impl AlmOptimizerStatus { // ------------------------------------------------- /// Update cost (to be used when the cost needs to be scaled as a result of preconditioning) - pub fn update_cost(&mut self, new_cost: f64) { + pub fn update_cost(&mut self, new_cost: T) { self.cost = new_cost; } /// Update ALM infeasibility - pub fn update_f1_infeasibility(&mut self, new_alm_infeasibility: f64) { + pub fn update_f1_infeasibility(&mut self, new_alm_infeasibility: T) { self.delta_y_norm = new_alm_infeasibility; } /// Update PM infeasibility - pub fn update_f2_norm(&mut self, new_pm_infeasibility: f64) { + pub fn update_f2_norm(&mut self, new_pm_infeasibility: T) { self.f2_norm = new_pm_infeasibility; } @@ -241,7 +248,7 @@ impl AlmOptimizerStatus { /// Vector of Lagrange multipliers at the solution /// - /// The method returns a reference to an `Option>` which contains + /// The method returns a reference to an `Option>` which contains /// the vector of Lagrange multipliers at the solution, or is `None` if /// the problem has no ALM-type constraints. 
/// @@ -249,7 +256,7 @@ impl AlmOptimizerStatus { /// /// Does not panic /// - pub fn lagrange_multipliers(&self) -> &Option> { + pub fn lagrange_multipliers(&self) -> &Option> { &self.lagrange_multipliers } @@ -259,7 +266,7 @@ impl AlmOptimizerStatus { /// /// Does not panic /// - pub fn last_problem_norm_fpr(&self) -> f64 { + pub fn last_problem_norm_fpr(&self) -> T { self.last_problem_norm_fpr } @@ -278,23 +285,23 @@ impl AlmOptimizerStatus { /// # Panics /// /// Does not panic - pub fn penalty(&self) -> f64 { + pub fn penalty(&self) -> T { self.penalty } /// Norm of Delta y divided by max{c, 1} - measure of infeasibility - pub fn delta_y_norm_over_c(&self) -> f64 { + pub fn delta_y_norm_over_c(&self) -> T { let c = self.penalty(); - self.delta_y_norm / if c < 1.0 { 1.0 } else { c } + self.delta_y_norm / if c < T::one() { T::one() } else { c } } /// Norm of F2(u) - measure of infeasibility of F2(u) = 0 - pub fn f2_norm(&self) -> f64 { + pub fn f2_norm(&self) -> T { self.f2_norm } /// Value of the cost function at the solution - pub fn cost(&self) -> f64 { + pub fn cost(&self) -> T { self.cost } } diff --git a/src/alm/alm_problem.rs b/src/alm/alm_problem.rs index 9f28bd2e..5dac1c8b 100644 --- a/src/alm/alm_problem.rs +++ b/src/alm/alm_problem.rs @@ -1,4 +1,6 @@ use crate::{constraints::Constraint, FunctionCallResult}; +use num::Float; +use std::marker::PhantomData; /// Definition of optimization problem to be solved with `AlmOptimizer`. 
The optimization /// problem has the general form @@ -32,16 +34,18 @@ pub struct AlmProblem< ConstraintsType, AlmSetC, LagrangeSetY, + T = f64, > where + T: Float, // This is function F1: R^xn --> R^n1 (ALM) - MappingAlm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, + MappingAlm: Fn(&[T], &mut [T]) -> FunctionCallResult, // This is function F2: R^xn --> R^n2 (PM) - MappingPm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - ParametricGradientType: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, - ParametricCostType: Fn(&[f64], &[f64], &mut f64) -> FunctionCallResult, - ConstraintsType: Constraint, - AlmSetC: Constraint, - LagrangeSetY: Constraint, + MappingPm: Fn(&[T], &mut [T]) -> FunctionCallResult, + ParametricGradientType: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, + ParametricCostType: Fn(&[T], &[T], &mut T) -> FunctionCallResult, + ConstraintsType: Constraint, + AlmSetC: Constraint, + LagrangeSetY: Constraint, { // // NOTE: the reason why we need to define different set types (ConstraintsType, @@ -67,6 +71,7 @@ pub struct AlmProblem< pub(crate) n1: usize, /// number of PM-type parameters (range dim of F2) pub(crate) n2: usize, + marker: PhantomData, } impl< @@ -77,6 +82,7 @@ impl< ConstraintsType, AlmSetC, LagrangeSetY, + T, > AlmProblem< MappingAlm, @@ -86,15 +92,17 @@ impl< ConstraintsType, AlmSetC, LagrangeSetY, + T, > where - MappingAlm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - MappingPm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - ParametricGradientType: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, - ParametricCostType: Fn(&[f64], &[f64], &mut f64) -> FunctionCallResult, - ConstraintsType: Constraint, - AlmSetC: Constraint, - LagrangeSetY: Constraint, + T: Float, + MappingAlm: Fn(&[T], &mut [T]) -> FunctionCallResult, + MappingPm: Fn(&[T], &mut [T]) -> FunctionCallResult, + ParametricGradientType: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, + ParametricCostType: Fn(&[T], &[T], &mut T) -> FunctionCallResult, + 
ConstraintsType: Constraint, + AlmSetC: Constraint, + LagrangeSetY: Constraint, { ///Constructs new instance of `AlmProblem` /// @@ -163,6 +171,7 @@ where mapping_f2, n1, n2, + marker: PhantomData, } } } diff --git a/src/alm/mod.rs b/src/alm/mod.rs index 2221c2cd..da10cfb2 100644 --- a/src/alm/mod.rs +++ b/src/alm/mod.rs @@ -45,15 +45,15 @@ pub use alm_problem::AlmProblem; /// Mappings $F_1$ and $F_2$ are computed by functions with signature /// /// ```ignore -/// fn mapping_f(&[f64], &mut [f64]) -> Result<(), crate::SolverError> +/// fn mapping_f(&[T], &mut [T]) -> Result<(), crate::SolverError> /// ``` -pub type MappingType = fn(&[f64], &mut [f64]) -> Result<(), crate::SolverError>; +pub type MappingType = fn(&[T], &mut [T]) -> Result<(), crate::SolverError>; /// Type of the Jacobian of mappings $F_1$ and $F_2$ /// /// These are mappings $(u, d) \mapsto JF_1(u)^\top d$, for given vectors $u\in\mathbb{R}$ /// and $d\in\mathbb{R}^{n_1}$ (similarly for $F_2$) -pub type JacobianMappingType = fn(&[f64], &[f64], &mut [f64]) -> Result<(), crate::SolverError>; +pub type JacobianMappingType = fn(&[T], &[T], &mut [T]) -> Result<(), crate::SolverError>; /// No mapping $F_1(u)$ or $F_2(u)$ is specified pub const NO_MAPPING: Option = None::; @@ -65,6 +65,22 @@ pub const NO_JACOBIAN_MAPPING: Option = None:: = None::; +/// Helper for the generic case where no mapping is provided. +pub fn no_mapping() -> Option> { + None::> +} + +/// Helper for the generic case where no Jacobian mapping is provided. +pub fn no_jacobian_mapping() -> Option> { + None::> +} + +/// Helper for the generic case where no set is provided. 
+pub fn no_set() -> Option { + let _ = std::marker::PhantomData::; + None:: +} + /* ---------------------------------------------------------------------------- */ /* TESTS */ /* ---------------------------------------------------------------------------- */ diff --git a/src/alm/tests.rs b/src/alm/tests.rs index 0fd1c2e7..f85ec324 100644 --- a/src/alm/tests.rs +++ b/src/alm/tests.rs @@ -234,6 +234,84 @@ fn t_alm_only_penalty_method() { assert!(r.f2_norm() < 1e-6); } +#[test] +fn t_alm_only_penalty_method_f32() { + let tolerance = 1e-4_f32; + let nx = 3; + let n1 = 0; + let n2 = 1; + let lbfgs_mem = 5; + let panoc_cache = PANOCCache::::new(nx, tolerance, lbfgs_mem); + let mut alm_cache = AlmCache::::new(panoc_cache, n1, n2); + + let bounds = NoConstraints::new(); + + let f = |u: &[f32], cost: &mut f32| -> Result<(), SolverError> { + *cost = 0.5_f32 * matrix_operations::norm2_squared(u) + matrix_operations::sum(u); + Ok(()) + }; + + let df = |u: &[f32], grad: &mut [f32]| -> Result<(), SolverError> { + grad.iter_mut() + .zip(u.iter()) + .for_each(|(grad_i, u_i)| *grad_i = *u_i + 1.0_f32); + Ok(()) + }; + + let f2 = |u: &[f32], res: &mut [f32]| -> Result<(), SolverError> { + res[0] = matrix_operations::norm2_squared(u) - 1.0_f32; + Ok(()) + }; + let jf2t = |u: &[f32], d: &[f32], res: &mut [f32]| -> Result<(), crate::SolverError> { + res.iter_mut() + .zip(u.iter()) + .for_each(|(res_i, u_i)| *res_i = *u_i * d[0]); + Ok(()) + }; + + let factory = AlmFactory::new( + f, + df, + no_mapping::(), + no_jacobian_mapping::(), + Some(f2), + Some(jf2t), + no_set::(), + n2, + ); + + let alm_problem = AlmProblem::new( + bounds, + no_set::(), + no_set::(), + |u: &[f32], xi: &[f32], cost: &mut f32| -> Result<(), SolverError> { + factory.psi(u, xi, cost) + }, + |u: &[f32], xi: &[f32], grad: &mut [f32]| -> Result<(), SolverError> { + factory.d_psi(u, xi, grad) + }, + no_mapping::(), + Some(f2), + n1, + n2, + ); + + let mut alm_optimizer = AlmOptimizer::new(&mut alm_cache, alm_problem) + 
.with_delta_tolerance(1e-5_f32) + .with_epsilon_tolerance(1e-4_f32) + .with_max_outer_iterations(20) + .with_max_inner_iterations(1000) + .with_initial_penalty(5000.0_f32) + .with_penalty_update_factor(2.2_f32); + + let mut u = vec![0.1_f32; nx]; + let solver_result = alm_optimizer.solve(&mut u); + assert!(solver_result.is_ok()); + let r = solver_result.unwrap(); + assert_eq!(ExitStatus::Converged, r.exit_status()); + assert!(r.f2_norm() < 1e-4_f32); +} + #[test] fn t_alm_numeric_test_1() { let tolerance = 1e-8; diff --git a/src/core/panoc/panoc_optimizer.rs b/src/core/panoc/panoc_optimizer.rs index 98f87667..dfa184e7 100644 --- a/src/core/panoc/panoc_optimizer.rs +++ b/src/core/panoc/panoc_optimizer.rs @@ -8,6 +8,9 @@ use crate::{ }, matrix_operations, FunctionCallResult, SolverError, }; +use lbfgs::LbfgsPrecision; +use num::Float; +use std::iter::Sum; use std::time; const MAX_ITER: usize = 100_usize; @@ -15,23 +18,25 @@ const MAX_ITER: usize = 100_usize; /// Optimizer using the PANOC algorithm /// /// -pub struct PANOCOptimizer<'a, GradientType, ConstraintType, CostType> +pub struct PANOCOptimizer<'a, GradientType, ConstraintType, CostType, T = f64> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float + LbfgsPrecision + Sum, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { - panoc_engine: PANOCEngine<'a, GradientType, ConstraintType, CostType>, + panoc_engine: PANOCEngine<'a, GradientType, ConstraintType, CostType, T>, max_iter: usize, max_duration: Option, } -impl<'a, GradientType, ConstraintType, CostType> - PANOCOptimizer<'a, GradientType, ConstraintType, CostType> +impl<'a, GradientType, ConstraintType, CostType, T> + PANOCOptimizer<'a, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> 
FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float + LbfgsPrecision + Sum, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { /// Constructor of `PANOCOptimizer` /// @@ -44,8 +49,8 @@ where /// /// Does not panic pub fn new( - problem: Problem<'a, GradientType, ConstraintType, CostType>, - cache: &'a mut PANOCCache, + problem: Problem<'a, GradientType, ConstraintType, CostType, T>, + cache: &'a mut PANOCCache, ) -> Self { PANOCOptimizer { panoc_engine: PANOCEngine::new(problem, cache), @@ -62,8 +67,8 @@ where /// ## Panics /// /// The method panics if the specified tolerance is not positive - pub fn with_tolerance(self, tolerance: f64) -> Self { - assert!(tolerance > 0.0, "tolerance must be larger than 0"); + pub fn with_tolerance(self, tolerance: T) -> Self { + assert!(tolerance > T::zero(), "tolerance must be larger than 0"); self.panoc_engine.cache.tolerance = tolerance; self @@ -90,8 +95,11 @@ where /// The method panics if the provided value of the AKKT-specific tolerance is /// not positive. 
/// - pub fn with_akkt_tolerance(self, akkt_tolerance: f64) -> Self { - assert!(akkt_tolerance > 0.0, "akkt_tolerance must be positive"); + pub fn with_akkt_tolerance(self, akkt_tolerance: T) -> Self { + assert!( + akkt_tolerance > T::zero(), + "akkt_tolerance must be positive" + ); self.panoc_engine.cache.set_akkt_tolerance(akkt_tolerance); self } @@ -113,16 +121,12 @@ where self.max_duration = Some(max_duation); self } -} -impl<'life, GradientType, ConstraintType, CostType> Optimizer - for PANOCOptimizer<'life, GradientType, ConstraintType, CostType> -where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult + 'life, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint + 'life, -{ - fn solve(&mut self, u: &mut [f64]) -> Result { + /// Solves the optimization problem for decision variables of scalar type `T`. + /// + /// The returned [`SolverStatus`] stores the reported residual norm and cost + /// value as `f64`, so these values are converted from `T`. 
+ pub fn solve(&mut self, u: &mut [T]) -> Result { let now = instant::Instant::now(); /* @@ -182,12 +186,30 @@ where exit_status, num_iter, now.elapsed(), - self.panoc_engine.cache.best_norm_gamma_fpr, - best_cost_value, + self.panoc_engine + .cache + .best_norm_gamma_fpr + .to_f64() + .expect("best norm gamma FPR must be representable as f64"), + best_cost_value + .to_f64() + .expect("best cost value must be representable as f64"), )) } } +impl<'life, GradientType, ConstraintType, CostType> Optimizer + for PANOCOptimizer<'life, GradientType, ConstraintType, CostType, f64> +where + GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult + 'life, + CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, + ConstraintType: constraints::Constraint + 'life, +{ + fn solve(&mut self, u: &mut [f64]) -> Result { + PANOCOptimizer::solve(self, u) + } +} + /* --------------------------------------------------------------------------------------------- */ /* TESTS */ /* --------------------------------------------------------------------------------------------- */ From b745126f9e12492672cedce66205b3e204d3ddac Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 18:23:14 +0000 Subject: [PATCH 015/133] rust docs: generic float types --- src/alm/alm_cache.rs | 7 ++++++- src/alm/alm_factory.rs | 16 +++++++++++++--- src/alm/alm_optimizer.rs | 22 +++++++++++++++++----- src/alm/alm_optimizer_status.rs | 3 +++ src/alm/alm_problem.rs | 8 ++++++++ src/alm/mod.rs | 17 +++++++++++++++++ 6 files changed, 64 insertions(+), 9 deletions(-) diff --git a/src/alm/alm_cache.rs b/src/alm/alm_cache.rs index bae8bb12..e17400d3 100644 --- a/src/alm/alm_cache.rs +++ b/src/alm/alm_cache.rs @@ -14,7 +14,10 @@ fn default_initial_penalty() -> T { /// the algorithm *updates*. /// /// On the other hand, the problem data are provided in an instance -/// of `AlmProblem` +/// of `AlmProblem`. +/// +/// The scalar type `T` is generic and is typically `f64` or `f32`. 
The default +/// is `f64`. /// #[derive(Debug)] pub struct AlmCache @@ -65,6 +68,8 @@ where /// the inner problem /// - `n1`, `n2`: range dimensions of mappings `F1` and `F2` respectively /// + /// The scalar type `T` is inferred from `panoc_cache`. + /// /// # Panics /// /// Does not panic diff --git a/src/alm/alm_factory.rs b/src/alm/alm_factory.rs index 91ae3a1e..2a1085ae 100644 --- a/src/alm/alm_factory.rs +++ b/src/alm/alm_factory.rs @@ -23,7 +23,7 @@ fn half() -> T { /// by a function with signature: /// ///```rust,ignore -///fn f(u: &[f64], cost: &mut f64) -> FunctionCallResult +///fn f(u: &[T], cost: &mut T) -> FunctionCallResult ///``` /// /// where `cost` is updated with the value $f(u)$, @@ -32,7 +32,7 @@ fn half() -> T { /// which is computed by a function with signature /// /// ```rust,ignore -/// fn df(u: &[f64], grad: &mut [f64]) -> FunctionCallResult +/// fn df(u: &[T], grad: &mut [T]) -> FunctionCallResult /// ``` /// /// where on exit `grad` stores the @@ -42,7 +42,7 @@ fn half() -> T { /// with signature /// /// ```rust,ignore -/// fn mapping(u: &[f64], fu: &mut [f64]) -> FunctionCallResult +/// fn mapping(u: &[T], fu: &mut [T]) -> FunctionCallResult /// ``` /// /// - `JacobianMappingF1Trans` and `JacobianMappingF2Trans`: functions that compute @@ -52,6 +52,9 @@ fn half() -> T { /// - `SetC`: A set $C\subseteq \mathbb{R}^{n_1}$, which is used in the definition /// of the constraints $F_1(u) \in C$ /// +/// - `T`: scalar floating-point type used throughout the ALM data, typically +/// `f64` or `f32` +/// /// The above are used to compute $\psi:\mathbb{R}^{n_u}\to\mathbb{R}$ for given /// $u\in\mathbb{R}^{n_u}$ and $\xi=(c, y)\in\mathbb{R}^{n_1+1}$, where $c\in\mathbb{R}$ /// and $y\in\mathbb{R}^{n_1}$ are the penalty parameter and vector of Lagrange @@ -71,6 +74,8 @@ fn half() -> T { /// /// where $t(u) = F_1(u) + \bar{c}^{-1}y$. /// +/// The default scalar type is `f64`. 
+/// pub struct AlmFactory< MappingF1, JacobianMappingF1Trans, @@ -143,8 +148,13 @@ where /// - `set_c` (optional) set $C$ or `NO_SET` /// - `n2` image dimension of $F_2$ (can be 0) /// + /// The scalar type `T` is inferred from the supplied functions and set. + /// /// # Example /// + /// This example uses `f64` for simplicity, but the same API also works with + /// `f32`. + /// /// ```rust /// use optimization_engine::{constraints::Ball2, alm::*, FunctionCallResult}; /// diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs index 65533c4d..8fd8d2b7 100644 --- a/src/alm/alm_optimizer.rs +++ b/src/alm/alm_optimizer.rs @@ -115,6 +115,9 @@ impl InnerProblemStatus { /// of $C$ and $\delta_{U}$, $\delta_{C^{\ast}}$ are the indicator functions of $U$ and $C^{\ast}$ /// respectively. /// +/// The scalar type `T` is generic and is typically `f64` or `f32`. The default +/// is `f64`. +/// pub struct AlmOptimizer< 'life, MappingAlm, @@ -216,9 +219,15 @@ where /// $\nabla_u \psi(u, \xi)$, $F_1(u)$ (if any), $F_2(u)$ (if any), and sets /// $C$, $U$ and $Y$) /// + /// The scalar type `T` is inferred from `alm_cache`, `alm_problem`, and the + /// supplied closures and sets. + /// /// /// # Example /// + /// This example uses `f64` for simplicity, but the same API also works with + /// `f32`. + /// /// ```rust /// use optimization_engine::{alm::*, FunctionCallResult, core::{panoc::*, constraints}}; /// @@ -420,7 +429,7 @@ where /// /// # Panics /// - /// The method panics if the update factor is not larger than `1.0 + f64::EPSILON` + /// The method panics if the update factor is not larger than `1.0 + T::epsilon()` /// /// pub fn with_penalty_update_factor(mut self, penalty_update_factor: T) -> Self { @@ -450,7 +459,7 @@ where /// # Panics /// /// The method panics if the specified tolerance update factor is not in the - /// interval from `f64::EPSILON` to `1.0 - f64::EPSILON`. + /// interval from `T::epsilon()` to `1.0 - T::epsilon()`. 
/// pub fn with_inner_tolerance_update_factor(mut self, inner_tolerance_update_factor: T) -> Self { assert!( @@ -515,7 +524,7 @@ where /// # Panics /// /// The method panics if the specified sufficient decrease coefficient is not - /// in the range `(f64::EPSILON, 1.0 - f64::EPSILON)` + /// in the range `(T::epsilon(), 1.0 - T::epsilon())` /// pub fn with_sufficient_decrease_coefficient( mut self, @@ -534,7 +543,7 @@ where /// /// # Arguments /// - /// - `y_init`: initial vector of Lagrange multipliers (type: `&[f64]`) of + /// - `y_init`: initial vector of Lagrange multipliers (type: `&[T]`) of /// length equal to `n1` /// /// # Returns @@ -573,7 +582,7 @@ where /// # Panics /// /// The method panics if the specified initial penalty parameter is not - /// larger than `f64::EPSILON` + /// larger than `T::epsilon()` /// pub fn with_initial_penalty(self, c0: T) -> Self { assert!( @@ -920,6 +929,9 @@ where /// Solve the specified ALM problem /// + /// The scalar type of `u` is the same generic floating-point type `T` used by + /// the optimizer, typically `f64` or `f32`. + /// /// pub fn solve(&mut self, u: &mut [T]) -> Result, SolverError> { let mut num_outer_iterations = 0; diff --git a/src/alm/alm_optimizer_status.rs b/src/alm/alm_optimizer_status.rs index a0249aa0..6805c030 100644 --- a/src/alm/alm_optimizer_status.rs +++ b/src/alm/alm_optimizer_status.rs @@ -7,6 +7,9 @@ use num::Float; /// The idea is that only Optimization Engine can create optimizer /// `AlmOptimizerStatus` instances. /// +/// The scalar type `T` is generic and is typically `f64` or `f32`. The default +/// is `f64`. 
+/// #[derive(Debug)] pub struct AlmOptimizerStatus where diff --git a/src/alm/alm_problem.rs b/src/alm/alm_problem.rs index 5dac1c8b..e806d8c6 100644 --- a/src/alm/alm_problem.rs +++ b/src/alm/alm_problem.rs @@ -26,6 +26,9 @@ use std::marker::PhantomData; /// are mappings with smooth partial derivatives, and /// - $C\subseteq\mathbb{R}^{n_1}$ is a convex closed set on which we can easily compute projections. /// +/// The scalar type `T` is generic and is typically `f64` or `f32`. The default +/// is `f64`. +/// pub struct AlmProblem< MappingAlm, MappingPm, @@ -124,8 +127,13 @@ where /// /// Instance of `AlmProblem` /// + /// The scalar type `T` is inferred from the closures and constraint types. + /// /// # Example /// + /// This example uses `f64` for simplicity, but the same API also works with + /// `f32`. + /// /// /// ```rust /// use optimization_engine::{FunctionCallResult, alm::*, constraints::Ball2}; diff --git a/src/alm/mod.rs b/src/alm/mod.rs index da10cfb2..c58358aa 100644 --- a/src/alm/mod.rs +++ b/src/alm/mod.rs @@ -16,6 +16,10 @@ //! the iterative procedure, such as the solution time, number of iterations, //! measures of accuracy and more, in the form of an [`AlmOptimizerStatus`] //! +//! All public ALM types are generic over a scalar type `T`, which is typically +//! `f64` or `f32`. The default is `f64`, so existing code can often omit `T`. +//! The examples in this module use `f64` for brevity. +//! //! When using `AlmOptimizer`, the user is expected to provide a modified cost //! function, `psi` (see [`AlmOptimizer`] for details). This should not be a problem //! for users that use Optimization Engine via its Python or MATLAB interfaces. @@ -42,6 +46,8 @@ pub use alm_problem::AlmProblem; /// Type of mappings $F_1(u)$ and $F_2(u)$ /// +/// The scalar type `T` is a floating-point type, typically `f64` or `f32`. 
+/// /// Mappings $F_1$ and $F_2$ are computed by functions with signature /// /// ```ignore @@ -51,6 +57,8 @@ pub type MappingType = fn(&[T], &mut [T]) -> Result<(), crate::SolverEr /// Type of the Jacobian of mappings $F_1$ and $F_2$ /// +/// The scalar type `T` is a floating-point type, typically `f64` or `f32`. +/// /// These are mappings $(u, d) \mapsto JF_1(u)^\top d$, for given vectors $u\in\mathbb{R}$ /// and $d\in\mathbb{R}^{n_1}$ (similarly for $F_2$) pub type JacobianMappingType = fn(&[T], &[T], &mut [T]) -> Result<(), crate::SolverError>; @@ -66,16 +74,25 @@ pub const NO_SET: Option = None::; /// Helper for the generic case where no mapping is provided. +/// +/// This is useful when the scalar type is not the default `f64`, for example +/// when using `f32`. pub fn no_mapping() -> Option> { None::> } /// Helper for the generic case where no Jacobian mapping is provided. +/// +/// This is useful when the scalar type is not the default `f64`, for example +/// when using `f32`. pub fn no_jacobian_mapping() -> Option> { None::> } /// Helper for the generic case where no set is provided. +/// +/// This is useful when the scalar type is not the default `f64`, for example +/// when using `f32`. 
pub fn no_set() -> Option { let _ = std::marker::PhantomData::; None:: From e61c3f9645fc5a0b9506bab4a4f5f9d666727171 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 18:32:33 +0000 Subject: [PATCH 016/133] final touch: support generic float types --- Cargo.toml | 2 +- src/alm/alm_optimizer.rs | 9 +- src/core/fbs/fbs_optimizer.rs | 28 ++---- src/core/fbs/tests.rs | 9 +- src/core/mod.rs | 9 +- src/core/panoc/panoc_optimizer.rs | 28 ++---- src/core/panoc/tests.rs | 4 +- src/core/solver_status.rs | 22 ++-- src/mocks.rs | 161 +++++++++++++++++------------- src/tests.rs | 4 +- 10 files changed, 148 insertions(+), 128 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e4850148..1eb2049e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,7 +42,7 @@ homepage = "https://alphaville.github.io/optimization-engine/" repository = "https://github.com/alphaville/optimization-engine" # Version of this crate (SemVer) -version = "0.11.1" +version = "0.12.0" edition = "2018" diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs index 8fd8d2b7..c823ce1b 100644 --- a/src/alm/alm_optimizer.rs +++ b/src/alm/alm_optimizer.rs @@ -705,12 +705,12 @@ where /// /// # Returns /// - /// Returns an instance of `Result`, where `SolverStatus` + /// Returns an instance of `Result, SolverError>`, where `SolverStatus` /// is the solver status of the inner problem and `SolverError` is a potential /// error in solving the inner problem. /// /// - fn solve_inner_problem(&mut self, u: &mut [T]) -> Result { + fn solve_inner_problem(&mut self, u: &mut [T]) -> Result, SolverError> { let alm_problem = &self.alm_problem; // Problem let alm_cache = &mut self.alm_cache; // ALM cache @@ -866,10 +866,9 @@ where // If the inner problem fails miserably, the failure should be propagated // upstream (using `?`). If the inner problem has not converged, that is fine, // we should keep solving. 
- self.solve_inner_problem(u).map(|status: SolverStatus| { + self.solve_inner_problem(u).map(|status: SolverStatus| { let inner_iters = status.iterations(); - self.alm_cache.last_inner_problem_norm_fpr = - T::from(status.norm_fpr()).expect("inner problem norm FPR must fit in T"); + self.alm_cache.last_inner_problem_norm_fpr = status.norm_fpr(); self.alm_cache.inner_iteration_count += inner_iters; inner_exit_status = status.exit_status(); })?; diff --git a/src/core/fbs/fbs_optimizer.rs b/src/core/fbs/fbs_optimizer.rs index 284cb911..24b6a7a5 100644 --- a/src/core/fbs/fbs_optimizer.rs +++ b/src/core/fbs/fbs_optimizer.rs @@ -94,10 +94,7 @@ where } /// Solves the optimization problem for decision variables of scalar type `T`. - /// - /// The returned [`SolverStatus`] stores the reported norm of the fixed-point - /// residual and cost value as `f64`, so these values are converted from `T`. - pub fn solve(&mut self, u: &mut [T]) -> Result { + pub fn solve(&mut self, u: &mut [T]) -> Result, SolverError> { let now = instant::Instant::now(); self.fbs_engine.init(u)?; @@ -132,26 +129,21 @@ where }, num_iter, now.elapsed(), - self.fbs_engine - .cache - .norm_fpr - .to_f64() - .expect("norm_fpr must be representable as f64"), - cost_value - .to_f64() - .expect("cost value must be representable as f64"), + self.fbs_engine.cache.norm_fpr, + cost_value, )) } } -impl<'life, GradientType, ConstraintType, CostType> Optimizer - for FBSOptimizer<'life, GradientType, ConstraintType, CostType, f64> +impl<'life, GradientType, ConstraintType, CostType, T> Optimizer + for FBSOptimizer<'life, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult + 'life, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult + 'life, - ConstraintType: constraints::Constraint + 'life, + T: Float, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult + 'life, + CostType: Fn(&[T], &mut T) -> FunctionCallResult + 'life, + ConstraintType: 
constraints::Constraint + 'life, { - fn solve(&mut self, u: &mut [f64]) -> Result { + fn solve(&mut self, u: &mut [T]) -> Result, SolverError> { FBSOptimizer::solve(self, u) } } diff --git a/src/core/fbs/tests.rs b/src/core/fbs/tests.rs index 28aef53c..bc83b8f5 100644 --- a/src/core/fbs/tests.rs +++ b/src/core/fbs/tests.rs @@ -114,7 +114,7 @@ fn t_solve_fbs() { assert!(status.has_converged()); assert!(status.norm_fpr() < tolerance); - unit_test_utils::assert_nearly_equal_array(&mocks::SOLUTION_A, &u, 1e-4, 1e-5, "u"); + unit_test_utils::assert_nearly_equal_array(&mocks::solution_a(), &u, 1e-4, 1e-5, "u"); } #[test] @@ -201,7 +201,8 @@ fn t_solve_fbs_f32() { let status = optimizer.solve(&mut u).unwrap(); assert!(status.has_converged()); - assert!(status.norm_fpr() < tolerance as f64); - assert!((u[0] - crate::mocks::SOLUTION_A[0] as f32).abs() < 1e-4); - assert!((u[1] - crate::mocks::SOLUTION_A[1] as f32).abs() < 1e-4); + let expected = crate::mocks::solution_a::(); + assert!(status.norm_fpr() < tolerance); + assert!((u[0] - expected[0]).abs() < 1e-4); + assert!((u[1] - expected[1]).abs() < 1e-4); } diff --git a/src/core/mod.rs b/src/core/mod.rs index 6e59fe05..f0ea08a8 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -3,6 +3,8 @@ //! //! 
+use num::Float; + pub mod fbs; pub mod panoc; pub mod problem; @@ -29,12 +31,15 @@ pub enum ExitStatus { } /// A general optimizer -pub trait Optimizer { +pub trait Optimizer +where + T: Float, +{ /// solves a given problem and updates the initial estimate `u` with the solution /// /// Returns the solver status /// - fn solve(&mut self, u: &mut [f64]) -> Result; + fn solve(&mut self, u: &mut [T]) -> Result, SolverError>; } /// Engine supporting an algorithm diff --git a/src/core/panoc/panoc_optimizer.rs b/src/core/panoc/panoc_optimizer.rs index dfa184e7..1056597b 100644 --- a/src/core/panoc/panoc_optimizer.rs +++ b/src/core/panoc/panoc_optimizer.rs @@ -123,10 +123,7 @@ where } /// Solves the optimization problem for decision variables of scalar type `T`. - /// - /// The returned [`SolverStatus`] stores the reported residual norm and cost - /// value as `f64`, so these values are converted from `T`. - pub fn solve(&mut self, u: &mut [T]) -> Result { + pub fn solve(&mut self, u: &mut [T]) -> Result, SolverError> { let now = instant::Instant::now(); /* @@ -186,26 +183,21 @@ where exit_status, num_iter, now.elapsed(), - self.panoc_engine - .cache - .best_norm_gamma_fpr - .to_f64() - .expect("best norm gamma FPR must be representable as f64"), - best_cost_value - .to_f64() - .expect("best cost value must be representable as f64"), + self.panoc_engine.cache.best_norm_gamma_fpr, + best_cost_value, )) } } -impl<'life, GradientType, ConstraintType, CostType> Optimizer - for PANOCOptimizer<'life, GradientType, ConstraintType, CostType, f64> +impl<'life, GradientType, ConstraintType, CostType, T> Optimizer + for PANOCOptimizer<'life, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult + 'life, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint + 'life, + T: Float + LbfgsPrecision + Sum, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult + 'life, + CostType: Fn(&[T], 
&mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint + 'life, { - fn solve(&mut self, u: &mut [f64]) -> Result { + fn solve(&mut self, u: &mut [T]) -> Result, SolverError> { PANOCOptimizer::solve(self, u) } } diff --git a/src/core/panoc/tests.rs b/src/core/panoc/tests.rs index afd89ef3..0edbed55 100644 --- a/src/core/panoc/tests.rs +++ b/src/core/panoc/tests.rs @@ -76,7 +76,7 @@ fn t_test_panoc_basic() { } println!("final |fpr| = {}", panoc_engine.cache.norm_gamma_fpr); assert!(panoc_engine.cache.norm_gamma_fpr <= tolerance); - unit_test_utils::assert_nearly_equal_array(&u, &mocks::SOLUTION_A, 1e-6, 1e-8, ""); + unit_test_utils::assert_nearly_equal_array(&u, &mocks::solution_a(), 1e-6, 1e-8, ""); } #[test] @@ -111,7 +111,7 @@ fn t_test_panoc_hard() { println!("\nsol = {:?}", u); assert!(panoc_engine.cache.norm_gamma_fpr <= tolerance_fpr); - unit_test_utils::assert_nearly_equal_array(&u, &mocks::SOLUTION_HARD, 1e-6, 1e-8, ""); + unit_test_utils::assert_nearly_equal_array(&u, &mocks::solution_hard(), 1e-6, 1e-8, ""); } #[test] diff --git a/src/core/solver_status.rs b/src/core/solver_status.rs index e4f7a138..8c75201e 100644 --- a/src/core/solver_status.rs +++ b/src/core/solver_status.rs @@ -2,6 +2,7 @@ //! //! use crate::core::ExitStatus; +use num::Float; use std::time; /// Solver status @@ -10,7 +11,10 @@ use std::time; /// `SolverStatus` are returned by optimizers. 
/// #[derive(Debug, PartialEq, Copy, Clone)] -pub struct SolverStatus { +pub struct SolverStatus +where + T: Float, +{ /// exit status of the algorithm exit_status: ExitStatus, /// number of iterations for convergence @@ -18,12 +22,12 @@ pub struct SolverStatus { /// time it took to solve solve_time: time::Duration, /// norm of the fixed-point residual (FPR) - fpr_norm: f64, + fpr_norm: T, /// cost value at the candidate solution - cost_value: f64, + cost_value: T, } -impl SolverStatus { +impl SolverStatus { /// Constructs a new instance of SolverStatus /// /// ## Arguments @@ -39,9 +43,9 @@ impl SolverStatus { exit_status: ExitStatus, num_iter: usize, solve_time: time::Duration, - fpr_norm: f64, - cost_value: f64, - ) -> SolverStatus { + fpr_norm: T, + cost_value: T, + ) -> SolverStatus { SolverStatus { exit_status, num_iter, @@ -67,12 +71,12 @@ impl SolverStatus { } /// norm of the fixed point residual - pub fn norm_fpr(&self) -> f64 { + pub fn norm_fpr(&self) -> T { self.fpr_norm } /// value of the cost at the solution - pub fn cost_value(&self) -> f64 { + pub fn cost_value(&self) -> T { self.cost_value } diff --git a/src/mocks.rs b/src/mocks.rs index a051da21..797b8bc9 100644 --- a/src/mocks.rs +++ b/src/mocks.rs @@ -1,81 +1,106 @@ use crate::{matrix_operations, SolverError}; +use num::Float; +use std::iter::Sum; +use std::ops::Mul; -pub const SOLUTION_A: [f64; 2] = [-0.148_959_718_255_77, 0.133_457_867_273_39]; -pub const SOLUTION_HARD: [f64; 3] = [ - -0.041_123_164_672_281, - -0.028_440_417_469_206, - 0.000_167_276_757_790, -]; - -pub fn lipschitz_mock(u: &[f64], g: &mut [f64]) -> Result<(), SolverError> { - g[0] = 3.0 * u[0]; - g[1] = 2.0 * u[1]; - g[2] = 4.5; +fn cast(value: f64) -> T { + T::from(value).expect("floating-point constant must be representable") +} + +pub fn solution_a() -> [T; 2] { + [ + cast::(-0.148_959_718_255_77), + cast::(0.133_457_867_273_39), + ] +} + +pub fn solution_hard() -> [T; 3] { + [ + cast::(-0.041_123_164_672_281), + 
cast::(-0.028_440_417_469_206), + cast::(0.000_167_276_757_790), + ] +} + +pub fn lipschitz_mock(u: &[T], g: &mut [T]) -> Result<(), SolverError> { + g[0] = cast::(3.0) * u[0]; + g[1] = cast::(2.0) * u[1]; + g[2] = cast::(4.5); Ok(()) } -pub fn void_parameteric_cost(_u: &[f64], _p: &[f64], _cost: &mut f64) -> Result<(), SolverError> { +pub fn void_parameteric_cost( + _u: &[T], + _p: &[T], + _cost: &mut T, +) -> Result<(), SolverError> { Ok(()) } -pub fn void_parameteric_gradient( - _u: &[f64], - _p: &[f64], - _grad: &mut [f64], +pub fn void_parameteric_gradient( + _u: &[T], + _p: &[T], + _grad: &mut [T], ) -> Result<(), SolverError> { Ok(()) } -pub fn void_mapping(_u: &[f64], _result: &mut [f64]) -> Result<(), SolverError> { +pub fn void_mapping(_u: &[T], _result: &mut [T]) -> Result<(), SolverError> { Ok(()) } -pub fn void_cost(_u: &[f64], _cost: &mut f64) -> Result<(), SolverError> { +pub fn void_cost(_u: &[T], _cost: &mut T) -> Result<(), SolverError> { Ok(()) } -pub fn void_gradient(_u: &[f64], _grad: &mut [f64]) -> Result<(), SolverError> { +pub fn void_gradient(_u: &[T], _grad: &mut [T]) -> Result<(), SolverError> { Ok(()) } -pub fn my_cost(u: &[f64], cost: &mut f64) -> Result<(), SolverError> { - *cost = 0.5 * (u[0].powi(2) + 2. * u[1].powi(2) + 2.0 * u[0] * u[1]) + u[0] - u[1] + 3.0; +pub fn my_cost(u: &[T], cost: &mut T) -> Result<(), SolverError> { + *cost = cast::(0.5) + * (u[0].powi(2) + cast::(2.0) * u[1].powi(2) + cast::(2.0) * u[0] * u[1]) + + u[0] + - u[1] + + cast::(3.0); Ok(()) } -pub fn my_gradient(u: &[f64], grad: &mut [f64]) -> Result<(), SolverError> { - grad[0] = u[0] + u[1] + 1.0; - grad[1] = u[0] + 2. 
* u[1] - 1.0; +pub fn my_gradient(u: &[T], grad: &mut [T]) -> Result<(), SolverError> { + grad[0] = u[0] + u[1] + T::one(); + grad[1] = u[0] + cast::(2.0) * u[1] - T::one(); Ok(()) } -pub fn rosenbrock_cost(a: f64, b: f64, u: &[f64]) -> f64 { +pub fn rosenbrock_cost(a: T, b: T, u: &[T]) -> T { (a - u[0]).powi(2) + b * (u[1] - u[0].powi(2)).powi(2) } -pub fn rosenbrock_grad(a: f64, b: f64, u: &[f64], grad: &mut [f64]) { - grad[0] = 2.0 * u[0] - 2.0 * a - 4.0 * b * u[0] * (-u[0].powi(2) + u[1]); - grad[1] = b * (-2.0 * u[0].powi(2) + 2.0 * u[1]); +pub fn rosenbrock_grad(a: T, b: T, u: &[T], grad: &mut [T]) { + grad[0] = cast::(2.0) * u[0] + - cast::(2.0) * a + - cast::(4.0) * b * u[0] * (-u[0].powi(2) + u[1]); + grad[1] = b * (-cast::(2.0) * u[0].powi(2) + cast::(2.0) * u[1]); } -pub fn hard_quadratic_cost(u: &[f64], cost: &mut f64) -> Result<(), SolverError> { - *cost = (4. * u[0].powi(2)) / 2. - + 5.5 * u[1].powi(2) - + 500.5 * u[2].powi(2) - + 5. * u[0] * u[1] - + 25. * u[0] * u[2] - + 5. * u[1] * u[2] +pub fn hard_quadratic_cost(u: &[T], cost: &mut T) -> Result<(), SolverError> { + *cost = (cast::(4.0) * u[0].powi(2)) / cast::(2.0) + + cast::(5.5) * u[1].powi(2) + + cast::(500.5) * u[2].powi(2) + + cast::(5.0) * u[0] * u[1] + + cast::(25.0) * u[0] * u[2] + + cast::(5.0) * u[1] * u[2] + u[0] + u[1] + u[2]; Ok(()) } -pub fn hard_quadratic_gradient(u: &[f64], grad: &mut [f64]) -> Result<(), SolverError> { +pub fn hard_quadratic_gradient(u: &[T], grad: &mut [T]) -> Result<(), SolverError> { // norm(Hessian) = 1000.653 (Lipschitz gradient) - grad[0] = 4. * u[0] + 5. * u[1] + 25. * u[2] + 1.; - grad[1] = 5. * u[0] + 11. * u[1] + 5. * u[2] + 1.; - grad[2] = 25. * u[0] + 5. * u[1] + 1001. 
* u[2] + 1.; + grad[0] = cast::(4.0) * u[0] + cast::(5.0) * u[1] + cast::(25.0) * u[2] + T::one(); + grad[1] = cast::(5.0) * u[0] + cast::(11.0) * u[1] + cast::(5.0) * u[2] + T::one(); + grad[2] = cast::(25.0) * u[0] + cast::(5.0) * u[1] + cast::(1001.0) * u[2] + T::one(); Ok(()) } @@ -85,28 +110,27 @@ pub fn hard_quadratic_gradient(u: &[f64], grad: &mut [f64]) -> Result<(), Solver /// /// where `m` is the length of `xi`. It is assumed that the length of /// `u` is larger than the length of `xi` -pub fn psi_cost_dummy(u: &[f64], xi: &[f64], cost: &mut f64) -> Result<(), SolverError> { +pub fn psi_cost_dummy(u: &[T], xi: &[T], cost: &mut T) -> Result<(), SolverError> +where + T: Float + Sum + Mul, +{ let u_len = u.len(); let xi_len = xi.len(); assert!(u_len > xi_len); - let sum_u = u.iter().fold(0.0, |mut sum, ui| { - sum += ui; - sum - }); + let sum_u = u.iter().fold(T::zero(), |sum, ui| sum + *ui); // psi_cost = 0.5*SUM(ui^2) + xi[0] * sum_u - *cost = - 0.5 * u.iter().fold(0.0, |mut sum_of_squares, ui| { - sum_of_squares += ui.powi(2); - sum_of_squares - }) + xi[0] * sum_u; + *cost = cast::(0.5) + * u.iter() + .fold(T::zero(), |sum_of_squares, ui| sum_of_squares + ui.powi(2)) + + xi[0] * sum_u; // psi_cost += xi[1..m]'*u[0..m-1] let m = std::cmp::min(u_len, xi_len - 1); - *cost += matrix_operations::inner_product(&u[..m], &xi[1..=m]); + *cost = *cost + matrix_operations::inner_product(&u[..m], &xi[1..=m]); Ok(()) } /// Gradient of `psi_cost` -pub fn psi_gradient_dummy(u: &[f64], xi: &[f64], grad: &mut [f64]) -> Result<(), SolverError> { +pub fn psi_gradient_dummy(u: &[T], xi: &[T], grad: &mut [T]) -> Result<(), SolverError> { let u_len = u.len(); let xi_len = xi.len(); assert!( @@ -115,11 +139,11 @@ pub fn psi_gradient_dummy(u: &[f64], xi: &[f64], grad: &mut [f64]) -> Result<(), ); assert!(u_len == grad.len(), "u and grad must have equal lengths"); grad.copy_from_slice(u); - grad.iter_mut().for_each(|grad_i| *grad_i += xi[0]); + grad.iter_mut().for_each(|grad_i| 
*grad_i = *grad_i + xi[0]); xi[1..] .iter() .zip(grad.iter_mut()) - .for_each(|(xi_i, grad_i)| *grad_i += xi_i); + .for_each(|(xi_i, grad_i)| *grad_i = *grad_i + *xi_i); Ok(()) } @@ -132,11 +156,11 @@ pub fn psi_gradient_dummy(u: &[f64], xi: &[f64], grad: &mut [f64]) -> Result<(), /// /// It is `F1: R^3 --> R^2` /// -pub fn mapping_f1_affine(u: &[f64], f1u: &mut [f64]) -> Result<(), SolverError> { +pub fn mapping_f1_affine(u: &[T], f1u: &mut [T]) -> Result<(), SolverError> { assert!(u.len() == 3, "the length of u must be equal to 3"); assert!(f1u.len() == 2, "the length of F1(u) must be equal to 2"); - f1u[0] = 2.0 * u[0] + u[2] - 1.0; - f1u[1] = u[0] + 3.0 * u[1]; + f1u[0] = cast::(2.0) * u[0] + u[2] - T::one(); + f1u[1] = u[0] + cast::(3.0) * u[1]; Ok(()) } @@ -150,16 +174,16 @@ pub fn mapping_f1_affine(u: &[f64], f1u: &mut [f64]) -> Result<(), SolverError> /// 3*d2 /// d1 ] /// ``` -/// -pub fn mapping_f1_affine_jacobian_product( - _u: &[f64], - d: &[f64], - res: &mut [f64], +/// +pub fn mapping_f1_affine_jacobian_product( + _u: &[T], + d: &[T], + res: &mut [T], ) -> Result<(), SolverError> { assert!(d.len() == 2, "the length of d must be equal to 3"); assert!(res.len() == 3, "the length of res must be equal to 3"); - res[0] = 2.0 * d[0] + d[1]; - res[1] = 3.0 * d[1]; + res[0] = cast::(2.0) * d[0] + d[1]; + res[1] = cast::(3.0) * d[1]; res[2] = d[0]; Ok(()) } @@ -169,15 +193,18 @@ pub fn mapping_f1_affine_jacobian_product( /// ``` /// f0(u) = 0.5*u'*u + 1'*u /// ``` -pub fn f0(u: &[f64], cost: &mut f64) -> Result<(), SolverError> { - *cost = 0.5 * matrix_operations::norm2_squared(u) + matrix_operations::sum(u); +pub fn f0(u: &[T], cost: &mut T) -> Result<(), SolverError> +where + T: Float + Sum + Mul, +{ + *cost = cast::(0.5) * matrix_operations::norm2_squared(u) + matrix_operations::sum(u); Ok(()) } -pub fn d_f0(u: &[f64], grad: &mut [f64]) -> Result<(), SolverError> { +pub fn d_f0(u: &[T], grad: &mut [T]) -> Result<(), SolverError> { grad.iter_mut() 
.zip(u.iter()) - .for_each(|(grad_i, u_i)| *grad_i = u_i + 1.0); + .for_each(|(grad_i, u_i)| *grad_i = *u_i + T::one()); Ok(()) } diff --git a/src/tests.rs b/src/tests.rs index b8c5e1ca..c11c575d 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -21,6 +21,6 @@ fn t_access() { assert!(status.has_converged()); assert!(status.norm_fpr() < tolerance); - assert!((-0.14896 - u[0]).abs() < 1e-4); - assert!((0.13346 - u[1]).abs() < 1e-4); + assert!((-0.14896_f64 - u[0]).abs() < 1e-4); + assert!((0.13346_f64 - u[1]).abs() < 1e-4); } From 2cba707ee35bc7a02e96ab275c9a0dfe70159b11 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 18:38:21 +0000 Subject: [PATCH 017/133] more thorough testing (f32+f64) --- .github/pull_request_template.md | 4 ++ src/alm/tests.rs | 74 +++++++++++++++++++++++++++++++ src/constraints/tests.rs | 43 ++++++++++++++++++ src/core/panoc/panoc_optimizer.rs | 38 ++++++++++++++++ 4 files changed, 159 insertions(+) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 9703b739..c4742060 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -13,6 +13,10 @@ - OpEn version ... - opengen version ... 
+## Merge dependencies + +- PR xyz should be merged first + ## Checklist - [ ] Documentation diff --git a/src/alm/tests.rs b/src/alm/tests.rs index f85ec324..4d8e29da 100644 --- a/src/alm/tests.rs +++ b/src/alm/tests.rs @@ -390,6 +390,80 @@ fn t_alm_numeric_test_1() { ); } +#[test] +fn t_alm_numeric_test_1_f32() { + let tolerance = 1e-4_f32; + let nx = 3; + let n1 = 2; + let n2 = 0; + let lbfgs_mem = 3; + let panoc_cache = PANOCCache::::new(nx, tolerance, lbfgs_mem); + let mut alm_cache = AlmCache::::new(panoc_cache, n1, n2); + + let set_c = Ball2::new(None, 1.0_f32); + let bounds = Ball2::new(None, 10.0_f32); + let set_y = Ball2::new(None, 10_000.0_f32); + + let factory = AlmFactory::new( + mocks::f0::, + mocks::d_f0::, + Some(mocks::mapping_f1_affine::), + Some(mocks::mapping_f1_affine_jacobian_product::), + no_mapping::(), + no_jacobian_mapping::(), + Some(set_c), + n2, + ); + + let set_c_b = Ball2::new(None, 1.0_f32); + let alm_problem = AlmProblem::new( + bounds, + Some(set_c_b), + Some(set_y), + |u: &[f32], xi: &[f32], cost: &mut f32| -> FunctionCallResult { factory.psi(u, xi, cost) }, + |u: &[f32], xi: &[f32], grad: &mut [f32]| -> FunctionCallResult { + factory.d_psi(u, xi, grad) + }, + Some(mocks::mapping_f1_affine::), + no_mapping::(), + n1, + n2, + ); + + let mut alm_optimizer = AlmOptimizer::new(&mut alm_cache, alm_problem) + .with_delta_tolerance(1e-3_f32) + .with_max_outer_iterations(30) + .with_epsilon_tolerance(1e-4_f32) + .with_initial_inner_tolerance(1e-2_f32) + .with_inner_tolerance_update_factor(0.5_f32) + .with_initial_penalty(1.0_f32) + .with_penalty_update_factor(1.2_f32) + .with_sufficient_decrease_coefficient(0.1_f32) + .with_initial_lagrange_multipliers(&vec![5.0_f32; n1]); + + let mut u = vec![0.0_f32; nx]; + let solver_result = alm_optimizer.solve(&mut u); + assert!(solver_result.is_ok()); + let r = solver_result.unwrap(); + assert_eq!(ExitStatus::Converged, r.exit_status()); + assert!(r.num_outer_iterations() > 0 && 
r.num_outer_iterations() <= 30); + assert!(r.last_problem_norm_fpr() < tolerance); + assert!(r.delta_y_norm_over_c() < 1e-3_f32); + + let mut f1u = vec![0.0_f32; n1]; + assert!(mocks::mapping_f1_affine(&u, &mut f1u).is_ok()); + let mut projection = f1u.clone(); + let set_c_check = Ball2::new(None, 1.0_f32); + set_c_check.project(&mut projection); + assert!((f1u[0] - projection[0]).abs() < 2e-3_f32); + assert!((f1u[1] - projection[1]).abs() < 2e-3_f32); + + let cost_actual = r.cost(); + let mut cost_expected = 0.0_f32; + assert!(mocks::f0(&u, &mut cost_expected).is_ok()); + assert!((cost_actual - cost_expected).abs() < 2e-3_f32); +} + fn mapping_f2(u: &[f64], res: &mut [f64]) -> FunctionCallResult { res[0] = u[0]; res[1] = u[1]; diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index 0db9549c..a281b4d6 100644 --- a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -491,6 +491,27 @@ fn t_cartesian_product_ball_and_rectangle() { ); } +#[test] +fn t_cartesian_product_ball_and_rectangle_f32() { + let xmin1 = vec![-1.0_f32; 2]; + let xmax1 = vec![1.0_f32; 2]; + let rectangle1 = Rectangle::new(Some(&xmin1), Some(&xmax1)); + + let radius = 1.0_f32; + let ball = Ball2::new(None, radius); + + let cart_prod = CartesianProduct::new() + .add_constraint(2, rectangle1) + .add_constraint(5, ball); + + let mut x = [-4.0_f32, 0.25_f32, 2.0_f32, -1.0_f32, 2.0_f32]; + cart_prod.project(&mut x); + + assert_eq!([-1.0_f32, 0.25_f32], x[..2]); + let ball_norm = crate::matrix_operations::norm2(&x[2..5]); + assert!((ball_norm - radius).abs() < 1e-5_f32); +} + #[test] fn t_second_order_cone_case_i() { let soc = SecondOrderCone::new(1.0); @@ -529,6 +550,17 @@ fn t_second_order_cone_case_iii() { assert!((norm_z - alpha * x[2]).abs() <= 1e-7); } +#[test] +fn t_second_order_cone_case_iii_f32() { + let alpha = 1.5_f32; + let soc = SecondOrderCone::new(alpha); + let mut x = vec![1.0_f32, 1.0_f32, 0.1_f32]; + soc.project(&mut x); + let norm_z = 
crate::matrix_operations::norm2(&x[..=1]); + assert!(norm_z <= alpha * x[2] + 1e-5_f32); + assert!((norm_z - alpha * x[2]).abs() <= 1e-4_f32); +} + #[test] #[should_panic] fn t_second_order_cone_illegal_alpha_i() { @@ -885,6 +917,17 @@ fn t_ball1_random_optimality_conditions() { } } +#[test] +fn t_ball1_projection_f32() { + let ball1 = Ball1::new(None, 1.0_f32); + let mut x = [2.0_f32, -1.0_f32, 0.0_f32]; + ball1.project(&mut x); + assert!((x[0] - 1.0_f32).abs() < 1e-6_f32); + assert!(x[1].abs() < 1e-6_f32); + assert!(x[2].abs() < 1e-6_f32); + assert!(crate::matrix_operations::norm1(&x) <= 1.0_f32 + 1e-6_f32); +} + #[test] fn t_ball1_random_optimality_conditions_centered() { for n in (10..=60).step_by(10) { diff --git a/src/core/panoc/panoc_optimizer.rs b/src/core/panoc/panoc_optimizer.rs index 1056597b..8c7018ce 100644 --- a/src/core/panoc/panoc_optimizer.rs +++ b/src/core/panoc/panoc_optimizer.rs @@ -263,6 +263,44 @@ mod tests { ); } + #[test] + fn t_panoc_optimizer_rosenbrock_f32() { + let tolerance = 1e-4_f32; + let a_param = 1.0_f32; + let b_param = 200.0_f32; + let n_dimension = 2; + let lbfgs_memory = 8; + let max_iters = 120; + let mut u_solution = [-1.5_f32, 0.9_f32]; + + let cost_gradient = |u: &[f32], grad: &mut [f32]| -> FunctionCallResult { + mocks::rosenbrock_grad(a_param, b_param, u, grad); + Ok(()) + }; + let cost_function = |u: &[f32], c: &mut f32| -> FunctionCallResult { + *c = mocks::rosenbrock_cost(a_param, b_param, u); + Ok(()) + }; + + let radius = 2.0_f32; + let bounds = constraints::Ball2::new(None, radius); + let mut panoc_cache = PANOCCache::::new(n_dimension, tolerance, lbfgs_memory); + let problem = Problem::new(&bounds, cost_gradient, cost_function); + let mut panoc = PANOCOptimizer::new(problem, &mut panoc_cache).with_max_iter(max_iters); + let status = panoc.solve(&mut u_solution).unwrap(); + + assert_eq!(max_iters, panoc.max_iter); + assert!(status.has_converged()); + assert!(status.iterations() < max_iters); + 
assert!(status.norm_fpr() < tolerance); + + let mut u_project = [0.0_f32; 2]; + u_project.copy_from_slice(&u_solution); + bounds.project(&mut u_project); + assert!((u_solution[0] - u_project[0]).abs() < 1e-5_f32); + assert!((u_solution[1] - u_project[1]).abs() < 1e-5_f32); + } + #[test] fn t_panoc_in_loop() { /* USER PARAMETERS */ From 8e8cb3ecdf0b219aad21eb0d5c705b6a76f745e1 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 18:48:57 +0000 Subject: [PATCH 018/133] expand f32 coverage - update error messages - unit tests for AffineSpace and BallP - and for ALM/PM solver --- src/alm/alm_optimizer.rs | 16 ++++---- src/alm/tests.rs | 84 ++++++++++++++++++++++++++++++++++++++++ src/constraints/tests.rs | 48 +++++++++++++++++++++++ src/core/fbs/tests.rs | 36 +++++++++++++++++ 4 files changed, 176 insertions(+), 8 deletions(-) diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs index c823ce1b..e028be61 100644 --- a/src/alm/alm_optimizer.rs +++ b/src/alm/alm_optimizer.rs @@ -435,7 +435,7 @@ where pub fn with_penalty_update_factor(mut self, penalty_update_factor: T) -> Self { assert!( penalty_update_factor > T::one() + T::epsilon(), - "`penalty_update_factor` must be larger than 1.0 + f64::EPSILON" + "`penalty_update_factor` must be larger than 1.0 + T::epsilon()" ); self.penalty_update_factor = penalty_update_factor; self @@ -465,7 +465,7 @@ where assert!( inner_tolerance_update_factor > T::epsilon() && inner_tolerance_update_factor < T::one() - T::epsilon(), - "the tolerance update factor needs to be in (f64::EPSILON, 1)" + "the tolerance update factor needs to be in (T::epsilon(), 1)" ); self.epsilon_update_factor = inner_tolerance_update_factor; self @@ -533,7 +533,7 @@ where assert!( sufficient_decrease_coefficient < T::one() - T::epsilon() && sufficient_decrease_coefficient > T::epsilon(), - "sufficient_decrease_coefficient must be in (f64::EPSILON, 1.0 - f64::EPSILON)" + "sufficient_decrease_coefficient must be in (T::epsilon(), 
1.0 - T::epsilon())" ); self.sufficient_decrease_coeff = sufficient_decrease_coefficient; self @@ -587,7 +587,7 @@ where pub fn with_initial_penalty(self, c0: T) -> Self { assert!( c0 > T::epsilon(), - "the initial penalty must be larger than f64::EPSILON" + "the initial penalty must be larger than T::epsilon()" ); if let Some(xi_in_cache) = &mut self.alm_cache.xi { xi_in_cache[0] = c0; @@ -685,10 +685,10 @@ where let problem = &self.alm_problem; if let Some(y_set) = &problem.alm_set_y { // NOTE: as_mut() converts from &mut Option to Option<&mut T> - // * cache.y is Option> - // * cache.y.as_mut is Option<&mut Vec> - // * which can be treated as Option<&mut [f64]> - // * y_vec is &mut [f64] + // * cache.y is Option> + // * cache.y.as_mut is Option<&mut Vec> + // * which can be treated as Option<&mut [T]> + // * y_vec is &mut [T] if let Some(xi_vec) = self.alm_cache.xi.as_mut() { y_set.project(&mut xi_vec[1..]); } diff --git a/src/alm/tests.rs b/src/alm/tests.rs index 4d8e29da..09b6e909 100644 --- a/src/alm/tests.rs +++ b/src/alm/tests.rs @@ -479,6 +479,25 @@ fn jac_mapping_f2_tr(_u: &[f64], d: &[f64], res: &mut [f64]) -> FunctionCallResu Ok(()) } +fn mapping_f2_generic(u: &[T], res: &mut [T]) -> FunctionCallResult { + res[0] = u[0]; + res[1] = u[1]; + res[2] = u[2] - u[0]; + res[3] = u[2] - u[0] - u[1]; + Ok(()) +} + +fn jac_mapping_f2_tr_generic( + _u: &[T], + d: &[T], + res: &mut [T], +) -> FunctionCallResult { + res[0] = d[0] - d[2] - d[3]; + res[1] = d[1] - d[3]; + res[2] = d[2] + d[3]; + Ok(()) +} + #[test] fn t_alm_numeric_test_2() { let tolerance = 1e-8; @@ -543,6 +562,71 @@ fn t_alm_numeric_test_2() { println!("y = {:#?}", r.lagrange_multipliers()); } +#[test] +fn t_alm_numeric_test_2_f32() { + let tolerance = 1e-4_f32; + let nx = 3; + let n1 = 2; + let n2 = 4; + let lbfgs_mem = 3; + let panoc_cache = PANOCCache::::new(nx, tolerance, lbfgs_mem); + let mut alm_cache = AlmCache::::new(panoc_cache, n1, n2); + + let set_c = Ball2::new(None, 1.0_f32); + let 
bounds = Ball2::new(None, 10.0_f32); + let set_y = Ball2::new(None, 10_000.0_f32); + + let factory = AlmFactory::new( + mocks::f0::, + mocks::d_f0::, + Some(mocks::mapping_f1_affine::), + Some(mocks::mapping_f1_affine_jacobian_product::), + Some(mapping_f2_generic::), + Some(jac_mapping_f2_tr_generic::), + Some(set_c), + n2, + ); + + let set_c_b = Ball2::new(None, 1.0_f32); + let alm_problem = AlmProblem::new( + bounds, + Some(set_c_b), + Some(set_y), + |u: &[f32], xi: &[f32], cost: &mut f32| -> FunctionCallResult { factory.psi(u, xi, cost) }, + |u: &[f32], xi: &[f32], grad: &mut [f32]| -> FunctionCallResult { + factory.d_psi(u, xi, grad) + }, + Some(mocks::mapping_f1_affine::), + Some(mapping_f2_generic::), + n1, + n2, + ); + + let mut alm_optimizer = AlmOptimizer::new(&mut alm_cache, alm_problem) + .with_delta_tolerance(1e-3_f32) + .with_epsilon_tolerance(1e-4_f32) + .with_initial_inner_tolerance(1e-3_f32); + + let mut u = vec![0.0_f32; nx]; + let solver_result = alm_optimizer.solve(&mut u); + assert!(solver_result.is_ok()); + let r = solver_result.unwrap(); + assert_eq!(ExitStatus::Converged, r.exit_status()); + assert!(r.num_outer_iterations() > 0 && r.num_outer_iterations() <= 10); + assert!(r.last_problem_norm_fpr() < tolerance); + + let mut f1u = vec![0.0_f32; n1]; + assert!(mocks::mapping_f1_affine(&u, &mut f1u).is_ok()); + let mut f1_proj = f1u.clone(); + Ball2::new(None, 1.0_f32).project(&mut f1_proj); + assert!((f1u[0] - f1_proj[0]).abs() < 2e-3_f32); + assert!((f1u[1] - f1_proj[1]).abs() < 2e-3_f32); + + let mut f2u = vec![0.0_f32; n2]; + assert!(mapping_f2_generic(&u, &mut f2u).is_ok()); + assert!(crate::matrix_operations::norm2(&f2u) < 1e-3_f32); +} + // Trait alias (type aliases are not stable yet, so the alternative is to use // the following trait definition, i.e., to "extend" Fn and implement it) // See https://bit.ly/2zJvd6g diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index a281b4d6..71e86e01 100644 --- 
a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -1135,6 +1135,31 @@ fn t_affine_space() { ); } +#[test] +fn t_affine_space_f32() { + let a = vec![ + 0.5_f32, 0.1, 0.2, -0.3, -0.6, 0.3, 0.0, 0.5, 1.0, 0.1, -1.0, -0.4, + ]; + let b = vec![1.0_f32, 2.0, -0.5]; + let affine_set = AffineSpace::new(a.clone(), b.clone()); + let mut x = [1.0_f32, -2.0, -0.3, 0.5]; + affine_set.project(&mut x); + + let x_correct = [1.888_564_3_f32, 5.629_857_f32, 1.796_204_9_f32, 2.888_363_f32]; + assert!((x[0] - x_correct[0]).abs() < 1e-4_f32); + assert!((x[1] - x_correct[1]).abs() < 1e-4_f32); + assert!((x[2] - x_correct[2]).abs() < 1e-4_f32); + assert!((x[3] - x_correct[3]).abs() < 1e-4_f32); + + for (row, bi) in a.chunks_exact(4).zip(b.iter()) { + let ax_i = row + .iter() + .zip(x.iter()) + .fold(0.0_f32, |sum, (aij, xj)| sum + (*aij) * (*xj)); + assert!((ax_i - *bi).abs() < 1e-4_f32); + } +} + #[test] fn t_affine_space_larger() { let a = vec![ @@ -1364,3 +1389,26 @@ fn t_ballp_at_xc_projection() { "wrong projection on lp-ball centered at xc != 0", ); } + +#[test] +fn t_ballp_at_xc_projection_f32() { + let radius = 0.8_f32; + let mut x = [0.0_f32, 0.1]; + let x_center = [1.0_f32, 3.0]; + let p = 4.0_f32; + let tol = 1e-6_f32; + let max_iters: usize = 200; + let ball = BallP::new(Some(&x_center), radius, p, tol, max_iters); + ball.project(&mut x); + + let nrm = x + .iter() + .zip(x_center.iter()) + .fold(0.0_f32, |s, (xi, yi)| s + (*xi - *yi).abs().powf(p)) + .powf(1.0_f32 / p); + assert!((radius - nrm).abs() < 1e-4_f32); + + let proj_expected = [0.517_872_75_f32, 2.227_798_2_f32]; + assert!((x[0] - proj_expected[0]).abs() < 1e-4_f32); + assert!((x[1] - proj_expected[1]).abs() < 1e-4_f32); +} diff --git a/src/core/fbs/tests.rs b/src/core/fbs/tests.rs index bc83b8f5..e3ed7a31 100644 --- a/src/core/fbs/tests.rs +++ b/src/core/fbs/tests.rs @@ -9,6 +9,13 @@ const N_DIM: usize = 2; #[cfg(test)] use crate::mocks; +fn solve_with_optimizer_trait( + optimizer: &mut impl 
crate::core::Optimizer, + u: &mut [f32], +) -> Result, crate::SolverError> { + optimizer.solve(u) +} + #[test] fn t_solve_fbs_hard() { let bounds = constraints::NoConstraints::new(); @@ -206,3 +213,32 @@ fn t_solve_fbs_f32() { assert!((u[0] - expected[0]).abs() < 1e-4); assert!((u[1] - expected[1]).abs() < 1e-4); } + +#[test] +fn t_solve_fbs_f32_via_optimizer_trait() { + let radius = 0.2_f32; + let box_constraints = constraints::Ball2::new(None, radius); + let problem = Problem::new( + &box_constraints, + |u: &[f32], grad: &mut [f32]| -> FunctionCallResult { + grad[0] = u[0] + u[1] + 1.0; + grad[1] = u[0] + 2.0 * u[1] - 1.0; + Ok(()) + }, + |u: &[f32], cost: &mut f32| -> FunctionCallResult { + *cost = u[0] * u[0] + 2.0 * u[1] * u[1] + u[0] - u[1] + 3.0; + Ok(()) + }, + ); + let gamma = 0.1_f32; + let tolerance = 1e-6_f32; + + let mut fbs_cache = FBSCache::::new(NonZeroUsize::new(N_DIM).unwrap(), gamma, tolerance); + let mut u = [0.0_f32; N_DIM]; + let mut optimizer = FBSOptimizer::new(problem, &mut fbs_cache); + + let status = solve_with_optimizer_trait(&mut optimizer, &mut u).unwrap(); + + assert!(status.has_converged()); + assert!(status.norm_fpr() < tolerance); +} From 9482b22e89d071a2980450f300b81677de39eefa Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 19:15:40 +0000 Subject: [PATCH 019/133] fix clippy issues --- src/alm/alm_factory.rs | 12 ++++++------ src/alm/alm_optimizer.rs | 30 ++++++++++++++++++------------ src/cholesky_factorizer.rs | 7 +------ src/constraints/tests.rs | 26 +++++++++++++------------- 4 files changed, 38 insertions(+), 37 deletions(-) diff --git a/src/alm/alm_factory.rs b/src/alm/alm_factory.rs index 2a1085ae..bc94122e 100644 --- a/src/alm/alm_factory.rs +++ b/src/alm/alm_factory.rs @@ -260,12 +260,12 @@ where f1_u_plus_y_over_c .iter_mut() .zip(y_lagrange_mult.iter()) - .for_each(|(ti, yi)| *ti = *ti + *yi / penalty_scale); + .for_each(|(ti, yi)| *ti += *yi / penalty_scale); 
s.copy_from_slice(&f1_u_plus_y_over_c); set_c.project(&mut s); let dist_sq: T = matrix_operations::norm2_squared_diff(&f1_u_plus_y_over_c, &s); let scaling: T = half::() * penalty_parameter; - *cost = *cost + scaling * dist_sq; + *cost += scaling * dist_sq; } if let Some(f2) = &self.mapping_f2 { let c = xi[0]; @@ -273,7 +273,7 @@ where f2(u, &mut z)?; let norm_sq: T = matrix_operations::norm2_squared(&z); let scaling: T = half::() * c; - *cost = *cost + scaling * norm_sq; + *cost += scaling * norm_sq; } Ok(()) } @@ -325,7 +325,7 @@ where f1_u_plus_y_over_c .iter_mut() .zip(y_lagrange_mult.iter()) - .for_each(|(ti, yi)| *ti = *ti + *yi / c_penalty_parameter); + .for_each(|(ti, yi)| *ti += *yi / c_penalty_parameter); s_aux_var.copy_from_slice(&f1_u_plus_y_over_c); // s = t set_c.project(&mut s_aux_var); // s = Proj_C(F1(u) + y/c) @@ -340,7 +340,7 @@ where // grad += c*t grad.iter_mut() .zip(jac_prod.iter()) - .for_each(|(gradi, jac_prodi)| *gradi = *gradi + c_penalty_parameter * *jac_prodi); + .for_each(|(gradi, jac_prodi)| *gradi += c_penalty_parameter * *jac_prodi); } // Compute second part: JF2(u)'*F2(u) @@ -354,7 +354,7 @@ where // grad += c * jf2u_times_f2u_aux grad.iter_mut().zip(jf2u_times_f2u_aux.iter()).for_each( - |(gradi, jf2u_times_f2u_aux_i)| *gradi = *gradi + c * *jf2u_times_f2u_aux_i, + |(gradi, jf2u_times_f2u_aux_i)| *gradi += c * *jf2u_times_f2u_aux_i, ); } Ok(()) diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs index e028be61..8936eeb1 100644 --- a/src/alm/alm_optimizer.rs +++ b/src/alm/alm_optimizer.rs @@ -1027,21 +1027,27 @@ mod tests { FunctionCallResult, }; + type DummyParametricGradient = fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult; + type DummyParametricCost = fn(&[f64], &[f64], &mut f64) -> FunctionCallResult; + type DummyMapping = MappingType; + type DummyConstraint = Ball2<'static>; + type DummyAlmProblem = AlmProblem< + DummyMapping, + DummyMapping, + DummyParametricGradient, + DummyParametricCost, + 
DummyConstraint, + DummyConstraint, + DummyConstraint, + >; + fn make_dummy_alm_problem( n1: usize, n2: usize, - ) -> AlmProblem< - impl Fn(&[f64], &mut [f64]) -> FunctionCallResult, - impl Fn(&[f64], &mut [f64]) -> FunctionCallResult, - impl Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, - impl Fn(&[f64], &[f64], &mut f64) -> FunctionCallResult, - impl Constraint, - impl Constraint, - impl Constraint, - > { + ) -> DummyAlmProblem { // Main problem data - let psi = void_parameteric_cost; - let d_psi = void_parameteric_gradient; + let psi: DummyParametricCost = void_parameteric_cost; + let d_psi: DummyParametricGradient = void_parameteric_gradient; let bounds = Ball2::new(None, 10.0); // ALM-type data let f1: Option = if n1 == 0 { @@ -1085,7 +1091,7 @@ mod tests { // Test: with_initial_penalty let alm_optimizer = alm_optimizer.with_initial_penalty(7.0); - assert!(!alm_optimizer.alm_cache.xi.is_none()); + assert!(alm_optimizer.alm_cache.xi.is_some()); if let Some(xi) = &alm_optimizer.alm_cache.xi { unit_test_utils::assert_nearly_equal( 7.0, diff --git a/src/cholesky_factorizer.rs b/src/cholesky_factorizer.rs index 78c91757..963cef00 100644 --- a/src/cholesky_factorizer.rs +++ b/src/cholesky_factorizer.rs @@ -241,12 +241,7 @@ mod tests { let _ = factorizer.factorize(&a); assert!(3 == factorizer.dimension(), "wrong dimension"); let expected_l = [2.0, 0.0, 0.0, 6.0, 1.0, 0.0, -8.0, 5.0, 3.0]; - unit_test_utils::nearly_equal_array( - &expected_l, - &factorizer.cholesky_factor(), - 1e-10, - 1e-12, - ); + unit_test_utils::nearly_equal_array(&expected_l, factorizer.cholesky_factor(), 1e-10, 1e-12); } #[test] diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index 71e86e01..c5de49c8 100644 --- a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -177,8 +177,8 @@ fn t_finite_set_project_wrong_dimension() { #[test] fn t_rectangle_bounded() { - let xmin = vec![2.0; 5]; - let xmax = vec![4.5; 5]; + let xmin = [2.0; 5]; + let xmax = [4.5; 5]; let 
rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..])); let mut x = [1.0, 2.0, 3.0, 4.0, 5.0]; @@ -195,8 +195,8 @@ fn t_rectangle_bounded() { #[test] fn t_rectangle_bounded_f32() { - let xmin = vec![2.0_f32; 3]; - let xmax = vec![4.5_f32; 3]; + let xmin = [2.0_f32; 3]; + let xmax = [4.5_f32; 3]; let rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..])); let mut x = [1.0_f32, 3.0, 5.0]; @@ -226,8 +226,8 @@ fn t_rectangle_infinite_bounds() { #[test] #[should_panic] fn t_rectangle_incompatible_dims() { - let xmin = vec![1.0; 5]; - let xmax = vec![2.0; 4]; + let xmin = [1.0; 5]; + let xmax = [2.0; 4]; let _rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..])); } @@ -259,7 +259,7 @@ fn t_rectangle_bounded_negative_entries() { #[test] fn t_rectangle_only_xmin() { - let xmin = vec![2.0; 5]; + let xmin = [2.0; 5]; let rectangle = Rectangle::new(Some(&xmin[..]), None); let mut x = [1.0, 2.0, 3.0, 4.0, 5.0]; @@ -276,7 +276,7 @@ fn t_rectangle_only_xmin() { #[test] fn t_rectangle_only_xmax() { - let xmax = vec![-3.0; 5]; + let xmax = [-3.0; 5]; let rectangle = Rectangle::new(None, Some(&xmax[..])); let mut x = [-10.0, -20.0, 0.0, 5.0, 3.0]; @@ -1059,7 +1059,7 @@ fn t_ball1_alpha_negative() { fn t_epigraph_squared_norm_inside() { let epi = EpigraphSquaredNorm::new(); let mut x = [1., 2., 10.]; - let x_correct = x.clone(); + let x_correct = x; epi.project(&mut x); unit_test_utils::assert_nearly_equal_array( &x_correct, @@ -1088,7 +1088,7 @@ fn t_epigraph_squared_norm_correctness() { let mut x = [1., 2., 3., 4.]; let x_correct = [ 0.560142228903570, - 1.120284457807140, + 1.120_284_457_807_14, 1.680426686710711, 4.392630432414829, ]; @@ -1123,7 +1123,7 @@ fn t_affine_space() { let x_correct = [ 1.888564346697095, 5.629857182200888, - 1.796204902230790, + 1.796_204_902_230_79, 2.888362906715977, ]; unit_test_utils::assert_nearly_equal_array( @@ -1334,7 +1334,7 @@ fn is_norm_p_projection( fn t_ballp_at_origin_projection() { let radius = 0.8; let mut x = [1.0, 
-1.0, 6.0]; - let x0 = x.clone(); + let x0 = x; let p = 3.; let tol = 1e-16; let max_iters: usize = 200; @@ -1347,7 +1347,7 @@ fn t_ballp_at_origin_projection() { fn t_ballp_at_origin_x_already_inside() { let radius = 1.5; let mut x = [0.5, -0.2, 0.1]; - let x0 = x.clone(); + let x0 = x; let p = 3.; let tol = 1e-16; let max_iters: usize = 1200; From 9bcc61f8e8e53f3a95fd3084155d81efd1370c04 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Mon, 23 Mar 2026 19:18:00 +0000 Subject: [PATCH 020/133] cargo fmt --- src/alm/alm_factory.rs | 6 +++--- src/alm/alm_optimizer.rs | 5 +---- src/cholesky_factorizer.rs | 7 ++++++- src/constraints/tests.rs | 7 ++++++- 4 files changed, 16 insertions(+), 9 deletions(-) diff --git a/src/alm/alm_factory.rs b/src/alm/alm_factory.rs index bc94122e..e118e90e 100644 --- a/src/alm/alm_factory.rs +++ b/src/alm/alm_factory.rs @@ -353,9 +353,9 @@ where // = JF2(u)'*F2(u) // grad += c * jf2u_times_f2u_aux - grad.iter_mut().zip(jf2u_times_f2u_aux.iter()).for_each( - |(gradi, jf2u_times_f2u_aux_i)| *gradi += c * *jf2u_times_f2u_aux_i, - ); + grad.iter_mut() + .zip(jf2u_times_f2u_aux.iter()) + .for_each(|(gradi, jf2u_times_f2u_aux_i)| *gradi += c * *jf2u_times_f2u_aux_i); } Ok(()) } diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs index 8936eeb1..93ac5dcf 100644 --- a/src/alm/alm_optimizer.rs +++ b/src/alm/alm_optimizer.rs @@ -1041,10 +1041,7 @@ mod tests { DummyConstraint, >; - fn make_dummy_alm_problem( - n1: usize, - n2: usize, - ) -> DummyAlmProblem { + fn make_dummy_alm_problem(n1: usize, n2: usize) -> DummyAlmProblem { // Main problem data let psi: DummyParametricCost = void_parameteric_cost; let d_psi: DummyParametricGradient = void_parameteric_gradient; diff --git a/src/cholesky_factorizer.rs b/src/cholesky_factorizer.rs index 963cef00..0732e3fe 100644 --- a/src/cholesky_factorizer.rs +++ b/src/cholesky_factorizer.rs @@ -241,7 +241,12 @@ mod tests { let _ = factorizer.factorize(&a); assert!(3 == factorizer.dimension(), 
"wrong dimension"); let expected_l = [2.0, 0.0, 0.0, 6.0, 1.0, 0.0, -8.0, 5.0, 3.0]; - unit_test_utils::nearly_equal_array(&expected_l, factorizer.cholesky_factor(), 1e-10, 1e-12); + unit_test_utils::nearly_equal_array( + &expected_l, + factorizer.cholesky_factor(), + 1e-10, + 1e-12, + ); } #[test] diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index c5de49c8..c20bfea4 100644 --- a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -1145,7 +1145,12 @@ fn t_affine_space_f32() { let mut x = [1.0_f32, -2.0, -0.3, 0.5]; affine_set.project(&mut x); - let x_correct = [1.888_564_3_f32, 5.629_857_f32, 1.796_204_9_f32, 2.888_363_f32]; + let x_correct = [ + 1.888_564_3_f32, + 5.629_857_f32, + 1.796_204_9_f32, + 2.888_363_f32, + ]; assert!((x[0] - x_correct[0]).abs() < 1e-4_f32); assert!((x[1] - x_correct[1]).abs() < 1e-4_f32); assert!((x[2] - x_correct[2]).abs() < 1e-4_f32); From 2b8f72e3f70bdab793eae9caf9a37962d0b5af9b Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 11:18:08 +0000 Subject: [PATCH 021/133] [ci skip] website docs --- docs/openrust-arithmetic.mdx | 215 +++++++++++++++++++++++++++++++++++ website/sidebars.js | 2 +- 2 files changed, 216 insertions(+), 1 deletion(-) create mode 100644 docs/openrust-arithmetic.mdx diff --git a/docs/openrust-arithmetic.mdx b/docs/openrust-arithmetic.mdx new file mode 100644 index 00000000..eab443cc --- /dev/null +++ b/docs/openrust-arithmetic.mdx @@ -0,0 +1,215 @@ +--- +id: openrust-arithmetic +title: Single and double precision +description: OpEn with f32 and f64 number types +--- + +:::note Info +The functionality presented here was introduced in OpEn version [`0.12.0`](https://pypi.org/project/opengen/#history). +The new API is fully backward-compatible with previous versions of OpEn. +::: + +## Overview + +OpEn's Rust API supports both `f64` and `f32`. 
+ +Most public Rust types are generic over a scalar type `T` with `T: num::Float`, and in most places the default type is `f64`. This means: + +- if you do nothing special, you will usually get `f64` +- if you want single precision, you can explicitly use `f32` +- all quantities involved in one solver instance should use the same scalar type + +In particular, this applies to: + +- cost and gradient functions +- constraints +- `Problem` +- caches such as `PANOCCache`, `FBSCache`, and `AlmCache` +- optimizers such as `PANOCOptimizer`, `FBSOptimizer`, and `AlmOptimizer` +- solver status types such as `SolverStatus` and `AlmOptimizerStatus` + +## When to use `f64` and when to use `f32` + +### `f64` + +Use `f64` when you want maximum numerical robustness and accuracy. This is the safest default for: + +- desktop applications +- difficult nonlinear problems +- problems with tight tolerances +- problems that are sensitive to conditioning + +### `f32` + +Use `f32` when memory footprint and throughput matter more than ultimate accuracy. This is often useful for: + +- embedded applications +- high-rate MPC loops +- applications where moderate tolerances are acceptable + +In general, `f32` may require: + +- slightly looser tolerances +- more careful scaling of the problem +- fewer expectations about extremely small residuals + +## The default: `f64` + +If your functions, constants, and vectors use `f64`, you can often omit the scalar type completely. 
+ +```rust +use optimization_engine::{constraints, panoc::PANOCCache, Problem, SolverError}; +use optimization_engine::panoc::PANOCOptimizer; + +let tolerance = 1e-6; +let lbfgs_memory = 10; +let radius = 1.0; + +let bounds = constraints::Ball2::new(None, radius); + +let df = |u: &[f64], grad: &mut [f64]| -> Result<(), SolverError> { + grad[0] = u[0] + u[1] + 1.0; + grad[1] = u[0] + 2.0 * u[1] - 1.0; + Ok(()) +}; + +let f = |u: &[f64], cost: &mut f64| -> Result<(), SolverError> { + *cost = 0.5 * (u[0] * u[0] + u[1] * u[1]); + Ok(()) +}; + +let problem = Problem::new(&bounds, df, f); +let mut cache = PANOCCache::new(2, tolerance, lbfgs_memory); +let mut optimizer = PANOCOptimizer::new(problem, &mut cache); + +let mut u = [0.0, 0.0]; +let status = optimizer.solve(&mut u).unwrap(); +assert!(status.has_converged()); +``` + +Because all literals and function signatures above are `f64`, the compiler infers `T = f64`. + +## Using `f32` + +To use single precision, make the scalar type explicit throughout the problem definition. 
+ +```rust +use optimization_engine::{constraints, panoc::PANOCCache, Problem, SolverError}; +use optimization_engine::panoc::PANOCOptimizer; + +let tolerance = 1e-4_f32; +let lbfgs_memory = 10; +let radius = 1.0_f32; + +let bounds = constraints::Ball2::new(None, radius); + +let df = |u: &[f32], grad: &mut [f32]| -> Result<(), SolverError> { + grad[0] = u[0] + u[1] + 1.0_f32; + grad[1] = u[0] + 2.0_f32 * u[1] - 1.0_f32; + Ok(()) +}; + +let f = |u: &[f32], cost: &mut f32| -> Result<(), SolverError> { + *cost = 0.5_f32 * (u[0] * u[0] + u[1] * u[1]); + Ok(()) +}; + +let problem = Problem::new(&bounds, df, f); +let mut cache = PANOCCache::::new(2, tolerance, lbfgs_memory); +let mut optimizer = PANOCOptimizer::new(problem, &mut cache); + +let mut u = [0.0_f32, 0.0_f32]; +let status = optimizer.solve(&mut u).unwrap(); +assert!(status.has_converged()); +``` + +The key idea is that the same scalar type must be used consistently in: + +- the initial guess `u` +- the closures for the cost and gradient +- the constraints +- the cache +- any tolerances and numerical constants + +## Example with FBS + +The same pattern applies to other solvers. 
+ +```rust +use optimization_engine::{constraints, Problem, SolverError}; +use optimization_engine::fbs::{FBSCache, FBSOptimizer}; +use std::num::NonZeroUsize; + +let bounds = constraints::Ball2::new(None, 0.2_f32); + +let df = |u: &[f32], grad: &mut [f32]| -> Result<(), SolverError> { + grad[0] = u[0] + u[1] + 1.0_f32; + grad[1] = u[0] + 2.0_f32 * u[1] - 1.0_f32; + Ok(()) +}; + +let f = |u: &[f32], cost: &mut f32| -> Result<(), SolverError> { + *cost = u[0] * u[0] + 2.0_f32 * u[1] * u[1] + u[0] - u[1] + 3.0_f32; + Ok(()) +}; + +let problem = Problem::new(&bounds, df, f); +let mut cache = FBSCache::::new(NonZeroUsize::new(2).unwrap(), 0.1_f32, 1e-6_f32); +let mut optimizer = FBSOptimizer::new(problem, &mut cache); + +let mut u = [0.0_f32, 0.0_f32]; +let status = optimizer.solve(&mut u).unwrap(); +assert!(status.has_converged()); +``` + +## Example with ALM + +ALM also supports both precisions. As with PANOC and FBS, the scalar type should be chosen once and then used consistently throughout the ALM problem, cache, mappings, and tolerances. + +For example, if you use: + +- `AlmCache::` +- `PANOCCache::` +- `Ball2::` +- closures of type `|u: &[f32], ...|` + +then the whole ALM solve runs in single precision. + +If instead you use plain `f64` literals and `&[f64]` closures, the solver runs in double precision. + +## Type inference tips + +Rust usually infers the scalar type correctly, but explicit annotations are often helpful for `f32`. 
+ +Good ways to make `f32` intent clear are: + +- suffix literals, for example `1.0_f32` and `1e-4_f32` +- annotate vectors and arrays, for example `let mut u = [0.0_f32; 2];` +- annotate caches explicitly, for example `PANOCCache::::new(...)` +- annotate closure arguments, for example `|u: &[f32], grad: &mut [f32]|` + +## Important rule: do not mix `f32` and `f64` + +The following combinations are problematic: + +- `u: &[f32]` with a cost function writing to `&mut f64` +- `Ball2::new(None, 1.0_f64)` together with `PANOCCache::` +- `tolerance = 1e-6` in one place and `1e-6_f32` elsewhere if inference becomes ambiguous + +Choose one scalar type per optimization problem and use it everywhere. + +## Choosing tolerances + +When moving from `f64` to `f32`, it is often a good idea to relax tolerances. + +Typical starting points are: + +- `f64`: `1e-6`, `1e-8`, or smaller if needed +- `f32`: `1e-4` or `1e-5` + +The right choice depends on: + +- scaling of the problem +- conditioning +- solver settings +- whether the problem is solved repeatedly in real time diff --git a/website/sidebars.js b/website/sidebars.js index 94729cf6..752f7944 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -26,7 +26,7 @@ module.exports = { { type: 'category', label: 'Rust', - items: ['openrust-basic', 'openrust-alm', 'openrust-features'], + items: ['openrust-basic', 'openrust-alm', 'openrust-features', 'openrust-arithmetic'], }, { type: 'category', From e87c3cdaeeb99002ab6be1c5c5eb956279f7a249 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 12:32:45 +0000 Subject: [PATCH 022/133] ROS2 support - first implementation of ROS2 pkg generation - basic unit tests (it works) --- .../opengen/builder/optimizer_builder.py | 9 +- open-codegen/opengen/builder/ros_builder.py | 321 ++++++++++-------- open-codegen/opengen/config/build_config.py | 26 ++ open-codegen/opengen/config/ros_config.py | 11 +- .../opengen/templates/ros/open_optimizer.cpp | 3 +- 
.../opengen/templates/ros2/CMakeLists.txt | 55 +++ .../templates/ros2/OptimizationParameters.msg | 4 + .../templates/ros2/OptimizationResult.msg | 18 + open-codegen/opengen/templates/ros2/README.md | 126 +++++++ .../opengen/templates/ros2/open_optimizer.cpp | 181 ++++++++++ .../opengen/templates/ros2/open_optimizer.hpp | 40 +++ .../templates/ros2/open_optimizer.launch.py | 20 ++ .../opengen/templates/ros2/open_params.yaml | 5 + .../opengen/templates/ros2/package.xml | 24 ++ open-codegen/test/test.py | 41 +++ 15 files changed, 727 insertions(+), 157 deletions(-) create mode 100644 open-codegen/opengen/templates/ros2/CMakeLists.txt create mode 100644 open-codegen/opengen/templates/ros2/OptimizationParameters.msg create mode 100644 open-codegen/opengen/templates/ros2/OptimizationResult.msg create mode 100644 open-codegen/opengen/templates/ros2/README.md create mode 100644 open-codegen/opengen/templates/ros2/open_optimizer.cpp create mode 100644 open-codegen/opengen/templates/ros2/open_optimizer.hpp create mode 100644 open-codegen/opengen/templates/ros2/open_optimizer.launch.py create mode 100644 open-codegen/opengen/templates/ros2/open_params.yaml create mode 100644 open-codegen/opengen/templates/ros2/package.xml diff --git a/open-codegen/opengen/builder/optimizer_builder.py b/open-codegen/opengen/builder/optimizer_builder.py index 316f5422..51500422 100644 --- a/open-codegen/opengen/builder/optimizer_builder.py +++ b/open-codegen/opengen/builder/optimizer_builder.py @@ -12,7 +12,7 @@ import sys from importlib.metadata import version -from .ros_builder import RosBuilder +from .ros_builder import ROS2Builder, RosBuilder _AUTOGEN_COST_FNAME = 'auto_casadi_cost.c' _AUTOGEN_GRAD_FNAME = 'auto_casadi_grad.c' @@ -920,4 +920,11 @@ def build(self): self.__solver_config) ros_builder.build() + if self.__build_config.ros2_config is not None: + ros2_builder = ROS2Builder( + self.__meta, + self.__build_config, + self.__solver_config) + ros2_builder.build() + return self.__info() 
diff --git a/open-codegen/opengen/builder/ros_builder.py b/open-codegen/opengen/builder/ros_builder.py index 0108a8a5..0b6c537c 100644 --- a/open-codegen/opengen/builder/ros_builder.py +++ b/open-codegen/opengen/builder/ros_builder.py @@ -1,12 +1,11 @@ import opengen.definitions as og_dfn -import os +import datetime import logging -import jinja2 +import os import shutil -import datetime -_ROS_PREFIX = 'ros_node_' +import jinja2 def make_dir_if_not_exists(directory): @@ -14,209 +13,231 @@ def make_dir_if_not_exists(directory): os.makedirs(directory) -def get_template(name): - file_loader = jinja2.FileSystemLoader(og_dfn.templates_dir()) - env = jinja2.Environment(loader=file_loader, autoescape=True) - return env.get_template(name) - - -def get_ros_template(name): - file_loader = jinja2.FileSystemLoader(og_dfn.templates_subdir('ros')) +def get_ros_template(template_subdir, name): + file_loader = jinja2.FileSystemLoader(og_dfn.templates_subdir(template_subdir)) env = jinja2.Environment(loader=file_loader, autoescape=True) return env.get_template(name) -class RosBuilder: +class _BaseRosBuilder: """ - Code generation for ROS-related files + Shared code generation for ROS-related packages For internal use """ + _template_subdir = None + _logger_name = None + _logger_tag = None + _launch_file_name = None + def __init__(self, meta, build_config, solver_config): - self.__meta = meta - self.__build_config = build_config - self.__solver_config = solver_config - self.__logger = logging.getLogger('opengen.builder.RosBuilder') + self._meta = meta + self._build_config = build_config + self._solver_config = solver_config + self._logger = logging.getLogger(self._logger_name) stream_handler = logging.StreamHandler() stream_handler.setLevel(1) - c_format = logging.Formatter('[%(levelname)s] <> %(message)s') + c_format = logging.Formatter( + f'[%(levelname)s] <<{self._logger_tag}>> %(message)s') stream_handler.setFormatter(c_format) - self.__logger.setLevel(1) - 
self.__logger.addHandler(stream_handler) + self._logger.setLevel(1) + self._logger.handlers.clear() + self._logger.addHandler(stream_handler) + self._logger.propagate = False + + @property + def _ros_config(self): + raise NotImplementedError - def __target_dir(self): + def _template(self, name): + return get_ros_template(self._template_subdir, name) + + def _target_dir(self): return os.path.abspath( os.path.join( - self.__build_config.build_dir, - self.__meta.optimizer_name)) + self._build_config.build_dir, + self._meta.optimizer_name)) - def __ros_target_dir(self): - ros_config = self.__build_config.ros_config - ros_target_dir_name = ros_config.package_name + def _ros_target_dir(self): return os.path.abspath( os.path.join( - self.__build_config.build_dir, - self.__meta.optimizer_name, ros_target_dir_name)) + self._build_config.build_dir, + self._meta.optimizer_name, + self._ros_config.package_name)) - def __generate_ros_dir_structure(self): - self.__logger.info("Generating directory structure") - target_ros_dir = self.__ros_target_dir() + def _generate_ros_dir_structure(self): + self._logger.info("Generating directory structure") + target_ros_dir = self._ros_target_dir() make_dir_if_not_exists(target_ros_dir) - make_dir_if_not_exists(os.path.abspath( - os.path.join(target_ros_dir, 'include'))) - make_dir_if_not_exists(os.path.abspath( - os.path.join(target_ros_dir, 'extern_lib'))) - make_dir_if_not_exists(os.path.abspath( - os.path.join(target_ros_dir, 'src'))) - make_dir_if_not_exists(os.path.abspath( - os.path.join(target_ros_dir, 'msg'))) - make_dir_if_not_exists(os.path.abspath( - os.path.join(target_ros_dir, 'config'))) - make_dir_if_not_exists(os.path.abspath( - os.path.join(target_ros_dir, 'launch'))) - - def __generate_ros_package_xml(self): - self.__logger.info("Generating package.xml") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('package.xml') - output_template = template.render( - meta=self.__meta, 
ros=self.__build_config.ros_config) + for directory_name in ('include', 'extern_lib', 'src', 'msg', 'config', 'launch'): + make_dir_if_not_exists(os.path.abspath( + os.path.join(target_ros_dir, directory_name))) + + def _generate_ros_package_xml(self): + self._logger.info("Generating package.xml") + target_ros_dir = self._ros_target_dir() + template = self._template('package.xml') + output_template = template.render(meta=self._meta, ros=self._ros_config) target_rospkg_path = os.path.join(target_ros_dir, "package.xml") with open(target_rospkg_path, "w") as fh: fh.write(output_template) - def __generate_ros_cmakelists(self): - self.__logger.info("Generating CMakeLists") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('CMakeLists.txt') - output_template = template.render(meta=self.__meta, - ros=self.__build_config.ros_config) + def _generate_ros_cmakelists(self): + self._logger.info("Generating CMakeLists") + target_ros_dir = self._ros_target_dir() + template = self._template('CMakeLists.txt') + output_template = template.render(meta=self._meta, ros=self._ros_config) target_rospkg_path = os.path.join(target_ros_dir, "CMakeLists.txt") with open(target_rospkg_path, "w") as fh: fh.write(output_template) - def __copy__ros_files(self): - self.__logger.info("Copying external dependencies") - # 1. 
--- copy header file - target_ros_dir = self.__ros_target_dir() - header_file_name = self.__meta.optimizer_name + '_bindings.hpp' + def _copy_ros_files(self): + self._logger.info("Copying external dependencies") + target_ros_dir = self._ros_target_dir() + + header_file_name = self._meta.optimizer_name + '_bindings.hpp' target_include_filename = os.path.abspath( - os.path.join( - target_ros_dir, 'include', header_file_name)) + os.path.join(target_ros_dir, 'include', header_file_name)) original_include_file = os.path.abspath( - os.path.join(self.__target_dir(), header_file_name)) + os.path.join(self._target_dir(), header_file_name)) shutil.copyfile(original_include_file, target_include_filename) - # 2. --- copy library file - lib_file_name = 'lib' + self.__meta.optimizer_name + '.a' - target_lib_file_name = \ - os.path.abspath( - os.path.join( - target_ros_dir, 'extern_lib', lib_file_name)) + lib_file_name = 'lib' + self._meta.optimizer_name + '.a' + target_lib_file_name = os.path.abspath( + os.path.join(target_ros_dir, 'extern_lib', lib_file_name)) original_lib_file = os.path.abspath( os.path.join( - self.__target_dir(), + self._target_dir(), 'target', - self.__build_config.build_mode, + self._build_config.build_mode, lib_file_name)) shutil.copyfile(original_lib_file, target_lib_file_name) - # 3. --- copy msg file OptimizationParameters.msg - original_params_msg = os.path.abspath( - os.path.join( - og_dfn.templates_dir(), 'ros', 'OptimizationParameters.msg')) - target_params_msg = \ - os.path.abspath( - os.path.join( - target_ros_dir, 'msg', 'OptimizationParameters.msg')) - shutil.copyfile(original_params_msg, target_params_msg) - - # 4. 
--- copy msg file OptimizationResult.msg - original_result_msg = os.path.abspath( - os.path.join( - og_dfn.templates_dir(), 'ros', 'OptimizationResult.msg')) - target_result_msg = \ - os.path.abspath( + for message_name in ('OptimizationParameters.msg', 'OptimizationResult.msg'): + original_message = os.path.abspath( os.path.join( - target_ros_dir, 'msg', 'OptimizationResult.msg')) - shutil.copyfile(original_result_msg, target_result_msg) - - def __generate_ros_params_file(self): - self.__logger.info("Generating open_params.yaml") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('open_params.yaml') - output_template = template.render(meta=self.__meta, - ros=self.__build_config.ros_config) - target_yaml_fname \ - = os.path.join(target_ros_dir, "config", "open_params.yaml") + og_dfn.templates_dir(), + self._template_subdir, + message_name)) + target_message = os.path.abspath( + os.path.join(target_ros_dir, 'msg', message_name)) + shutil.copyfile(original_message, target_message) + + def _generate_ros_params_file(self): + self._logger.info("Generating open_params.yaml") + target_ros_dir = self._ros_target_dir() + template = self._template('open_params.yaml') + output_template = template.render(meta=self._meta, ros=self._ros_config) + target_yaml_fname = os.path.join(target_ros_dir, "config", "open_params.yaml") with open(target_yaml_fname, "w") as fh: fh.write(output_template) - def __generate_ros_node_header(self): - self.__logger.info("Generating open_optimizer.hpp") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('open_optimizer.hpp') - output_template = template.render(meta=self.__meta, - ros=self.__build_config.ros_config, - solver_config=self.__solver_config) - target_rosnode_header_path \ - = os.path.join(target_ros_dir, "include", "open_optimizer.hpp") + def _generate_ros_node_header(self): + self._logger.info("Generating open_optimizer.hpp") + target_ros_dir = self._ros_target_dir() + template = 
self._template('open_optimizer.hpp') + output_template = template.render( + meta=self._meta, + ros=self._ros_config, + solver_config=self._solver_config) + target_rosnode_header_path = os.path.join( + target_ros_dir, "include", "open_optimizer.hpp") with open(target_rosnode_header_path, "w") as fh: fh.write(output_template) - def __generate_ros_node_cpp(self): - self.__logger.info("Generating open_optimizer.cpp") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('open_optimizer.cpp') - output_template = template.render(meta=self.__meta, - ros=self.__build_config.ros_config, - timestamp_created=datetime.datetime.now()) - target_rosnode_cpp_path \ - = os.path.join(target_ros_dir, "src", "open_optimizer.cpp") + def _generate_ros_node_cpp(self): + self._logger.info("Generating open_optimizer.cpp") + target_ros_dir = self._ros_target_dir() + template = self._template('open_optimizer.cpp') + output_template = template.render( + meta=self._meta, + ros=self._ros_config, + timestamp_created=datetime.datetime.now()) + target_rosnode_cpp_path = os.path.join(target_ros_dir, "src", "open_optimizer.cpp") with open(target_rosnode_cpp_path, "w") as fh: fh.write(output_template) - def __generate_ros_launch_file(self): - self.__logger.info("Generating open_optimizer.launch") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('open_optimizer.launch') - output_template = template.render(meta=self.__meta, - ros=self.__build_config.ros_config) - target_rosnode_launch_path \ - = os.path.join(target_ros_dir, "launch", "open_optimizer.launch") + def _generate_ros_launch_file(self): + self._logger.info("Generating %s", self._launch_file_name) + target_ros_dir = self._ros_target_dir() + template = self._template(self._launch_file_name) + output_template = template.render(meta=self._meta, ros=self._ros_config) + target_rosnode_launch_path = os.path.join( + target_ros_dir, "launch", self._launch_file_name) with open(target_rosnode_launch_path, "w") 
as fh: fh.write(output_template) - def __generate_ros_readme_file(self): - self.__logger.info("Generating README.md") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('README.md') - output_template = template.render( - ros=self.__build_config.ros_config) - target_readme_path \ - = os.path.join(target_ros_dir, "README.md") + def _generate_ros_readme_file(self): + self._logger.info("Generating README.md") + target_ros_dir = self._ros_target_dir() + template = self._template('README.md') + output_template = template.render(ros=self._ros_config) + target_readme_path = os.path.join(target_ros_dir, "README.md") with open(target_readme_path, "w") as fh: fh.write(output_template) - def __symbolic_link_info_message(self): - target_ros_dir = self.__ros_target_dir() - self.__logger.info("ROS package was built successfully. Now run:") - self.__logger.info("ln -s %s ~/catkin_ws/src/", target_ros_dir) - self.__logger.info("cd ~/catkin_ws/; catkin_make") + def _symbolic_link_info_message(self): + raise NotImplementedError def build(self): """ Build ROS-related files """ - self.__generate_ros_dir_structure() # generate necessary folders - self.__generate_ros_package_xml() # generate package.xml - self.__generate_ros_cmakelists() # generate CMakeLists.txt - self.__copy__ros_files() # Copy certain files - # # - C++ bindings, library, msg - self.__generate_ros_params_file() # generate params file - self.__generate_ros_node_header() # generate node .hpp file - self.__generate_ros_node_cpp() # generate main node .cpp file - self.__generate_ros_launch_file() # generate launch file - self.__generate_ros_readme_file() # final touch: create README.md - self.__symbolic_link_info_message() # Info: create symbolic link + self._generate_ros_dir_structure() + self._generate_ros_package_xml() + self._generate_ros_cmakelists() + self._copy_ros_files() + self._generate_ros_params_file() + self._generate_ros_node_header() + self._generate_ros_node_cpp() + 
self._generate_ros_launch_file() + self._generate_ros_readme_file() + self._symbolic_link_info_message() + + +class RosBuilder(_BaseRosBuilder): + """ + Code generation for ROS-related files + + For internal use + """ + + _template_subdir = 'ros' + _logger_name = 'opengen.builder.RosBuilder' + _logger_tag = 'ROS' + _launch_file_name = 'open_optimizer.launch' + + @property + def _ros_config(self): + return self._build_config.ros_config + + def _symbolic_link_info_message(self): + target_ros_dir = self._ros_target_dir() + self._logger.info("ROS package was built successfully. Now run:") + self._logger.info("ln -s %s ~/catkin_ws/src/", target_ros_dir) + self._logger.info("cd ~/catkin_ws/; catkin_make") + + +class ROS2Builder(_BaseRosBuilder): + """ + Code generation for ROS2-related files + + For internal use + """ + + _template_subdir = 'ros2' + _logger_name = 'opengen.builder.ROS2Builder' + _logger_tag = 'ROS2' + _launch_file_name = 'open_optimizer.launch.py' + + @property + def _ros_config(self): + return self._build_config.ros2_config + + def _symbolic_link_info_message(self): + target_ros_dir = self._ros_target_dir() + self._logger.info("ROS2 package was built successfully. 
Now run:") + self._logger.info("ln -s %s ~/ros2_ws/src/", target_ros_dir) + self._logger.info("cd ~/ros2_ws/; colcon build --packages-select %s", + self._ros_config.package_name) diff --git a/open-codegen/opengen/config/build_config.py b/open-codegen/opengen/config/build_config.py index 939150d1..41d6f64f 100644 --- a/open-codegen/opengen/config/build_config.py +++ b/open-codegen/opengen/config/build_config.py @@ -57,6 +57,7 @@ def __init__(self, build_dir="."): self.__build_c_bindings = False self.__build_python_bindings = False self.__ros_config = None + self.__ros2_config = None self.__tcp_interface_config = None self.__local_path = None self.__allocator = RustAllocator.DefaultAllocator @@ -135,6 +136,14 @@ def ros_config(self) -> RosConfiguration: """ return self.__ros_config + @property + def ros2_config(self) -> RosConfiguration: + """ROS2 package configuration + + :return: instance of RosConfiguration + """ + return self.__ros2_config + @property def allocator(self) -> RustAllocator: """ @@ -257,6 +266,21 @@ def with_ros(self, ros_config: RosConfiguration): """ self.__build_c_bindings = True # no C++ bindings, no ROS package mate self.__ros_config = ros_config + self.__ros2_config = None + return self + + def with_ros2(self, ros_config: RosConfiguration): + """ + Activates the generation of a ROS2 package. 
The caller must provide an + instance of RosConfiguration + + :param ros_config: Configuration of ROS2 package + + :return: current instance of BuildConfiguration + """ + self.__build_c_bindings = True # no C++ bindings, no ROS package + self.__ros2_config = ros_config + self.__ros_config = None return self def with_tcp_interface_config(self, tcp_interface_config=TcpServerConfiguration()): @@ -300,4 +324,6 @@ def to_dict(self): build_dict["tcp_interface_config"] = self.__tcp_interface_config.to_dict() if self.__ros_config is not None: build_dict["ros_config"] = self.__ros_config.to_dict() + if self.__ros2_config is not None: + build_dict["ros2_config"] = self.__ros2_config.to_dict() return build_dict diff --git a/open-codegen/opengen/config/ros_config.py b/open-codegen/opengen/config/ros_config.py index 206051c1..4e0a6c91 100644 --- a/open-codegen/opengen/config/ros_config.py +++ b/open-codegen/opengen/config/ros_config.py @@ -3,7 +3,7 @@ class RosConfiguration: """ - Configuration of auto-generated ROS package + Configuration of an auto-generated ROS or ROS2 package """ def __init__(self): @@ -61,7 +61,7 @@ def description(self): @property def rate(self): - """ROS node rate in Hz + """ROS/ROS2 node rate in Hz :return: rate, defaults to `10.0` """ @@ -87,7 +87,7 @@ def params_topic_queue_size(self): def with_package_name(self, pkg_name): """ Set the package name, which is the same as the name - of the folder that will store the auto-generated ROS node. + of the folder that will store the auto-generated ROS/ROS2 node. 
The node name can contain lowercase and uppercase characters and underscores, but not spaces or other symbols @@ -124,6 +124,7 @@ def with_node_name(self, node_name): def with_rate(self, rate): """ Set the rate of the ROS node + or ROS2 node :param rate: rate in Hz :type rate: float @@ -135,7 +136,7 @@ def with_rate(self, rate): def with_description(self, description): """ - Set the description of the ROS package + Set the description of the ROS or ROS2 package :param description: description, defaults to "parametric optimization with OpEn" :type description: string @@ -149,7 +150,7 @@ def with_queue_sizes(self, result_topic_queue_size=100, parameter_topic_queue_size=100): """ - Set queue sizes for ROS node + Set queue sizes for ROS or ROS2 node :param result_topic_queue_size: queue size of results, defaults to 100 :type result_topic_queue_size: int, optional diff --git a/open-codegen/opengen/templates/ros/open_optimizer.cpp b/open-codegen/opengen/templates/ros/open_optimizer.cpp index ad9b4f1b..542fb3c9 100644 --- a/open-codegen/opengen/templates/ros/open_optimizer.cpp +++ b/open-codegen/opengen/templates/ros/open_optimizer.cpp @@ -1,6 +1,7 @@ /** * This is an auto-generated file by Optimization Engine (OpEn) - * OpEn is a free open-source software - see doc.optimization-engine.xyz + * OpEn is a free open-source software - + * see https://alphaville.github.io/optimization-engine * dually licensed under the MIT and Apache v2 licences. 
* */ diff --git a/open-codegen/opengen/templates/ros2/CMakeLists.txt b/open-codegen/opengen/templates/ros2/CMakeLists.txt new file mode 100644 index 00000000..398512b2 --- /dev/null +++ b/open-codegen/opengen/templates/ros2/CMakeLists.txt @@ -0,0 +1,55 @@ +cmake_minimum_required(VERSION 3.8) +project({{ros.package_name}}) + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +find_package(ament_cmake REQUIRED) +find_package(rclcpp REQUIRED) +find_package(Python3 REQUIRED COMPONENTS Interpreter Development NumPy) +set(Python_EXECUTABLE ${Python3_EXECUTABLE}) +set(Python_INCLUDE_DIRS ${Python3_INCLUDE_DIRS}) +set(Python_LIBRARIES ${Python3_LIBRARIES}) +set(Python_NumPy_INCLUDE_DIRS ${Python3_NumPy_INCLUDE_DIRS}) +find_package(rosidl_default_generators REQUIRED) + +set(msg_files + "msg/OptimizationResult.msg" + "msg/OptimizationParameters.msg" +) + +rosidl_generate_interfaces(${PROJECT_NAME} + ${msg_files} +) + +ament_export_dependencies(rosidl_default_runtime) + +include_directories( + ${PROJECT_SOURCE_DIR}/include +) + +set(NODE_NAME {{ros.node_name}}) +add_executable(${NODE_NAME} src/open_optimizer.cpp) +ament_target_dependencies(${NODE_NAME} rclcpp) +target_link_libraries( + ${NODE_NAME} + ${PROJECT_SOURCE_DIR}/extern_lib/lib{{meta.optimizer_name}}.a + m + dl +) +rosidl_get_typesupport_target(cpp_typesupport_target ${PROJECT_NAME} "rosidl_typesupport_cpp") +target_link_libraries(${NODE_NAME} "${cpp_typesupport_target}") + +install(TARGETS + ${NODE_NAME} + DESTINATION lib/${PROJECT_NAME} +) + +install(DIRECTORY + config + launch + DESTINATION share/${PROJECT_NAME} +) + +ament_package() diff --git a/open-codegen/opengen/templates/ros2/OptimizationParameters.msg b/open-codegen/opengen/templates/ros2/OptimizationParameters.msg new file mode 100644 index 00000000..870d2981 --- /dev/null +++ b/open-codegen/opengen/templates/ros2/OptimizationParameters.msg @@ -0,0 +1,4 @@ +float64[] parameter # 
parameter p (mandatory) +float64[] initial_guess # u0 (optional/recommended) +float64[] initial_y # y0 (optional) +float64 initial_penalty # initial penalty (optional) diff --git a/open-codegen/opengen/templates/ros2/OptimizationResult.msg b/open-codegen/opengen/templates/ros2/OptimizationResult.msg new file mode 100644 index 00000000..890e0c23 --- /dev/null +++ b/open-codegen/opengen/templates/ros2/OptimizationResult.msg @@ -0,0 +1,18 @@ +# Constants match the enumeration of status codes +uint8 STATUS_CONVERGED=0 +uint8 STATUS_NOT_CONVERGED_ITERATIONS=1 +uint8 STATUS_NOT_CONVERGED_OUT_OF_TIME=2 +uint8 STATUS_NOT_CONVERGED_COST=3 +uint8 STATUS_NOT_CONVERGED_FINITE_COMPUTATION=4 + +float64[] solution # optimizer (solution) +uint8 inner_iterations # number of inner iterations +uint16 outer_iterations # number of outer iterations +uint8 status # status code +float64 cost # cost at solution +float64 norm_fpr # norm of FPR of last inner problem +float64 penalty # penalty value +float64[] lagrange_multipliers # vector of Lagrange multipliers +float64 infeasibility_f1 # infeasibility wrt F1 +float64 infeasibility_f2 # infeasibility wrt F2 +float64 solve_time_ms # solution time in ms diff --git a/open-codegen/opengen/templates/ros2/README.md b/open-codegen/opengen/templates/ros2/README.md new file mode 100644 index 00000000..cb9963a1 --- /dev/null +++ b/open-codegen/opengen/templates/ros2/README.md @@ -0,0 +1,126 @@ +# ROS2 Package {{ros.package_name}} + + +## Installation and Setup + +Move or link the auto-generated ROS2 package (folder `{{ros.package_name}}`) to your workspace source tree (typically `~/ros2_ws/src/`). 
+ +Compile with: + +```console +cd ~/ros2_ws/ +colcon build --packages-select {{ros.package_name}} +source install/setup.bash +``` + +If you build the package in-place from its own directory instead of a larger +workspace, source the generated setup script from `install/`: + +```console +# bash +source install/setup.bash + +# zsh +source install/setup.zsh +``` + +On macOS, ROS2 logging may need an explicit writable directory: + +```console +mkdir -p .ros_log +export ROS_LOG_DIR="$PWD/.ros_log" +``` + + +## Launch and Use + +Start the optimizer in one terminal. The process stays in the foreground while +the node is running. + +```console +# terminal 1 +source install/setup.bash # or: source install/setup.zsh +ros2 run {{ros.package_name}} {{ros.node_name}} +``` + +In a second terminal, source the same environment and verify discovery: + +```console +# terminal 2 +source install/setup.bash # or: source install/setup.zsh +ros2 node list --no-daemon --spin-time 5 +ros2 topic list --no-daemon --spin-time 5 +``` + +You should see the node `/{{ros.node_name}}`, the input topic +`/{{ros.subscriber_subtopic}}`, and the output topic +`/{{ros.publisher_subtopic}}`. + +Then publish a request to the configured parameters topic +(default: `/{{ros.subscriber_subtopic}}`): + +```console +ros2 topic pub --once /{{ros.subscriber_subtopic}} {{ros.package_name}}/msg/OptimizationParameters "{parameter: [YOUR_PARAMETER_VECTOR], initial_guess: [INITIAL_GUESS_OPTIONAL], initial_y: [], initial_penalty: 15.0}" +``` + +The result will be announced on the configured result topic +(default: `/{{ros.publisher_subtopic}}`): + +```console +ros2 topic echo /{{ros.publisher_subtopic}} +``` + +To get the optimal solution you can do: + +```console +ros2 topic echo /{{ros.publisher_subtopic}} --field solution +``` + + +## Messages + +This package involves two messages: `OptimizationParameters` +and `OptimizationResult`, which are used to define the input +and output values to the node. 
`OptimizationParameters` specifies +the parameter vector, the initial guess (optional), the initial +guess for the vector of Lagrange multipliers and the initial value +of the penalty value. `OptimizationResult` is a message containing +all information related to the solution of the optimization +problem, including the optimal solution, the solver status, +solution time, Lagrange multiplier vector and more. + +The message structures are defined in the following msg files: + +- [`OptimizationParameters.msg`](msg/OptimizationParameters.msg) +- [`OptimizationResult.msg`](msg/OptimizationResult.msg) + + +## Configure + +You can configure the rate and topic names by editing +[`config/open_params.yaml`](config/open_params.yaml). + + +## Directory structure and contents + +The following auto-generated files are included in your ROS2 package: + +```txt +├── CMakeLists.txt +├── config +│   └── open_params.yaml +├── extern_lib +│   └── librosenbrock.a +├── include +│   ├── open_optimizer.hpp +│   └── rosenbrock_bindings.hpp +├── launch +│   └── open_optimizer.launch.py +├── msg +│   ├── OptimizationParameters.msg +│   └── OptimizationResult.msg +├── package.xml +├── README.md +└── src + └── open_optimizer.cpp +``` diff --git a/open-codegen/opengen/templates/ros2/open_optimizer.cpp b/open-codegen/opengen/templates/ros2/open_optimizer.cpp new file mode 100644 index 00000000..e7c718f5 --- /dev/null +++ b/open-codegen/opengen/templates/ros2/open_optimizer.cpp @@ -0,0 +1,181 @@ +/** + * This is an auto-generated file by Optimization Engine (OpEn) + * OpEn is a free open-source software - see doc.optimization-engine.xyz + * dually licensed under the MIT and Apache v2 licences. 
+ *
+ */
+#include <chrono>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "rclcpp/rclcpp.hpp"
+#include "{{ros.package_name}}/msg/optimization_parameters.hpp"
+#include "{{ros.package_name}}/msg/optimization_result.hpp"
+#include "{{meta.optimizer_name}}_bindings.hpp"
+#include "open_optimizer.hpp"
+
+namespace {{ros.package_name}} {
+class OptimizationEngineNode : public rclcpp::Node {
+private:
+    using OptimizationParametersMsg = {{ros.package_name}}::msg::OptimizationParameters;
+    using OptimizationResultMsg = {{ros.package_name}}::msg::OptimizationResult;
+
+    OptimizationParametersMsg params_;
+    OptimizationResultMsg results_;
+    bool has_received_request_ = false;
+    double p_[{{meta.optimizer_name|upper}}_NUM_PARAMETERS] = { 0 };
+    double u_[{{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES] = { 0 };
+    double* y_ = nullptr;
+    {{meta.optimizer_name}}Cache* cache_ = nullptr;
+    double init_penalty_ = ROS2_NODE_{{meta.optimizer_name|upper}}_DEFAULT_INITIAL_PENALTY;
+
+    rclcpp::Publisher<OptimizationResultMsg>::SharedPtr publisher_;
+    rclcpp::Subscription<OptimizationParametersMsg>::SharedPtr subscriber_;
+    rclcpp::TimerBase::SharedPtr timer_;
+
+    static std::chrono::milliseconds rateToPeriod(double rate)
+    {
+        if (rate <= 0.0) {
+            return std::chrono::milliseconds(100);
+        }
+        int period_ms = static_cast<int>(1000.0 / rate);
+        if (period_ms < 1) {
+            period_ms = 1;
+        }
+        return std::chrono::milliseconds(period_ms);
+    }
+
+    void updateInputData()
+    {
+        init_penalty_ = (params_.initial_penalty > 1.0)
+            ? params_.initial_penalty
+            : ROS2_NODE_{{meta.optimizer_name|upper}}_DEFAULT_INITIAL_PENALTY;
+
+        if (params_.parameter.size() == {{meta.optimizer_name|upper}}_NUM_PARAMETERS) {
+            for (size_t i = 0; i < {{meta.optimizer_name|upper}}_NUM_PARAMETERS; ++i) {
+                p_[i] = params_.parameter[i];
+            }
+        }
+
+        if (params_.initial_guess.size() == {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES) {
+            for (size_t i = 0; i < {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES; ++i) {
+                u_[i] = params_.initial_guess[i];
+            }
+        }
+
+        if (params_.initial_y.size() == {{meta.optimizer_name|upper}}_N1) {
+            for (size_t i = 0; i < {{meta.optimizer_name|upper}}_N1; ++i) {
+                y_[i] = params_.initial_y[i];
+            }
+        }
+    }
+
+    {{meta.optimizer_name}}SolverStatus solve()
+    {
+        return {{meta.optimizer_name}}_solve(cache_, u_, p_, y_, &init_penalty_);
+    }
+
+    void initializeSolverIfNeeded()
+    {
+        if (y_ == nullptr) {
+            y_ = new double[{{meta.optimizer_name|upper}}_N1]();
+        }
+        if (cache_ == nullptr) {
+            cache_ = {{meta.optimizer_name}}_new();
+        }
+    }
+
+    void updateResults({{meta.optimizer_name}}SolverStatus& status)
+    {
+        results_.solution.clear();
+        for (size_t i = 0; i < {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES; ++i) {
+            results_.solution.push_back(u_[i]);
+        }
+
+        results_.lagrange_multipliers.clear();
+        for (size_t i = 0; i < {{meta.optimizer_name|upper}}_N1; ++i) {
+            results_.lagrange_multipliers.push_back(status.lagrange[i]);
+        }
+
+        results_.inner_iterations = status.num_inner_iterations;
+        results_.outer_iterations = status.num_outer_iterations;
+        results_.norm_fpr = status.last_problem_norm_fpr;
+        results_.cost = status.cost;
+        results_.penalty = status.penalty;
+        results_.status = static_cast<uint8_t>(status.exit_status);
+        results_.solve_time_ms = static_cast<double>(status.solve_time_ns) / 1000000.0;
+        results_.infeasibility_f2 = status.f2_norm;
+        results_.infeasibility_f1 = status.delta_y_norm_over_c;
+    }
+
+    void receiveRequestCallback(const OptimizationParametersMsg::ConstSharedPtr msg)
+    {
+        params_ = *msg;
+        has_received_request_ = true;
+    }
+
+    void solveAndPublish()
+    {
+        if (!has_received_request_) {
+            return;
+        }
+        initializeSolverIfNeeded();
+        updateInputData();
+        {{meta.optimizer_name}}SolverStatus status = solve();
+        updateResults(status);
+        publisher_->publish(results_);
+    }
+
+public:
+    OptimizationEngineNode()
+        : Node(ROS2_NODE_{{meta.optimizer_name|upper}}_NODE_NAME)
+    {
+        this->declare_parameter(
+            "result_topic",
+            std::string(ROS2_NODE_{{meta.optimizer_name|upper}}_RESULT_TOPIC));
+        this->declare_parameter(
+            "params_topic",
+            std::string(ROS2_NODE_{{meta.optimizer_name|upper}}_PARAMS_TOPIC));
+        this->declare_parameter(
+            "rate",
+            double(ROS2_NODE_{{meta.optimizer_name|upper}}_RATE));
+
+        std::string result_topic = this->get_parameter("result_topic").as_string();
+        std::string params_topic = this->get_parameter("params_topic").as_string();
+        double rate = this->get_parameter("rate").as_double();
+
+        publisher_ = this->create_publisher<OptimizationResultMsg>(
+            result_topic,
+            ROS2_NODE_{{meta.optimizer_name|upper}}_RESULT_TOPIC_QUEUE_SIZE);
+        subscriber_ = this->create_subscription<OptimizationParametersMsg>(
+            params_topic,
+            ROS2_NODE_{{meta.optimizer_name|upper}}_PARAMS_TOPIC_QUEUE_SIZE,
+            std::bind(&OptimizationEngineNode::receiveRequestCallback, this, std::placeholders::_1));
+        timer_ = this->create_wall_timer(
+            rateToPeriod(rate),
+            std::bind(&OptimizationEngineNode::solveAndPublish, this));
+    }
+
+    ~OptimizationEngineNode() override
+    {
+        if (y_ != nullptr) {
+            delete[] y_;
+        }
+        if (cache_ != nullptr) {
+            {{meta.optimizer_name}}_free(cache_);
+        }
+    }
+};
+} /* end of namespace {{ros.package_name}} */
+
+int main(int argc, char** argv)
+{
+    rclcpp::init(argc, argv);
+    auto node = std::make_shared<{{ros.package_name}}::OptimizationEngineNode>();
+    rclcpp::spin(node);
+    rclcpp::shutdown();
+    return 0;
+}
diff --git a/open-codegen/opengen/templates/ros2/open_optimizer.hpp b/open-codegen/opengen/templates/ros2/open_optimizer.hpp
new file mode 100644
index 00000000..a8482fd2
--- /dev/null +++ b/open-codegen/opengen/templates/ros2/open_optimizer.hpp @@ -0,0 +1,40 @@ +#ifndef ROS2_NODE_{{meta.optimizer_name|upper}}_H +#define ROS2_NODE_{{meta.optimizer_name|upper}}_H + +/** + * Default node name + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_NODE_NAME "{{ros.node_name}}" + +/** + * Default result (publisher) topic name + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_RESULT_TOPIC "{{ros.publisher_subtopic}}" + +/** + * Default parameters (subscriber) topic name + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_PARAMS_TOPIC "{{ros.subscriber_subtopic}}" + +/** + * Default execution rate (in Hz) + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_RATE {{ros.rate}} + +/** + * Default result topic queue size + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_RESULT_TOPIC_QUEUE_SIZE {{ros.result_topic_queue_size}} + +/** + * Default parameters topic queue size + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_PARAMS_TOPIC_QUEUE_SIZE {{ros.params_topic_queue_size}} + +/** + * Default initial penalty + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_DEFAULT_INITIAL_PENALTY {{solver_config.initial_penalty}} + + +#endif /* Header Sentinel: ROS2_NODE_{{meta.optimizer_name|upper}}_H */ diff --git a/open-codegen/opengen/templates/ros2/open_optimizer.launch.py b/open-codegen/opengen/templates/ros2/open_optimizer.launch.py new file mode 100644 index 00000000..45d7aa60 --- /dev/null +++ b/open-codegen/opengen/templates/ros2/open_optimizer.launch.py @@ -0,0 +1,20 @@ +from launch import LaunchDescription +from launch.substitutions import PathJoinSubstitution +from launch_ros.actions import Node +from launch_ros.substitutions import FindPackageShare + + +def generate_launch_description(): + return LaunchDescription([ + Node( + package="{{ros.package_name}}", + executable="{{ros.node_name}}", + name="{{ros.node_name}}", + output="screen", + parameters=[PathJoinSubstitution([ + FindPackageShare("{{ros.package_name}}"), + 
"config", + "open_params.yaml", + ])], + ) + ]) diff --git a/open-codegen/opengen/templates/ros2/open_params.yaml b/open-codegen/opengen/templates/ros2/open_params.yaml new file mode 100644 index 00000000..b1ae266e --- /dev/null +++ b/open-codegen/opengen/templates/ros2/open_params.yaml @@ -0,0 +1,5 @@ +/**: + ros__parameters: + result_topic: "{{ros.publisher_subtopic}}" + params_topic: "{{ros.subscriber_subtopic}}" + rate: {{ros.rate}} diff --git a/open-codegen/opengen/templates/ros2/package.xml b/open-codegen/opengen/templates/ros2/package.xml new file mode 100644 index 00000000..c183538d --- /dev/null +++ b/open-codegen/opengen/templates/ros2/package.xml @@ -0,0 +1,24 @@ + + + {{ros.package_name}} + {{meta.version}} + {{ros.description}} + chung + {{meta.licence}} + + ament_cmake + + rosidl_default_generators + + launch + launch_ros + rclcpp + + rosidl_default_runtime + + rosidl_interface_packages + + + ament_cmake + + diff --git a/open-codegen/test/test.py b/open-codegen/test/test.py index d56f3075..a4a1579f 100644 --- a/open-codegen/test/test.py +++ b/open-codegen/test/test.py @@ -172,6 +172,36 @@ def setUpRosPackageGeneration(cls): solver_configuration=cls.solverConfig()) \ .build() + @classmethod + def setUpRos2PackageGeneration(cls): + u = cs.MX.sym("u", 5) # decision variable (nu = 5) + p = cs.MX.sym("p", 2) # parameter (np = 2) + phi = og.functions.rosenbrock(u, p) + c = cs.vertcat(1.5 * u[0] - u[1], + cs.fmax(0.0, u[2] - u[3] + 0.1)) + bounds = og.constraints.Ball2(None, 1.5) + meta = og.config.OptimizerMeta() \ + .with_optimizer_name("rosenbrock_ros2") + problem = og.builder.Problem(u, p, phi) \ + .with_constraints(bounds) \ + .with_penalty_constraints(c) + ros_config = og.config.RosConfiguration() \ + .with_package_name("parametric_optimizer_ros2") \ + .with_node_name("open_node_ros2") \ + .with_rate(35) \ + .with_description("really cool ROS2 node") + build_config = og.config.BuildConfiguration() \ + 
.with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ + .with_build_directory(RustBuildTestCase.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_build_c_bindings() \ + .with_ros2(ros_config) + og.builder.OpEnOptimizerBuilder(problem, + metadata=meta, + build_configuration=build_config, + solver_configuration=cls.solverConfig()) \ + .build() + @classmethod def setUpOnlyParametricF2(cls): u = cs.MX.sym("u", 5) # decision variable (nu = 5) @@ -231,6 +261,7 @@ def setUpHalfspace(cls): def setUpClass(cls): cls.setUpPythonBindings() cls.setUpRosPackageGeneration() + cls.setUpRos2PackageGeneration() cls.setUpOnlyF1() cls.setUpOnlyF2() cls.setUpOnlyF2(is_preconditioned=True) @@ -238,6 +269,16 @@ def setUpClass(cls): cls.setUpOnlyParametricF2() cls.setUpHalfspace() + def test_ros2_package_generation(self): + ros2_dir = os.path.join( + RustBuildTestCase.TEST_DIR, + "rosenbrock_ros2", + "parametric_optimizer_ros2") + self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "package.xml"))) + self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "CMakeLists.txt"))) + self.assertTrue(os.path.isfile( + os.path.join(ros2_dir, "launch", "open_optimizer.launch.py"))) + def test_python_bindings(self): import sys import os From 7bebcd590b3a0b4ab459b526bdb2c527986ab942 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 13:02:41 +0000 Subject: [PATCH 023/133] update ROS2 auto-generated README --- open-codegen/opengen/templates/ros2/README.md | 45 ++++++++----------- 1 file changed, 18 insertions(+), 27 deletions(-) diff --git a/open-codegen/opengen/templates/ros2/README.md b/open-codegen/opengen/templates/ros2/README.md index cb9963a1..b8e768cc 100644 --- a/open-codegen/opengen/templates/ros2/README.md +++ b/open-codegen/opengen/templates/ros2/README.md @@ -1,32 +1,21 @@ -# ROS2 Package {{ros.package_name}} +# ROS2 Package: {{ros.package_name}} ## Installation and Setup Move or link the auto-generated ROS2 
package (folder `{{ros.package_name}}`) to your workspace source tree (typically `~/ros2_ws/src/`). -Compile with: +From within the folder `{{ros.package_name}}`, compile with: -```console -cd ~/ros2_ws/ +```bash colcon build --packages-select {{ros.package_name}} -source install/setup.bash +source install/setup.bash +# or source install/setup.zsh on MacOS ``` -If you build the package in-place from its own directory instead of a larger -workspace, source the generated setup script from `install/`: +If you want to activate logging (recommended), do -```console -# bash -source install/setup.bash - -# zsh -source install/setup.zsh -``` - -On macOS, ROS2 logging may need an explicit writable directory: - -```console +```bash mkdir -p .ros_log export ROS_LOG_DIR="$PWD/.ros_log" ``` @@ -37,17 +26,19 @@ export ROS_LOG_DIR="$PWD/.ros_log" Start the optimizer in one terminal. The process stays in the foreground while the node is running. -```console -# terminal 1 -source install/setup.bash # or: source install/setup.zsh +```bash +# Terminal 1 +source install/setup.bash +# or: source install/setup.zsh ros2 run {{ros.package_name}} {{ros.node_name}} ``` In a second terminal, source the same environment and verify discovery: -```console -# terminal 2 -source install/setup.bash # or: source install/setup.zsh +```bash +# Terminal 2 +source install/setup.bash +# or: source install/setup.zsh ros2 node list --no-daemon --spin-time 5 ros2 topic list --no-daemon --spin-time 5 ``` @@ -59,20 +50,20 @@ You should see the node `/{{ros.node_name}}`, the input topic Then publish a request to the configured parameters topic (default: `/{{ros.subscriber_subtopic}}`): -```console +```bash ros2 topic pub --once /{{ros.subscriber_subtopic}} {{ros.package_name}}/msg/OptimizationParameters "{parameter: [YOUR_PARAMETER_VECTOR], initial_guess: [INITIAL_GUESS_OPTIONAL], initial_y: [], initial_penalty: 15.0}" ``` The result will be announced on the configured result topic (default: 
`/{{ros.publisher_subtopic}}`): -```console +```bash ros2 topic echo /{{ros.publisher_subtopic}} ``` To get the optimal solution you can do: -```console +```bash ros2 topic echo /{{ros.publisher_subtopic}} --field solution ``` From 31a3b40070a7e431d1e089f99284c13c67f99ea2 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 13:07:35 +0000 Subject: [PATCH 024/133] api docs --- open-codegen/opengen/builder/ros_builder.py | 98 +++++++++++++++++++-- 1 file changed, 91 insertions(+), 7 deletions(-) diff --git a/open-codegen/opengen/builder/ros_builder.py b/open-codegen/opengen/builder/ros_builder.py index 0b6c537c..7db3aeb2 100644 --- a/open-codegen/opengen/builder/ros_builder.py +++ b/open-codegen/opengen/builder/ros_builder.py @@ -1,3 +1,5 @@ +"""Builders for auto-generated ROS1 and ROS2 package wrappers.""" + import opengen.definitions as og_dfn import datetime @@ -9,11 +11,27 @@ def make_dir_if_not_exists(directory): + """Create ``directory`` if it does not already exist. + + :param directory: Path to the directory to create. + :type directory: str + """ if not os.path.exists(directory): os.makedirs(directory) def get_ros_template(template_subdir, name): + """Load a Jinja template from a ROS-specific template subdirectory. + + :param template_subdir: Template subdirectory name, e.g. ``"ros"`` or + ``"ros2"``. + :type template_subdir: str + :param name: Template file name. + :type name: str + + :return: Loaded Jinja template. + :rtype: jinja2.Template + """ file_loader = jinja2.FileSystemLoader(og_dfn.templates_subdir(template_subdir)) env = jinja2.Environment(loader=file_loader, autoescape=True) return env.get_template(name) @@ -21,17 +39,35 @@ def get_ros_template(template_subdir, name): class _BaseRosBuilder: """ - Shared code generation for ROS-related packages + Shared code generation logic for ROS-related packages. 
- For internal use + This base class contains the common file-generation pipeline used by both + :class:`RosBuilder` and :class:`ROS2Builder`. Subclasses specialize the + process by providing the package configuration object, template + subdirectory, launch file name, and final user-facing instructions. + + :ivar _meta: Optimizer metadata used to render the package templates. + :ivar _build_config: Global build configuration for the generated solver. + :ivar _solver_config: Solver configuration used when rendering node code. + :ivar _logger: Logger dedicated to the concrete builder implementation. """ + #: Template subdirectory under ``opengen/templates`` used by the builder. _template_subdir = None + #: Fully-qualified logger name for the concrete builder. _logger_name = None + #: Short logger tag shown in log messages. _logger_tag = None + #: Launch file generated by the concrete builder. _launch_file_name = None def __init__(self, meta, build_config, solver_config): + """Initialise a shared ROS package builder. + + :param meta: Optimizer metadata. + :param build_config: Build configuration object. + :param solver_config: Solver configuration object. + """ self._meta = meta self._build_config = build_config self._solver_config = solver_config @@ -48,18 +84,41 @@ def __init__(self, meta, build_config, solver_config): @property def _ros_config(self): + """Return the ROS/ROS2 package configuration for the subclass. + + :return: ROS configuration object used by the concrete builder. + :raises NotImplementedError: If a subclass does not provide this hook. + """ raise NotImplementedError def _template(self, name): + """Return a template from the builder's template subdirectory. + + :param name: Template file name. + :type name: str + + :return: Loaded Jinja template. + :rtype: jinja2.Template + """ return get_ros_template(self._template_subdir, name) def _target_dir(self): + """Return the root directory of the generated optimizer project. 
+ + :return: Absolute path to the generated optimizer directory. + :rtype: str + """ return os.path.abspath( os.path.join( self._build_config.build_dir, self._meta.optimizer_name)) def _ros_target_dir(self): + """Return the root directory of the generated ROS package. + + :return: Absolute path to the generated ROS/ROS2 package directory. + :rtype: str + """ return os.path.abspath( os.path.join( self._build_config.build_dir, @@ -67,6 +126,7 @@ def _ros_target_dir(self): self._ros_config.package_name)) def _generate_ros_dir_structure(self): + """Create the directory structure for the generated ROS package.""" self._logger.info("Generating directory structure") target_ros_dir = self._ros_target_dir() make_dir_if_not_exists(target_ros_dir) @@ -75,6 +135,7 @@ def _generate_ros_dir_structure(self): os.path.join(target_ros_dir, directory_name))) def _generate_ros_package_xml(self): + """Render and write ``package.xml`` for the generated package.""" self._logger.info("Generating package.xml") target_ros_dir = self._ros_target_dir() template = self._template('package.xml') @@ -84,6 +145,7 @@ def _generate_ros_package_xml(self): fh.write(output_template) def _generate_ros_cmakelists(self): + """Render and write the package ``CMakeLists.txt`` file.""" self._logger.info("Generating CMakeLists") target_ros_dir = self._ros_target_dir() template = self._template('CMakeLists.txt') @@ -93,6 +155,7 @@ def _generate_ros_cmakelists(self): fh.write(output_template) def _copy_ros_files(self): + """Copy generated bindings, static library, and message files.""" self._logger.info("Copying external dependencies") target_ros_dir = self._ros_target_dir() @@ -125,6 +188,7 @@ def _copy_ros_files(self): shutil.copyfile(original_message, target_message) def _generate_ros_params_file(self): + """Render and write the runtime parameter YAML file.""" self._logger.info("Generating open_params.yaml") target_ros_dir = self._ros_target_dir() template = self._template('open_params.yaml') @@ -134,6 +198,7 
@@ def _generate_ros_params_file(self): fh.write(output_template) def _generate_ros_node_header(self): + """Render and write the generated node header file.""" self._logger.info("Generating open_optimizer.hpp") target_ros_dir = self._ros_target_dir() template = self._template('open_optimizer.hpp') @@ -147,6 +212,7 @@ def _generate_ros_node_header(self): fh.write(output_template) def _generate_ros_node_cpp(self): + """Render and write the generated node implementation file.""" self._logger.info("Generating open_optimizer.cpp") target_ros_dir = self._ros_target_dir() template = self._template('open_optimizer.cpp') @@ -159,6 +225,7 @@ def _generate_ros_node_cpp(self): fh.write(output_template) def _generate_ros_launch_file(self): + """Render and write the package launch file.""" self._logger.info("Generating %s", self._launch_file_name) target_ros_dir = self._ros_target_dir() template = self._template(self._launch_file_name) @@ -169,6 +236,7 @@ def _generate_ros_launch_file(self): fh.write(output_template) def _generate_ros_readme_file(self): + """Render and write the generated package README.""" self._logger.info("Generating README.md") target_ros_dir = self._ros_target_dir() template = self._template('README.md') @@ -178,11 +246,19 @@ def _generate_ros_readme_file(self): fh.write(output_template) def _symbolic_link_info_message(self): + """Emit final user-facing setup instructions for the generated package. + + :raises NotImplementedError: If a subclass does not provide this hook. + """ raise NotImplementedError def build(self): """ - Build ROS-related files + Generate all ROS/ROS2 wrapper files for the current optimizer. + + This method creates the package directory structure, copies the + generated solver artefacts, renders all templates, and logs final setup + instructions for the user. 
""" self._generate_ros_dir_structure() self._generate_ros_package_xml() @@ -198,9 +274,11 @@ def build(self): class RosBuilder(_BaseRosBuilder): """ - Code generation for ROS-related files + Builder for ROS1 package generation. - For internal use + This specialization uses the ``templates/ros`` template set and the + ROS1-specific configuration stored in + :attr:`opengen.config.build_config.BuildConfiguration.ros_config`. """ _template_subdir = 'ros' @@ -210,9 +288,11 @@ class RosBuilder(_BaseRosBuilder): @property def _ros_config(self): + """Return the ROS1 package configuration.""" return self._build_config.ros_config def _symbolic_link_info_message(self): + """Log the final ROS1 workspace integration instructions.""" target_ros_dir = self._ros_target_dir() self._logger.info("ROS package was built successfully. Now run:") self._logger.info("ln -s %s ~/catkin_ws/src/", target_ros_dir) @@ -221,9 +301,11 @@ def _symbolic_link_info_message(self): class ROS2Builder(_BaseRosBuilder): """ - Code generation for ROS2-related files + Builder for ROS2 package generation. - For internal use + This specialization uses the ``templates/ros2`` template set and the + ROS2-specific configuration stored in + :attr:`opengen.config.build_config.BuildConfiguration.ros2_config`. """ _template_subdir = 'ros2' @@ -233,9 +315,11 @@ class ROS2Builder(_BaseRosBuilder): @property def _ros_config(self): + """Return the ROS2 package configuration.""" return self._build_config.ros2_config def _symbolic_link_info_message(self): + """Log the final ROS2 workspace integration instructions.""" target_ros_dir = self._ros_target_dir() self._logger.info("ROS2 package was built successfully. 
Now run:") self._logger.info("ln -s %s ~/ros2_ws/src/", target_ros_dir) From 933157dbd75aa5205762e5835ecf154efd59d5de Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 13:39:21 +0000 Subject: [PATCH 025/133] ROS2 tests in CI --- .github/workflows/ci.yml | 35 ++++- ci/script.sh | 10 ++ open-codegen/test/README.md | 3 +- open-codegen/test/test.py | 41 ------ open-codegen/test/test_ros2.py | 229 +++++++++++++++++++++++++++++++++ 5 files changed, 275 insertions(+), 43 deletions(-) create mode 100644 open-codegen/test/test_ros2.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 982b2960..46176b15 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -43,7 +43,9 @@ jobs: python_tests: name: Python tests (${{ matrix.name }}) - needs: rust_tests + needs: + - rust_tests + - ros2_tests runs-on: ${{ matrix.os }} timeout-minutes: 45 strategy: @@ -102,6 +104,37 @@ jobs: if: runner.os == 'macOS' run: bash ./ci/script.sh python-tests + ros2_tests: + name: ROS2 tests + needs: rust_tests + runs-on: ubuntu-latest + timeout-minutes: 45 + container: + image: ubuntu:noble + env: + DO_DOCKER: 0 + steps: + - uses: actions/checkout@v5 + + - uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + toolchain: stable + rustflags: "" + + - uses: actions/setup-python@v6 + with: + python-version: "3.12" + cache: "pip" + cache-dependency-path: open-codegen/setup.py + + - name: Setup ROS 2 + uses: ros-tooling/setup-ros@v0.7 + with: + required-ros-distributions: jazzy + + - name: Run ROS2 Python tests + run: bash ./ci/script.sh ros2-tests + ocp_tests: name: OCP tests (${{ matrix.name }}) needs: python_tests diff --git a/ci/script.sh b/ci/script.sh index 18f1f2b7..dc8bdcef 100755 --- a/ci/script.sh +++ b/ci/script.sh @@ -60,6 +60,11 @@ run_python_core_tests() { generated_clippy_tests } +run_python_ros2_tests() { + export PYTHONPATH=. + python -W ignore test/test_ros2.py -v +} + run_python_ocp_tests() { export PYTHONPATH=. 
python -W ignore test/test_ocp.py -v @@ -84,6 +89,11 @@ main() { setup_python_test_env run_python_core_tests ;; + ros2-tests) + echo "Running ROS2 Python tests" + setup_python_test_env + run_python_ros2_tests + ;; ocp-tests) echo "Running OCP Python tests" setup_python_test_env diff --git a/open-codegen/test/README.md b/open-codegen/test/README.md index 75cd8379..e946a477 100644 --- a/open-codegen/test/README.md +++ b/open-codegen/test/README.md @@ -47,5 +47,6 @@ The generated benchmark looks like this: Run ``` python -W ignore test/test_constraints.py -v +python -W ignore test/test_ros2.py -v python -W ignore test/test.py -v -``` \ No newline at end of file +``` diff --git a/open-codegen/test/test.py b/open-codegen/test/test.py index a4a1579f..d56f3075 100644 --- a/open-codegen/test/test.py +++ b/open-codegen/test/test.py @@ -172,36 +172,6 @@ def setUpRosPackageGeneration(cls): solver_configuration=cls.solverConfig()) \ .build() - @classmethod - def setUpRos2PackageGeneration(cls): - u = cs.MX.sym("u", 5) # decision variable (nu = 5) - p = cs.MX.sym("p", 2) # parameter (np = 2) - phi = og.functions.rosenbrock(u, p) - c = cs.vertcat(1.5 * u[0] - u[1], - cs.fmax(0.0, u[2] - u[3] + 0.1)) - bounds = og.constraints.Ball2(None, 1.5) - meta = og.config.OptimizerMeta() \ - .with_optimizer_name("rosenbrock_ros2") - problem = og.builder.Problem(u, p, phi) \ - .with_constraints(bounds) \ - .with_penalty_constraints(c) - ros_config = og.config.RosConfiguration() \ - .with_package_name("parametric_optimizer_ros2") \ - .with_node_name("open_node_ros2") \ - .with_rate(35) \ - .with_description("really cool ROS2 node") - build_config = og.config.BuildConfiguration() \ - .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ - .with_build_directory(RustBuildTestCase.TEST_DIR) \ - .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ - .with_build_c_bindings() \ - .with_ros2(ros_config) - og.builder.OpEnOptimizerBuilder(problem, - metadata=meta, - 
build_configuration=build_config, - solver_configuration=cls.solverConfig()) \ - .build() - @classmethod def setUpOnlyParametricF2(cls): u = cs.MX.sym("u", 5) # decision variable (nu = 5) @@ -261,7 +231,6 @@ def setUpHalfspace(cls): def setUpClass(cls): cls.setUpPythonBindings() cls.setUpRosPackageGeneration() - cls.setUpRos2PackageGeneration() cls.setUpOnlyF1() cls.setUpOnlyF2() cls.setUpOnlyF2(is_preconditioned=True) @@ -269,16 +238,6 @@ def setUpClass(cls): cls.setUpOnlyParametricF2() cls.setUpHalfspace() - def test_ros2_package_generation(self): - ros2_dir = os.path.join( - RustBuildTestCase.TEST_DIR, - "rosenbrock_ros2", - "parametric_optimizer_ros2") - self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "package.xml"))) - self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "CMakeLists.txt"))) - self.assertTrue(os.path.isfile( - os.path.join(ros2_dir, "launch", "open_optimizer.launch.py"))) - def test_python_bindings(self): import sys import os diff --git a/open-codegen/test/test_ros2.py b/open-codegen/test/test_ros2.py new file mode 100644 index 00000000..5609ec48 --- /dev/null +++ b/open-codegen/test/test_ros2.py @@ -0,0 +1,229 @@ +import logging +import os +import shutil +import subprocess +import time +import unittest + +import casadi.casadi as cs +import opengen as og + + +class Ros2BuildTestCase(unittest.TestCase): + """Integration tests for auto-generated ROS2 packages.""" + + TEST_DIR = ".python_test_build" + OPTIMIZER_NAME = "rosenbrock_ros2" + PACKAGE_NAME = "parametric_optimizer_ros2" + NODE_NAME = "open_node_ros2" + + @staticmethod + def get_open_local_absolute_path(): + """Return the absolute path to the local OpEn repository root.""" + cwd = os.getcwd() + return cwd.split('open-codegen')[0] + + @classmethod + def solverConfig(cls): + """Return a solver configuration shared by the ROS2 tests.""" + return og.config.SolverConfiguration() \ + .with_lbfgs_memory(15) \ + .with_tolerance(1e-4) \ + .with_initial_tolerance(1e-4) \ + 
.with_delta_tolerance(1e-4) \ + .with_initial_penalty(15.0) \ + .with_penalty_weight_update_factor(10.0) \ + .with_max_inner_iterations(155) \ + .with_max_duration_micros(1e8) \ + .with_max_outer_iterations(50) \ + .with_sufficient_decrease_coefficient(0.05) \ + .with_cbfgs_parameters(1.5, 1e-10, 1e-12) \ + .with_preconditioning(False) + + @classmethod + def setUpRos2PackageGeneration(cls): + """Generate the ROS2 package used by the ROS2 integration tests.""" + u = cs.MX.sym("u", 5) + p = cs.MX.sym("p", 2) + phi = og.functions.rosenbrock(u, p) + c = cs.vertcat(1.5 * u[0] - u[1], + cs.fmax(0.0, u[2] - u[3] + 0.1)) + bounds = og.constraints.Ball2(None, 1.5) + meta = og.config.OptimizerMeta() \ + .with_optimizer_name(cls.OPTIMIZER_NAME) + problem = og.builder.Problem(u, p, phi) \ + .with_constraints(bounds) \ + .with_penalty_constraints(c) + ros_config = og.config.RosConfiguration() \ + .with_package_name(cls.PACKAGE_NAME) \ + .with_node_name(cls.NODE_NAME) \ + .with_rate(35) \ + .with_description("really cool ROS2 node") + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=cls.get_open_local_absolute_path()) \ + .with_build_directory(cls.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_build_c_bindings() \ + .with_ros2(ros_config) + og.builder.OpEnOptimizerBuilder(problem, + metadata=meta, + build_configuration=build_config, + solver_configuration=cls.solverConfig()) \ + .build() + + @classmethod + def setUpClass(cls): + """Generate the ROS2 package once before all tests run.""" + if shutil.which("ros2") is None or shutil.which("colcon") is None: + raise unittest.SkipTest("ROS2 CLI tools are not available in PATH") + cls.setUpRos2PackageGeneration() + + @classmethod + def ros2_package_dir(cls): + """Return the filesystem path to the generated ROS2 package.""" + return os.path.join( + cls.TEST_DIR, + cls.OPTIMIZER_NAME, + cls.PACKAGE_NAME) + + @classmethod + def ros2_test_env(cls): + """Return the 
subprocess environment used by ROS2 integration tests.""" + env = os.environ.copy() + ros2_dir = cls.ros2_package_dir() + os.makedirs(os.path.join(ros2_dir, ".ros_log"), exist_ok=True) + env["ROS_LOG_DIR"] = os.path.join(ros2_dir, ".ros_log") + env.setdefault("RMW_IMPLEMENTATION", "rmw_fastrtps_cpp") + env.pop("ROS_LOCALHOST_ONLY", None) + return env + + @staticmethod + def _bash(command, cwd, env=None, timeout=180, check=True): + """Run a bash command and return the completed process.""" + return subprocess.run( + ["/bin/bash", "-lc", command], + cwd=cwd, + env=env, + text=True, + capture_output=True, + timeout=timeout, + check=check) + + def test_ros2_package_generation(self): + """Verify the ROS2 package files are generated.""" + ros2_dir = self.ros2_package_dir() + self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "package.xml"))) + self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "CMakeLists.txt"))) + self.assertTrue(os.path.isfile( + os.path.join(ros2_dir, "launch", "open_optimizer.launch.py"))) + + def test_generated_ros2_package_works(self): + """Build, run, and call the generated ROS2 package.""" + ros2_dir = self.ros2_package_dir() + env = self.ros2_test_env() + + self._bash( + f"source install/setup.bash >/dev/null 2>&1 || true; " + f"colcon build --packages-select {self.PACKAGE_NAME}", + cwd=ros2_dir, + env=env, + timeout=600) + + node_process = subprocess.Popen( + [ + "/bin/bash", + "-lc", + f"source install/setup.bash && " + f"ros2 run {self.PACKAGE_NAME} {self.NODE_NAME}" + ], + cwd=ros2_dir, + env=env, + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + try: + node_seen = False + topics_seen = False + for _ in range(6): + node_result = self._bash( + "source install/setup.bash && " + "ros2 node list --no-daemon --spin-time 5", + cwd=ros2_dir, + env=env, + timeout=30, + check=False) + topic_result = self._bash( + "source install/setup.bash && " + "ros2 topic list --no-daemon --spin-time 5", + cwd=ros2_dir, + env=env, + 
timeout=30, + check=False) + node_seen = f"/{self.NODE_NAME}" in node_result.stdout + topics_seen = "/parameters" in topic_result.stdout and "/result" in topic_result.stdout + if node_seen and topics_seen: + break + time.sleep(1) + + if not (node_seen and topics_seen): + process_output = "" + if node_process.poll() is None: + node_process.terminate() + try: + node_process.wait(timeout=10) + except subprocess.TimeoutExpired: + node_process.kill() + node_process.wait(timeout=10) + if node_process.stdout is not None: + process_output = node_process.stdout.read() + self.fail( + "Generated ROS2 node did not become discoverable.\n" + f"ros2 node list output:\n{node_result.stdout}\n" + f"ros2 topic list output:\n{topic_result.stdout}\n" + f"node process output:\n{process_output}") + + echo_process = subprocess.Popen( + [ + "/bin/bash", + "-lc", + "source install/setup.bash && " + "ros2 topic echo /result --once" + ], + cwd=ros2_dir, + env=env, + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + try: + time.sleep(1) + self._bash( + "source install/setup.bash && " + "ros2 topic pub --once /parameters " + f"{self.PACKAGE_NAME}/msg/OptimizationParameters " + "'{parameter: [1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}'", + cwd=ros2_dir, + env=env, + timeout=60) + echo_stdout, _ = echo_process.communicate(timeout=60) + finally: + if echo_process.poll() is None: + echo_process.terminate() + echo_process.wait(timeout=10) + + self.assertIn("solution", echo_stdout) + self.assertIn("solve_time_ms", echo_stdout) + finally: + if node_process.poll() is None: + node_process.terminate() + try: + node_process.wait(timeout=10) + except subprocess.TimeoutExpired: + node_process.kill() + node_process.wait(timeout=10) + + +if __name__ == '__main__': + logging.getLogger('retry').setLevel(logging.ERROR) + unittest.main() From cc6fce9a6091ffd28185dc6542b5c42974189fad Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 
Mar 2026 13:42:44 +0000 Subject: [PATCH 026/133] fix GA dependencies (ROS2 tests) --- .github/workflows/ci.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 46176b15..c02690a4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,6 +28,17 @@ jobs: steps: - uses: actions/checkout@v5 + - name: Install container bootstrap dependencies + run: | + apt-get update + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + curl \ + ca-certificates \ + git \ + gnupg2 \ + locales \ + lsb-release + - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: stable From 620f9a7e28ae1336476fe0f0416610b99458bdc2 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 13:50:20 +0000 Subject: [PATCH 027/133] fix issues in ci.yml --- .github/workflows/ci.yml | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c02690a4..63ee634b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,17 +28,6 @@ jobs: steps: - uses: actions/checkout@v5 - - name: Install container bootstrap dependencies - run: | - apt-get update - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - curl \ - ca-certificates \ - git \ - gnupg2 \ - locales \ - lsb-release - - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: stable @@ -122,11 +111,23 @@ jobs: timeout-minutes: 45 container: image: ubuntu:noble + options: --user 0 env: DO_DOCKER: 0 steps: - uses: actions/checkout@v5 + - name: Install container bootstrap dependencies + run: | + apt-get update + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + curl \ + ca-certificates \ + git \ + gnupg2 \ + locales \ + lsb-release + - uses: actions-rust-lang/setup-rust-toolchain@v1 with: toolchain: stable From 3e92b92555c2b6424bcb14c4b305b882ef8d3613 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 
2026 13:56:02 +0000 Subject: [PATCH 028/133] wip: working on GA issues --- .github/workflows/ci.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 63ee634b..2d9b0785 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -136,8 +136,6 @@ jobs: - uses: actions/setup-python@v6 with: python-version: "3.12" - cache: "pip" - cache-dependency-path: open-codegen/setup.py - name: Setup ROS 2 uses: ros-tooling/setup-ros@v0.7 From 9d3781c7c4fdeeef82720f6884a85531e9a679bc Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 14:02:25 +0000 Subject: [PATCH 029/133] GA actions configuration --- ci/script.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/ci/script.sh b/ci/script.sh index dc8bdcef..12fc8418 100755 --- a/ci/script.sh +++ b/ci/script.sh @@ -62,6 +62,18 @@ run_python_core_tests() { run_python_ros2_tests() { export PYTHONPATH=. + if [ -n "${ROS_DISTRO:-}" ] && [ -f "/opt/ros/${ROS_DISTRO}/setup.bash" ]; then + # setup-ros installs the ROS underlay but does not source it for our shell + source "/opt/ros/${ROS_DISTRO}/setup.bash" + elif [ -f "/opt/ros/jazzy/setup.bash" ]; then + source "/opt/ros/jazzy/setup.bash" + else + echo "ROS2 environment setup script not found" + exit 1 + fi + + command -v ros2 >/dev/null + command -v colcon >/dev/null python -W ignore test/test_ros2.py -v } From 8b8068d25f7626c27eca92e1b3783dcfa6770322 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 14:10:56 +0000 Subject: [PATCH 030/133] fix issue in script.sh (set -u/+u) --- ci/script.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci/script.sh b/ci/script.sh index 12fc8418..f188eb95 100755 --- a/ci/script.sh +++ b/ci/script.sh @@ -62,15 +62,18 @@ run_python_core_tests() { run_python_ros2_tests() { export PYTHONPATH=. 
+ set +u if [ -n "${ROS_DISTRO:-}" ] && [ -f "/opt/ros/${ROS_DISTRO}/setup.bash" ]; then # setup-ros installs the ROS underlay but does not source it for our shell source "/opt/ros/${ROS_DISTRO}/setup.bash" elif [ -f "/opt/ros/jazzy/setup.bash" ]; then source "/opt/ros/jazzy/setup.bash" else + set -u echo "ROS2 environment setup script not found" exit 1 fi + set -u command -v ros2 >/dev/null command -v colcon >/dev/null From f7c5799b71ecdc03cb2460fa37127f3cfe122418 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 14:18:07 +0000 Subject: [PATCH 031/133] GA: let's try again --- .github/workflows/ci.yml | 2 ++ open-codegen/test/test_ros2.py | 12 ++++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2d9b0785..75a7237b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -121,6 +121,8 @@ jobs: run: | apt-get update DEBIAN_FRONTEND=noninteractive apt-get install -y \ + build-essential \ + cmake \ curl \ ca-certificates \ git \ diff --git a/open-codegen/test/test_ros2.py b/open-codegen/test/test_ros2.py index 5609ec48..606f0006 100644 --- a/open-codegen/test/test_ros2.py +++ b/open-codegen/test/test_ros2.py @@ -100,14 +100,22 @@ def ros2_test_env(cls): @staticmethod def _bash(command, cwd, env=None, timeout=180, check=True): """Run a bash command and return the completed process.""" - return subprocess.run( + result = subprocess.run( ["/bin/bash", "-lc", command], cwd=cwd, env=env, text=True, capture_output=True, timeout=timeout, - check=check) + check=False) + if check and result.returncode != 0: + raise AssertionError( + "Command failed with exit code " + f"{result.returncode}: {command}\n" + f"stdout:\n{result.stdout}\n" + f"stderr:\n{result.stderr}" + ) + return result def test_ros2_package_generation(self): """Verify the ROS2 package files are generated.""" From 255a15210f33a80f0b6768089da4ee290fb9716f Mon Sep 17 00:00:00 2001 From: Pantelis 
Sopasakis Date: Wed, 25 Mar 2026 14:42:10 +0000 Subject: [PATCH 032/133] ROS2 tests work OK locally --- open-codegen/test/test_ros2.py | 96 +++++++++++++++++++++------------- 1 file changed, 60 insertions(+), 36 deletions(-) diff --git a/open-codegen/test/test_ros2.py b/open-codegen/test/test_ros2.py index 606f0006..573777dc 100644 --- a/open-codegen/test/test_ros2.py +++ b/open-codegen/test/test_ros2.py @@ -1,5 +1,6 @@ import logging import os +import signal import shutil import subprocess import time @@ -97,11 +98,24 @@ def ros2_test_env(cls): env.pop("ROS_LOCALHOST_ONLY", None) return env - @staticmethod - def _bash(command, cwd, env=None, timeout=180, check=True): - """Run a bash command and return the completed process.""" + @classmethod + def ros2_shell(cls): + """Return the preferred shell executable and setup script for ROS2 commands.""" + shell_path = "/bin/bash" + setup_script = "install/setup.bash" + preferred_shell = os.path.basename(os.environ.get("SHELL", "")) + zsh_setup = os.path.join(cls.ros2_package_dir(), "install", "setup.zsh") + if preferred_shell == "zsh" and os.path.isfile(zsh_setup): + shell_path = "/bin/zsh" + setup_script = "install/setup.zsh" + return shell_path, setup_script + + @classmethod + def _run_shell(cls, command, cwd, env=None, timeout=180, check=True): + """Run a command in the preferred shell and return the completed process.""" + shell_path, _ = cls.ros2_shell() result = subprocess.run( - ["/bin/bash", "-lc", command], + [shell_path, "-lc", command], cwd=cwd, env=env, text=True, @@ -117,6 +131,28 @@ def _bash(command, cwd, env=None, timeout=180, check=True): ) return result + @staticmethod + def _terminate_process(process, timeout=10): + """Terminate a spawned shell process and its children, then collect output.""" + if process.poll() is None: + try: + os.killpg(process.pid, signal.SIGTERM) + except ProcessLookupError: + pass + try: + process.wait(timeout=timeout) + except subprocess.TimeoutExpired: + try: + 
os.killpg(process.pid, signal.SIGKILL) + except ProcessLookupError: + pass + process.wait(timeout=timeout) + try: + stdout, _ = process.communicate(timeout=1) + except subprocess.TimeoutExpired: + stdout = "" + return stdout or "" + def test_ros2_package_generation(self): """Verify the ROS2 package files are generated.""" ros2_dir = self.ros2_package_dir() @@ -129,9 +165,10 @@ def test_generated_ros2_package_works(self): """Build, run, and call the generated ROS2 package.""" ros2_dir = self.ros2_package_dir() env = self.ros2_test_env() + shell_path, setup_script = self.ros2_shell() - self._bash( - f"source install/setup.bash >/dev/null 2>&1 || true; " + self._run_shell( + f"source {setup_script} >/dev/null 2>&1 || true; " f"colcon build --packages-select {self.PACKAGE_NAME}", cwd=ros2_dir, env=env, @@ -139,30 +176,31 @@ def test_generated_ros2_package_works(self): node_process = subprocess.Popen( [ - "/bin/bash", + shell_path, "-lc", - f"source install/setup.bash && " + f"source {setup_script} && " f"ros2 run {self.PACKAGE_NAME} {self.NODE_NAME}" ], cwd=ros2_dir, env=env, text=True, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + stderr=subprocess.STDOUT, + start_new_session=True) try: node_seen = False topics_seen = False for _ in range(6): - node_result = self._bash( - "source install/setup.bash && " + node_result = self._run_shell( + f"source {setup_script} && " "ros2 node list --no-daemon --spin-time 5", cwd=ros2_dir, env=env, timeout=30, check=False) - topic_result = self._bash( - "source install/setup.bash && " + topic_result = self._run_shell( + f"source {setup_script} && " "ros2 topic list --no-daemon --spin-time 5", cwd=ros2_dir, env=env, @@ -175,16 +213,7 @@ def test_generated_ros2_package_works(self): time.sleep(1) if not (node_seen and topics_seen): - process_output = "" - if node_process.poll() is None: - node_process.terminate() - try: - node_process.wait(timeout=10) - except subprocess.TimeoutExpired: - node_process.kill() - 
node_process.wait(timeout=10) - if node_process.stdout is not None: - process_output = node_process.stdout.read() + process_output = self._terminate_process(node_process) self.fail( "Generated ROS2 node did not become discoverable.\n" f"ros2 node list output:\n{node_result.stdout}\n" @@ -193,21 +222,22 @@ def test_generated_ros2_package_works(self): echo_process = subprocess.Popen( [ - "/bin/bash", + shell_path, "-lc", - "source install/setup.bash && " + f"source {setup_script} && " "ros2 topic echo /result --once" ], cwd=ros2_dir, env=env, text=True, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + stderr=subprocess.STDOUT, + start_new_session=True) try: time.sleep(1) - self._bash( - "source install/setup.bash && " + self._run_shell( + f"source {setup_script} && " "ros2 topic pub --once /parameters " f"{self.PACKAGE_NAME}/msg/OptimizationParameters " "'{parameter: [1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}'", @@ -217,19 +247,13 @@ def test_generated_ros2_package_works(self): echo_stdout, _ = echo_process.communicate(timeout=60) finally: if echo_process.poll() is None: - echo_process.terminate() - echo_process.wait(timeout=10) + self._terminate_process(echo_process) self.assertIn("solution", echo_stdout) self.assertIn("solve_time_ms", echo_stdout) finally: if node_process.poll() is None: - node_process.terminate() - try: - node_process.wait(timeout=10) - except subprocess.TimeoutExpired: - node_process.kill() - node_process.wait(timeout=10) + self._terminate_process(node_process) if __name__ == '__main__': From ea7a2737e4436c85567ef03171af6892a43d1030 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 14:44:55 +0000 Subject: [PATCH 033/133] fix GA issues (hopefully) --- open-codegen/opengen/templates/ros2/CMakeLists.txt | 7 +++++++ open-codegen/test/test_ros2.py | 6 +++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/open-codegen/opengen/templates/ros2/CMakeLists.txt 
b/open-codegen/opengen/templates/ros2/CMakeLists.txt index 398512b2..5e786b5e 100644 --- a/open-codegen/opengen/templates/ros2/CMakeLists.txt +++ b/open-codegen/opengen/templates/ros2/CMakeLists.txt @@ -7,6 +7,13 @@ endif() find_package(ament_cmake REQUIRED) find_package(rclcpp REQUIRED) +set(Python3_FIND_VIRTUALENV FIRST) +if(NOT Python3_EXECUTABLE AND DEFINED ENV{VIRTUAL_ENV}) + set(_open_python3_executable "$ENV{VIRTUAL_ENV}/bin/python") + if(EXISTS "${_open_python3_executable}") + set(Python3_EXECUTABLE "${_open_python3_executable}") + endif() +endif() find_package(Python3 REQUIRED COMPONENTS Interpreter Development NumPy) set(Python_EXECUTABLE ${Python3_EXECUTABLE}) set(Python_INCLUDE_DIRS ${Python3_INCLUDE_DIRS}) diff --git a/open-codegen/test/test_ros2.py b/open-codegen/test/test_ros2.py index 573777dc..3ae53b81 100644 --- a/open-codegen/test/test_ros2.py +++ b/open-codegen/test/test_ros2.py @@ -1,8 +1,10 @@ import logging import os +import shlex import signal import shutil import subprocess +import sys import time import unittest @@ -166,10 +168,12 @@ def test_generated_ros2_package_works(self): ros2_dir = self.ros2_package_dir() env = self.ros2_test_env() shell_path, setup_script = self.ros2_shell() + python_executable = shlex.quote(sys.executable) self._run_shell( f"source {setup_script} >/dev/null 2>&1 || true; " - f"colcon build --packages-select {self.PACKAGE_NAME}", + f"colcon build --packages-select {self.PACKAGE_NAME} " + f"--cmake-args -DPython3_EXECUTABLE={python_executable}", cwd=ros2_dir, env=env, timeout=600) From 918ba05a388a33a9f422dfb2b3e9a97de4a397c4 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 14:53:28 +0000 Subject: [PATCH 034/133] CI issue: import empy --- ci/script.sh | 5 +++++ open-codegen/opengen/templates/ros2/CMakeLists.txt | 3 +++ 2 files changed, 8 insertions(+) diff --git a/ci/script.sh b/ci/script.sh index f188eb95..85a1a0b1 100755 --- a/ci/script.sh +++ b/ci/script.sh @@ -75,6 +75,11 @@ 
run_python_ros2_tests() { fi set -u + if ! python -c "import em" >/dev/null 2>&1; then + # rosidl_adapter imports the `em` module from Empy during message generation + python -m pip install empy + fi + command -v ros2 >/dev/null command -v colcon >/dev/null python -W ignore test/test_ros2.py -v diff --git a/open-codegen/opengen/templates/ros2/CMakeLists.txt b/open-codegen/opengen/templates/ros2/CMakeLists.txt index 5e786b5e..10d54220 100644 --- a/open-codegen/opengen/templates/ros2/CMakeLists.txt +++ b/open-codegen/opengen/templates/ros2/CMakeLists.txt @@ -7,6 +7,9 @@ endif() find_package(ament_cmake REQUIRED) find_package(rclcpp REQUIRED) + +# tells CMake's FindPython3 to prefer a venv if one is active +# (instead of the system-wide python) set(Python3_FIND_VIRTUALENV FIRST) if(NOT Python3_EXECUTABLE AND DEFINED ENV{VIRTUAL_ENV}) set(_open_python3_executable "$ENV{VIRTUAL_ENV}/bin/python") From e66d975104f24062e6c7f536384279c92dd3b221 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 15:00:40 +0000 Subject: [PATCH 035/133] trying to fix GA issues --- ci/script.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/ci/script.sh b/ci/script.sh index 85a1a0b1..b8d988b7 100755 --- a/ci/script.sh +++ b/ci/script.sh @@ -75,9 +75,13 @@ run_python_ros2_tests() { fi set -u - if ! python -c "import em" >/dev/null 2>&1; then - # rosidl_adapter imports the `em` module from Empy during message generation - python -m pip install empy + if ! python -c "import em, lark, catkin_pkg" >/dev/null 2>&1; then + # ROS2 build helpers run under the active Python interpreter. The test venv + # already has NumPy from `pip install .`, but we also need the ROS-side + # Python packages used during interface and package metadata generation. + # Empy 4 has broken older ROS message generators in the past, so keep it + # on the 3.x API here. 
+ python -m pip install "empy<4" lark catkin_pkg fi command -v ros2 >/dev/null From ec34d392f4d5385514c3d0fb9d7c1e4191cac200 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 15:24:44 +0000 Subject: [PATCH 036/133] rearrange GA jobs --- .github/workflows/ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 75a7237b..44e55725 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,7 +45,6 @@ jobs: name: Python tests (${{ matrix.name }}) needs: - rust_tests - - ros2_tests runs-on: ${{ matrix.os }} timeout-minutes: 45 strategy: @@ -106,7 +105,7 @@ jobs: ros2_tests: name: ROS2 tests - needs: rust_tests + needs: python_tests runs-on: ubuntu-latest timeout-minutes: 45 container: From 27a7a12957626c79730a35214fd16631dbc7099f Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 15:54:50 +0000 Subject: [PATCH 037/133] further testing of ROS2 --- open-codegen/test/test_ros2.py | 148 +++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) diff --git a/open-codegen/test/test_ros2.py b/open-codegen/test/test_ros2.py index 3ae53b81..6856b610 100644 --- a/open-codegen/test/test_ros2.py +++ b/open-codegen/test/test_ros2.py @@ -12,6 +12,154 @@ import opengen as og +class BuildConfigurationRos2TestCase(unittest.TestCase): + """Unit tests for ROS2-specific build configuration behavior.""" + + def test_with_ros2_sets_ros2_config_and_enables_c_bindings(self): + """`with_ros2` should store the ROS2 config and enable C bindings.""" + ros2_config = og.config.RosConfiguration().with_package_name("unit_test_ros2_pkg") + build_config = og.config.BuildConfiguration().with_ros2(ros2_config) + + self.assertIs(build_config.ros2_config, ros2_config) + self.assertIsNone(build_config.ros_config) + self.assertTrue(build_config.build_c_bindings) + + build_dict = build_config.to_dict() + self.assertIn("ros2_config", build_dict) + 
self.assertNotIn("ros_config", build_dict) + self.assertEqual("unit_test_ros2_pkg", build_dict["ros2_config"]["package_name"]) + + def test_ros_and_ros2_configs_clear_each_other(self): + """Selecting ROS1 or ROS2 should clear the other package configuration.""" + ros1_config = og.config.RosConfiguration().with_package_name("unit_test_ros_pkg") + ros2_config = og.config.RosConfiguration().with_package_name("unit_test_ros2_pkg") + build_config = og.config.BuildConfiguration() + + build_config.with_ros2(ros2_config) + self.assertIs(build_config.ros2_config, ros2_config) + self.assertIsNone(build_config.ros_config) + + build_config.with_ros(ros1_config) + self.assertIs(build_config.ros_config, ros1_config) + self.assertIsNone(build_config.ros2_config) + + build_config.with_ros2(ros2_config) + self.assertIs(build_config.ros2_config, ros2_config) + self.assertIsNone(build_config.ros_config) + + +class Ros2TemplateCustomizationTestCase(unittest.TestCase): + """Generation tests for custom ROS2 configuration values.""" + + TEST_DIR = ".python_test_build" + OPTIMIZER_NAME = "rosenbrock_ros2_custom" + PACKAGE_NAME = "custom_parametric_optimizer_ros2" + NODE_NAME = "custom_open_node_ros2" + DESCRIPTION = "custom ROS2 package for generation tests" + RESULT_TOPIC = "custom_result_topic" + PARAMS_TOPIC = "custom_params_topic" + RATE = 17.5 + RESULT_QUEUE_SIZE = 11 + PARAMS_QUEUE_SIZE = 13 + + @staticmethod + def get_open_local_absolute_path(): + """Return the absolute path to the local OpEn repository root.""" + cwd = os.getcwd() + return cwd.split('open-codegen')[0] + + @classmethod + def solverConfig(cls): + """Return a solver configuration shared by the ROS2 generation tests.""" + return Ros2BuildTestCase.solverConfig() + + @classmethod + def setUpCustomRos2PackageGeneration(cls): + """Generate a ROS2 package with non-default configuration values.""" + u = cs.MX.sym("u", 5) + p = cs.MX.sym("p", 2) + phi = og.functions.rosenbrock(u, p) + c = cs.vertcat(1.5 * u[0] - u[1], + 
cs.fmax(0.0, u[2] - u[3] + 0.1)) + bounds = og.constraints.Ball2(None, 1.5) + meta = og.config.OptimizerMeta() \ + .with_optimizer_name(cls.OPTIMIZER_NAME) + problem = og.builder.Problem(u, p, phi) \ + .with_constraints(bounds) \ + .with_penalty_constraints(c) + ros_config = og.config.RosConfiguration() \ + .with_package_name(cls.PACKAGE_NAME) \ + .with_node_name(cls.NODE_NAME) \ + .with_description(cls.DESCRIPTION) \ + .with_rate(cls.RATE) \ + .with_queue_sizes(cls.RESULT_QUEUE_SIZE, cls.PARAMS_QUEUE_SIZE) \ + .with_publisher_subtopic(cls.RESULT_TOPIC) \ + .with_subscriber_subtopic(cls.PARAMS_TOPIC) + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=cls.get_open_local_absolute_path()) \ + .with_build_directory(cls.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_build_c_bindings() \ + .with_ros2(ros_config) + og.builder.OpEnOptimizerBuilder(problem, + metadata=meta, + build_configuration=build_config, + solver_configuration=cls.solverConfig()) \ + .build() + + @classmethod + def setUpClass(cls): + """Generate the custom ROS2 package once before running tests.""" + cls.setUpCustomRos2PackageGeneration() + + @classmethod + def ros2_package_dir(cls): + """Return the filesystem path to the generated custom ROS2 package.""" + return os.path.join( + cls.TEST_DIR, + cls.OPTIMIZER_NAME, + cls.PACKAGE_NAME) + + def test_custom_ros2_configuration_is_rendered_into_generated_files(self): + """Custom ROS2 config values should appear in the generated package files.""" + ros2_dir = self.ros2_package_dir() + + with open(os.path.join(ros2_dir, "package.xml"), encoding="utf-8") as f: + package_xml = f.read() + self.assertIn(f"{self.PACKAGE_NAME}", package_xml) + self.assertIn(f"{self.DESCRIPTION}", package_xml) + + with open(os.path.join(ros2_dir, "include", "open_optimizer.hpp"), encoding="utf-8") as f: + optimizer_header = f.read() + self.assertIn(f'#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_NODE_NAME 
"{self.NODE_NAME}"', + optimizer_header) + self.assertIn(f'#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_RESULT_TOPIC "{self.RESULT_TOPIC}"', + optimizer_header) + self.assertIn(f'#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_PARAMS_TOPIC "{self.PARAMS_TOPIC}"', + optimizer_header) + self.assertIn(f"#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_RATE {self.RATE}", + optimizer_header) + self.assertIn( + f"#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_RESULT_TOPIC_QUEUE_SIZE {self.RESULT_QUEUE_SIZE}", + optimizer_header) + self.assertIn( + f"#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_PARAMS_TOPIC_QUEUE_SIZE {self.PARAMS_QUEUE_SIZE}", + optimizer_header) + + with open(os.path.join(ros2_dir, "config", "open_params.yaml"), encoding="utf-8") as f: + params_yaml = f.read() + self.assertIn(f'result_topic: "{self.RESULT_TOPIC}"', params_yaml) + self.assertIn(f'params_topic: "{self.PARAMS_TOPIC}"', params_yaml) + self.assertIn(f"rate: {self.RATE}", params_yaml) + + with open(os.path.join(ros2_dir, "launch", "open_optimizer.launch.py"), encoding="utf-8") as f: + launch_file = f.read() + self.assertIn(f'package="{self.PACKAGE_NAME}"', launch_file) + self.assertIn(f'executable="{self.NODE_NAME}"', launch_file) + self.assertIn(f'name="{self.NODE_NAME}"', launch_file) + self.assertIn(f'FindPackageShare("{self.PACKAGE_NAME}")', launch_file) + + class Ros2BuildTestCase(unittest.TestCase): """Integration tests for auto-generated ROS2 packages.""" From 48d3bdfba752385d47621e1ba7268a85d46d614e Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 16:07:52 +0000 Subject: [PATCH 038/133] update changelog - towards opengen v0.11.0 --- open-codegen/CHANGELOG.md | 12 ++++++++++++ open-codegen/publish-pypi.sh | 0 2 files changed, 12 insertions(+) mode change 100644 => 100755 open-codegen/publish-pypi.sh diff --git a/open-codegen/CHANGELOG.md b/open-codegen/CHANGELOG.md index 75b7a86d..eb715853 100644 --- a/open-codegen/CHANGELOG.md +++ 
b/open-codegen/CHANGELOG.md @@ -8,6 +8,18 @@ and this project adheres to [Semantic Versioning](http://semver.org/). Note: This is the Changelog file of `opengen` - the Python interface of OpEn +## [0.11.0] - 2026-03-25 + +### Added + +- ROS2 package generation support via `BuildConfiguration.with_ros2(...)`, including auto-generated ROS2 templates, launcher, messages, and package wrapper code +- Dedicated ROS2 tests covering package generation, build configuration behavior, rendered custom package settings, and end-to-end execution of a generated ROS2 node + +### Changed + +- Extended `RosConfiguration` so it can be used for both ROS and ROS2 package generation + + ## [0.10.1] - 2026-03-25 diff --git a/open-codegen/publish-pypi.sh b/open-codegen/publish-pypi.sh old mode 100644 new mode 100755 From b828f2436fb68432b19c85f79b142de7768702b8 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 16:08:05 +0000 Subject: [PATCH 039/133] minor --- open-codegen/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/open-codegen/CHANGELOG.md b/open-codegen/CHANGELOG.md index eb715853..e6940a39 100644 --- a/open-codegen/CHANGELOG.md +++ b/open-codegen/CHANGELOG.md @@ -8,7 +8,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
Note: This is the Changelog file of `opengen` - the Python interface of OpEn -## [0.11.0] - 2026-03-25 +## [0.11.0] - Unreleased ### Added From 29f65f96ee019572ecdaf670d1b5d7cef50ecce8 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 16:08:29 +0000 Subject: [PATCH 040/133] bump version --- open-codegen/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/open-codegen/VERSION b/open-codegen/VERSION index 71172b43..05b845f9 100644 --- a/open-codegen/VERSION +++ b/open-codegen/VERSION @@ -1 +1 @@ -0.10.1 \ No newline at end of file +0.11.0a1 \ No newline at end of file From 7af46bad0a370fc70c0bc848044e01ac7fcd25c7 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 16:14:46 +0000 Subject: [PATCH 041/133] update publish-pypi.sh - warn if not on master and version not alpha - print version and branch --- open-codegen/publish-pypi.sh | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/open-codegen/publish-pypi.sh b/open-codegen/publish-pypi.sh index 1feca85b..ddaf594d 100755 --- a/open-codegen/publish-pypi.sh +++ b/open-codegen/publish-pypi.sh @@ -4,9 +4,30 @@ set -eu # This script facilitates releasing a new version of opengen to PyPI. # It expects a local virtual environment at ./venv with publishing tools. -echo "[OpEnGen] Checking out master" -git checkout master -git pull origin master +version=$(cat VERSION) +current_branch=$(git rev-parse --abbrev-ref HEAD) + +is_alpha_version=false +case "$version" in + *a[0-9]*) + is_alpha_version=true + ;; +esac + +if [ "$current_branch" != "master" ] && [ "$is_alpha_version" = false ]; then + echo "[OpEnGen] Warning: version $version is not an alpha release and the current branch is '$current_branch' (not 'master')." + printf "Proceed anyway? 
[y/N] " + read -r response + case "$response" in + [yY][eE][sS]|[yY]) + echo "[OpEnGen] Proceeding from branch '$current_branch'" + ;; + *) + echo "[OpEnGen] Publish cancelled" + exit 0 + ;; + esac +fi echo "[OpEnGen] Cleaning previous build artifacts" rm -rf ./build ./dist ./opengen.egg-info @@ -23,7 +44,7 @@ python -m build echo "[OpEnGen] Checking distributions with twine" python -m twine check dist/* -echo "[OpEnGen] Uploading to PyPI..." +echo "[OpEnGen] You are about to publish version $version from branch '$current_branch'." printf "Are you sure? [y/N] " read -r response case "$response" in @@ -37,6 +58,5 @@ case "$response" in esac echo "[OpEnGen] Don't forget to create a tag; run:" -version=$(cat VERSION) echo "\$ git tag -a opengen-$version -m 'opengen-$version'" echo "\$ git push --tags" From 874d2bbaa391a82fa3a6c76c077a0aba4812887e Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 18:36:05 +0000 Subject: [PATCH 042/133] [ci skip] update website docs --- docs/python-ros.md | 2 +- docs/python-ros2.mdx | 186 +++++++++++++++++++++++++++++++++++++++++++ website/sidebars.js | 1 + 3 files changed, 188 insertions(+), 1 deletion(-) create mode 100644 docs/python-ros2.mdx diff --git a/docs/python-ros.md b/docs/python-ros.md index f06fa078..43a7f4d8 100644 --- a/docs/python-ros.md +++ b/docs/python-ros.md @@ -2,7 +2,7 @@ id: python-ros title: Generation of ROS packages sidebar_label: ROS packages -description: Code generation for ROS packages using OpEn in Python +description: Code generation for ROS packages using opengen --- ## What is ROS diff --git a/docs/python-ros2.mdx b/docs/python-ros2.mdx new file mode 100644 index 00000000..e875716c --- /dev/null +++ b/docs/python-ros2.mdx @@ -0,0 +1,186 @@ +--- +id: python-ros2 +title: Generation of ROS2 packages +sidebar_label: ROS2 packages +description: Code generation for ROS2 packages using opengen +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +:::note Info 
+The functionality presented here was introduced in `opengen` version [`0.11.0a1`](https://pypi.org/project/opengen/#history). +::: + +## What is ROS2 + +[ROS2](https://docs.ros.org/en/jazzy/index.html) is the successor of the Robot Operating System (ROS). It provides tools, libraries, and communication mechanisms that make it easier to build distributed robotic applications. + +In ROS2, functionality is organised in **nodes** which exchange data by publishing and subscribing to **topics** using typed **messages**. This makes ROS2 a natural fit for connecting optimizers, controllers, estimators, and sensors in robotics systems. + +## ROS2 + OpEn + +OpEn can generate ready-to-use ROS2 packages directly from a parametric optimizer. The generated package exposes the optimizer as a ROS2 node, includes the required message definitions, and provides the files needed to build, configure, and launch it inside a ROS2 workspace. + +The input and output messages are the same as in the [ROS1 package documentation](./python-ros#messages). + +## Configuration Parameters + +The configuration parameters are the same as in the [ROS1 package documentation](./python-ros#configuration-parameters): you can configure the node rate, the input topic name, and the output topic name. + +In ROS2, these settings are stored using the ROS2 parameter-file format in `config/open_params.yaml`: + +```yaml +/**: + ros__parameters: + result_topic: "result" + params_topic: "parameters" + rate: 10 +``` + +## Code generation + +To generate a ROS2 package from Python, create a `RosConfiguration` object and attach it to the build configuration using `.with_ros2(...)`. 
+ +### Example + +```py +import opengen as og +import casadi.casadi as cs + +u = cs.SX.sym("u", 5) +p = cs.SX.sym("p", 2) +phi = og.functions.rosenbrock(u, p) + +problem = og.builder.Problem(u, p, phi) \ + .with_constraints(og.constraints.Ball2(None, 1.5)) + +meta = og.config.OptimizerMeta() \ + .with_optimizer_name("rosenbrock_ros2") + +ros2_config = og.config.RosConfiguration() \ + .with_package_name("parametric_optimizer_ros2") \ + .with_node_name("open_node_ros2") \ + .with_rate(10) + +build_config = og.config.BuildConfiguration() \ + .with_build_directory("my_optimizers") \ + .with_ros2(ros2_config) + +builder = og.builder.OpEnOptimizerBuilder(problem, meta, build_config) +builder.build() +``` + +Note the use of `with_ros2` and note that `RosConfiguration` is the same config +class as in [ROS1](./python-ros). +This generates the optimizer in `my_optimizers/rosenbrock_ros2`, and the ROS2 +package is created inside that directory as `parametric_optimizer_ros2`. + + +## Use the auto-generated ROS2 package + +OpEn generates a `README.md` file inside the generated ROS2 package with detailed instructions. In brief, the workflow is: + +1. Build the package with `colcon build` +2. Source the generated workspace setup script +3. Run the node with `ros2 run` +4. 
Publish optimization requests on the input topic and read results from the output topic + +For example, from inside the generated package directory: + + + + +```bash +colcon build --packages-select parametric_optimizer_ros2 +source install/setup.bash +ros2 run parametric_optimizer_ros2 open_node_ros2 +``` + + + + +```bash +colcon build --packages-select parametric_optimizer_ros2 +source install/setup.zsh +ros2 run parametric_optimizer_ros2 open_node_ros2 +``` + + + + +In a second terminal: + + + + +```bash +source install/setup.bash +ros2 topic pub --once /parameters parametric_optimizer_ros2/msg/OptimizationParameters \ + "{parameter: [1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}" +ros2 topic echo /result +``` + + + + +```bash +source install/setup.zsh +ros2 topic pub --once /parameters parametric_optimizer_ros2/msg/OptimizationParameters \ + "{parameter: [1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}" +ros2 topic echo /result +``` + + + + +Instead of starting the node with `ros2 run`, you can also use the generated launch file: + +```bash +ros2 launch parametric_optimizer_ros2 open_optimizer.launch.py +``` + +
+ See the launch file + +

The launch file is as follows

+ + ```python + # file open_optimizer.launch.py + from launch import LaunchDescription + from launch.substitutions import PathJoinSubstitution + from launch_ros.actions import Node + from launch_ros.substitutions import FindPackageShare + + + def generate_launch_description(): + return LaunchDescription([ + Node( + package="custom_parametric_optimizer_ros2", + executable="custom_open_node_ros2", + name="custom_open_node_ros2", + output="screen", + parameters=[PathJoinSubstitution([ + FindPackageShare("custom_parametric_optimizer_ros2"), + "config", + "open_params.yaml", + ])], + ) + ]) + ``` +
+ +The launch file starts the auto-generated node and loads its parameters from `config/open_params.yaml`, where you can adjust settings such as the input topic, output topic, and node rate. + + +## Inside the ROS2 package + +The auto-generated ROS2 package contains everything needed to build and run the optimizer as a ROS2 node. + +- `msg/` contains the auto-generated message definitions, including `OptimizationParameters.msg` and `OptimizationResult.msg` +- `src/` contains the C++ node implementation that wraps the optimizer +- `include/` contains the corresponding C++ headers +- `config/open_params.yaml` stores runtime parameters such as the input topic, output topic, and node rate +- `launch/open_optimizer.launch.py` provides a ready-to-use ROS2 launch file +- `CMakeLists.txt` and `package.xml` define the ROS2 package and its build dependencies +- `README.md` contains package-specific build and usage instructions diff --git a/website/sidebars.js b/website/sidebars.js index 94729cf6..3b4c7d02 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -14,6 +14,7 @@ module.exports = { 'python-c', 'python-bindings', 'python-tcp-ip', + 'python-ros2', 'python-ros', 'python-examples', ], From 9d056552623f862b514fcd8d15b36fb238dff220 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 18:44:22 +0000 Subject: [PATCH 043/133] [ci skip] update ROS2 documentation --- docs/python-ros2.mdx | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/docs/python-ros2.mdx b/docs/python-ros2.mdx index e875716c..82ce1778 100644 --- a/docs/python-ros2.mdx +++ b/docs/python-ros2.mdx @@ -134,6 +134,41 @@ ros2 topic echo /result +If ROS2 cannot write to its default log directory, set an explicit writable log path before running the node: + +```bash +mkdir -p .ros_log +export ROS_LOG_DIR="$PWD/.ros_log" +``` + +:::note Troubleshooting +On some systems, the generated node may start but not appear in the ROS2 graph. 
If `ros2 topic pub` keeps printing `Waiting for at least 1 matching subscription(s)...`, set +`RMW_IMPLEMENTATION=rmw_fastrtps_cpp` in both terminals before sourcing the generated workspace and running any `ros2` commands: + +```bash +export RMW_IMPLEMENTATION=rmw_fastrtps_cpp +``` + +This should only be needed if ROS2 discovery is not working correctly with your default middleware. +::: + +To verify that the node is visible, you can run: + +```bash +ros2 node list --no-daemon --spin-time 5 +ros2 topic list --no-daemon --spin-time 5 +``` + +The first command should list the running node, for example `/open_node_ros2`. The second should list the available topics, including `/parameters` and `/result`. + +To read a single optimizer response, you can use: + +```bash +ros2 topic echo /result --once +``` + +This subscribes to the result topic, prints one `OptimizationResult` message, and then exits. + Instead of starting the node with `ros2 run`, you can also use the generated launch file: ```bash From 501a4e8cd00ff87c8490a6c36721afae439858f0 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 18:48:40 +0000 Subject: [PATCH 044/133] ros2: tighter testing - unit test for correctness of result --- open-codegen/test/test_ros2.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/open-codegen/test/test_ros2.py b/open-codegen/test/test_ros2.py index 6856b610..a415bb96 100644 --- a/open-codegen/test/test_ros2.py +++ b/open-codegen/test/test_ros2.py @@ -1,5 +1,6 @@ import logging import os +import re import shlex import signal import shutil @@ -402,6 +403,25 @@ def test_generated_ros2_package_works(self): self._terminate_process(echo_process) self.assertIn("solution", echo_stdout) + # A bit of integration testing: check whether the solver was able to + # solve the problem successfully + self.assertRegex( + echo_stdout, + r"solution:\s*\n(?:- .+\n)+", + msg=f"Expected a non-empty solution vector in result output:\n{echo_stdout}") + 
self.assertIn("status: 0", echo_stdout) + self.assertRegex( + echo_stdout, + r"inner_iterations:\s*[1-9]\d*", + msg=f"Expected a positive inner iteration count in result output:\n{echo_stdout}") + self.assertRegex( + echo_stdout, + r"outer_iterations:\s*[1-9]\d*", + msg=f"Expected a positive outer iteration count in result output:\n{echo_stdout}") + self.assertRegex( + echo_stdout, + r"cost:\s*-?\d+(?:\.\d+)?(?:e[+-]?\d+)?", + msg=f"Expected a numeric cost in result output:\n{echo_stdout}") self.assertIn("solve_time_ms", echo_stdout) finally: if node_process.poll() is None: From 3215c0ffadbccd5a0cc107f00dd852977f11de12 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 18:55:01 +0000 Subject: [PATCH 045/133] fix issue with ros2 rate - ros2 rate must be double, not int - update jinja2 template --- .../opengen/templates/ros2/open_params.yaml | 2 +- open-codegen/test/test_ros2.py | 217 ++++++++++-------- 2 files changed, 122 insertions(+), 97 deletions(-) diff --git a/open-codegen/opengen/templates/ros2/open_params.yaml b/open-codegen/opengen/templates/ros2/open_params.yaml index b1ae266e..adde7b45 100644 --- a/open-codegen/opengen/templates/ros2/open_params.yaml +++ b/open-codegen/opengen/templates/ros2/open_params.yaml @@ -2,4 +2,4 @@ ros__parameters: result_topic: "{{ros.publisher_subtopic}}" params_topic: "{{ros.subscriber_subtopic}}" - rate: {{ros.rate}} + rate: {{ "%.1f"|format(ros.rate) if ros.rate == (ros.rate|int) else ros.rate }} diff --git a/open-codegen/test/test_ros2.py b/open-codegen/test/test_ros2.py index a415bb96..af5617aa 100644 --- a/open-codegen/test/test_ros2.py +++ b/open-codegen/test/test_ros2.py @@ -304,35 +304,25 @@ def _terminate_process(process, timeout=10): stdout = "" return stdout or "" - def test_ros2_package_generation(self): - """Verify the ROS2 package files are generated.""" - ros2_dir = self.ros2_package_dir() - self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "package.xml"))) - 
self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "CMakeLists.txt"))) - self.assertTrue(os.path.isfile( - os.path.join(ros2_dir, "launch", "open_optimizer.launch.py"))) - - def test_generated_ros2_package_works(self): - """Build, run, and call the generated ROS2 package.""" - ros2_dir = self.ros2_package_dir() - env = self.ros2_test_env() - shell_path, setup_script = self.ros2_shell() + def _build_generated_package(self, ros2_dir, env): + """Build the generated ROS2 package with the active Python executable.""" python_executable = shlex.quote(sys.executable) - self._run_shell( - f"source {setup_script} >/dev/null 2>&1 || true; " + f"source {self.ros2_shell()[1]} >/dev/null 2>&1 || true; " f"colcon build --packages-select {self.PACKAGE_NAME} " f"--cmake-args -DPython3_EXECUTABLE={python_executable}", cwd=ros2_dir, env=env, timeout=600) - node_process = subprocess.Popen( + def _spawn_ros_process(self, command, ros2_dir, env): + """Start a long-running ROS2 command in a fresh process group.""" + shell_path, setup_script = self.ros2_shell() + return subprocess.Popen( [ shell_path, "-lc", - f"source {setup_script} && " - f"ros2 run {self.PACKAGE_NAME} {self.NODE_NAME}" + f"source {setup_script} && {command}" ], cwd=ros2_dir, env=env, @@ -341,92 +331,127 @@ def test_generated_ros2_package_works(self): stderr=subprocess.STDOUT, start_new_session=True) + def _wait_for_node_and_topics(self, ros2_dir, env): + """Wait until the generated ROS2 node and its topics become discoverable.""" + _, setup_script = self.ros2_shell() + node_result = None + topic_result = None + for _ in range(6): + node_result = self._run_shell( + f"source {setup_script} && " + "ros2 node list --no-daemon --spin-time 5", + cwd=ros2_dir, + env=env, + timeout=30, + check=False) + topic_result = self._run_shell( + f"source {setup_script} && " + "ros2 topic list --no-daemon --spin-time 5", + cwd=ros2_dir, + env=env, + timeout=30, + check=False) + node_seen = f"/{self.NODE_NAME}" in node_result.stdout + 
topics_seen = "/parameters" in topic_result.stdout and "/result" in topic_result.stdout + if node_seen and topics_seen: + return + time.sleep(1) + + self.fail( + "Generated ROS2 node did not become discoverable.\n" + f"ros2 node list output:\n{node_result.stdout if node_result else ''}\n" + f"ros2 topic list output:\n{topic_result.stdout if topic_result else ''}") + + def _assert_result_message(self, echo_stdout): + """Assert that the echoed result message indicates a successful solve.""" + self.assertIn("solution", echo_stdout) + # A bit of integration testing: check whether the solver was able to + # solve the problem successfully. + self.assertRegex( + echo_stdout, + r"solution:\s*\n(?:- .+\n)+", + msg=f"Expected a non-empty solution vector in result output:\n{echo_stdout}") + self.assertIn("status: 0", echo_stdout) + self.assertRegex( + echo_stdout, + r"inner_iterations:\s*[1-9]\d*", + msg=f"Expected a positive inner iteration count in result output:\n{echo_stdout}") + self.assertRegex( + echo_stdout, + r"outer_iterations:\s*[1-9]\d*", + msg=f"Expected a positive outer iteration count in result output:\n{echo_stdout}") + self.assertRegex( + echo_stdout, + r"cost:\s*-?\d+(?:\.\d+)?(?:e[+-]?\d+)?", + msg=f"Expected a numeric cost in result output:\n{echo_stdout}") + self.assertIn("solve_time_ms", echo_stdout) + + def _exercise_running_optimizer(self, ros2_dir, env): + """Publish one request and verify that one valid result message is returned.""" + _, setup_script = self.ros2_shell() + echo_process = self._spawn_ros_process("ros2 topic echo /result --once", ros2_dir, env) + try: - node_seen = False - topics_seen = False - for _ in range(6): - node_result = self._run_shell( - f"source {setup_script} && " - "ros2 node list --no-daemon --spin-time 5", - cwd=ros2_dir, - env=env, - timeout=30, - check=False) - topic_result = self._run_shell( - f"source {setup_script} && " - "ros2 topic list --no-daemon --spin-time 5", - cwd=ros2_dir, - env=env, - timeout=30, - 
check=False) - node_seen = f"/{self.NODE_NAME}" in node_result.stdout - topics_seen = "/parameters" in topic_result.stdout and "/result" in topic_result.stdout - if node_seen and topics_seen: - break - time.sleep(1) - - if not (node_seen and topics_seen): - process_output = self._terminate_process(node_process) - self.fail( - "Generated ROS2 node did not become discoverable.\n" - f"ros2 node list output:\n{node_result.stdout}\n" - f"ros2 topic list output:\n{topic_result.stdout}\n" - f"node process output:\n{process_output}") - - echo_process = subprocess.Popen( - [ - shell_path, - "-lc", - f"source {setup_script} && " - "ros2 topic echo /result --once" - ], + time.sleep(1) + self._run_shell( + f"source {setup_script} && " + "ros2 topic pub --once /parameters " + f"{self.PACKAGE_NAME}/msg/OptimizationParameters " + "'{parameter: [1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}'", cwd=ros2_dir, env=env, - text=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - start_new_session=True) + timeout=60) + echo_stdout, _ = echo_process.communicate(timeout=60) + finally: + if echo_process.poll() is None: + self._terminate_process(echo_process) - try: - time.sleep(1) - self._run_shell( - f"source {setup_script} && " - "ros2 topic pub --once /parameters " - f"{self.PACKAGE_NAME}/msg/OptimizationParameters " - "'{parameter: [1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}'", - cwd=ros2_dir, - env=env, - timeout=60) - echo_stdout, _ = echo_process.communicate(timeout=60) - finally: - if echo_process.poll() is None: - self._terminate_process(echo_process) - - self.assertIn("solution", echo_stdout) - # A bit of integration testing: check whether the solver was able to - # solve the problem successfully - self.assertRegex( - echo_stdout, - r"solution:\s*\n(?:- .+\n)+", - msg=f"Expected a non-empty solution vector in result output:\n{echo_stdout}") - self.assertIn("status: 0", 
echo_stdout) - self.assertRegex( - echo_stdout, - r"inner_iterations:\s*[1-9]\d*", - msg=f"Expected a positive inner iteration count in result output:\n{echo_stdout}") - self.assertRegex( - echo_stdout, - r"outer_iterations:\s*[1-9]\d*", - msg=f"Expected a positive outer iteration count in result output:\n{echo_stdout}") - self.assertRegex( - echo_stdout, - r"cost:\s*-?\d+(?:\.\d+)?(?:e[+-]?\d+)?", - msg=f"Expected a numeric cost in result output:\n{echo_stdout}") - self.assertIn("solve_time_ms", echo_stdout) + self._assert_result_message(echo_stdout) + + def test_ros2_package_generation(self): + """Verify the ROS2 package files are generated.""" + ros2_dir = self.ros2_package_dir() + self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "package.xml"))) + self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "CMakeLists.txt"))) + self.assertTrue(os.path.isfile( + os.path.join(ros2_dir, "launch", "open_optimizer.launch.py"))) + + def test_generated_ros2_package_works(self): + """Build, run, and call the generated ROS2 package.""" + ros2_dir = self.ros2_package_dir() + env = self.ros2_test_env() + self._build_generated_package(ros2_dir, env) + + node_process = self._spawn_ros_process( + f"ros2 run {self.PACKAGE_NAME} {self.NODE_NAME}", + ros2_dir, + env) + + try: + self._wait_for_node_and_topics(ros2_dir, env) + self._exercise_running_optimizer(ros2_dir, env) finally: if node_process.poll() is None: self._terminate_process(node_process) + def test_generated_ros2_launch_file_works(self): + """Build the package, launch the node, and verify the launch file works.""" + ros2_dir = self.ros2_package_dir() + env = self.ros2_test_env() + self._build_generated_package(ros2_dir, env) + + launch_process = self._spawn_ros_process( + f"ros2 launch {self.PACKAGE_NAME} open_optimizer.launch.py", + ros2_dir, + env) + + try: + self._wait_for_node_and_topics(ros2_dir, env) + self._exercise_running_optimizer(ros2_dir, env) + finally: + if launch_process.poll() is None: + 
self._terminate_process(launch_process) + if __name__ == '__main__': logging.getLogger('retry').setLevel(logging.ERROR) From 66f633792306edc85e6e2d0aa8fdd41d9ff55684 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 18:56:34 +0000 Subject: [PATCH 046/133] update ros2 docs --- docs/python-ros2.mdx | 16 +++++----- open-codegen/opengen/templates/ros2/README.md | 29 +++++++++++++++++-- 2 files changed, 35 insertions(+), 10 deletions(-) diff --git a/docs/python-ros2.mdx b/docs/python-ros2.mdx index 82ce1778..a5339462 100644 --- a/docs/python-ros2.mdx +++ b/docs/python-ros2.mdx @@ -35,7 +35,7 @@ In ROS2, these settings are stored using the ROS2 parameter-file format in `conf ros__parameters: result_topic: "result" params_topic: "parameters" - rate: 10 + rate: 10.0 ``` ## Code generation @@ -118,7 +118,7 @@ In a second terminal: source install/setup.bash ros2 topic pub --once /parameters parametric_optimizer_ros2/msg/OptimizationParameters \ "{parameter: [1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}" -ros2 topic echo /result +ros2 topic echo /result --once ``` @@ -128,7 +128,7 @@ ros2 topic echo /result source install/setup.zsh ros2 topic pub --once /parameters parametric_optimizer_ros2/msg/OptimizationParameters \ "{parameter: [1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}" -ros2 topic echo /result +ros2 topic echo /result --once ``` @@ -191,18 +191,18 @@ ros2 launch parametric_optimizer_ros2 open_optimizer.launch.py def generate_launch_description(): return LaunchDescription([ Node( - package="custom_parametric_optimizer_ros2", - executable="custom_open_node_ros2", - name="custom_open_node_ros2", + package="parametric_optimizer_ros2", + executable="open_node_ros2", + name="open_node_ros2", output="screen", parameters=[PathJoinSubstitution([ - FindPackageShare("custom_parametric_optimizer_ros2"), + FindPackageShare("parametric_optimizer_ros2"), "config", 
"open_params.yaml", ])], ) ]) - ``` + ``` The launch file starts the auto-generated node and loads its parameters from `config/open_params.yaml`, where you can adjust settings such as the input topic, output topic, and node rate. diff --git a/open-codegen/opengen/templates/ros2/README.md b/open-codegen/opengen/templates/ros2/README.md index b8e768cc..5862a1f0 100644 --- a/open-codegen/opengen/templates/ros2/README.md +++ b/open-codegen/opengen/templates/ros2/README.md @@ -10,7 +10,7 @@ From within the folder `{{ros.package_name}}`, compile with: ```bash colcon build --packages-select {{ros.package_name}} source install/setup.bash -# or source install/setup.zsh on MacOS +# or source install/setup.zsh if you are using zsh ``` If you want to activate logging (recommended), do @@ -33,6 +33,22 @@ source install/setup.bash ros2 run {{ros.package_name}} {{ros.node_name}} ``` +If ROS2 cannot write to its default log directory, set an explicit writable log +path: + +```bash +mkdir -p .ros_log +export ROS_LOG_DIR="$PWD/.ros_log" +``` + +If the node starts but does not appear in the ROS2 graph, try forcing Fast DDS +in both terminals before sourcing the generated workspace and running any +`ros2` commands: + +```bash +export RMW_IMPLEMENTATION=rmw_fastrtps_cpp +``` + In a second terminal, source the same environment and verify discovery: ```bash @@ -58,7 +74,7 @@ The result will be announced on the configured result topic (default: `/{{ros.publisher_subtopic}}`): ```bash -ros2 topic echo /{{ros.publisher_subtopic}} +ros2 topic echo /{{ros.publisher_subtopic}} --once ``` To get the optimal solution you can do: @@ -67,6 +83,15 @@ To get the optimal solution you can do: ros2 topic echo /{{ros.publisher_subtopic}} --field solution ``` +You can also start the node using the generated launch file: + +```bash +ros2 launch {{ros.package_name}} open_optimizer.launch.py +``` + +The launch file loads its runtime parameters from +[`config/open_params.yaml`](config/open_params.yaml). 
+ ## Messages From 25868c7d24ab14008c3978875963fc9425c982fd Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Wed, 25 Mar 2026 21:44:19 +0000 Subject: [PATCH 047/133] [docit][ci skip] update website docs - update docs on ros2 - promote ros2 on main page - include robot icon from flaticon - lang support for msg in prism --- docs/python-ros2.mdx | 76 +++++++++++-------- website/src/css/custom.css | 75 +++++++++++++++++- website/src/pages/index.js | 58 ++++++++++++++ website/src/theme/prism-include-languages.js | 38 ++++++++++ website/static/img/ros2-robot.png | Bin 0 -> 14224 bytes 5 files changed, 214 insertions(+), 33 deletions(-) create mode 100644 website/src/theme/prism-include-languages.js create mode 100644 website/static/img/ros2-robot.png diff --git a/docs/python-ros2.mdx b/docs/python-ros2.mdx index a5339462..73a7b000 100644 --- a/docs/python-ros2.mdx +++ b/docs/python-ros2.mdx @@ -167,44 +167,58 @@ To read a single optimizer response, you can use: ros2 topic echo /result --once ``` -This subscribes to the result topic, prints one `OptimizationResult` message, and then exits. +This subscribes to the result topic, prints one `OptimizationResult` message, and then exits. +The above command will return a message that looks as follows -Instead of starting the node with `ros2 run`, you can also use the generated launch file: - -```bash -ros2 launch parametric_optimizer_ros2 open_optimizer.launch.py +```yaml +solution: +- 0.5352476095477849 +- 0.8028586510585609 +- 0.6747818561706652 +- 0.7747513439588263 +- 0.5131839675113338 +inner_iterations: 41 +outer_iterations: 6 +status: 0 +cost: 1.1656771801253916 +norm_fpr: 2.1973496274068953e-05 +penalty: 150000.0 +lagrange_multipliers: [] +infeasibility_f1: 0.0 +infeasibility_f2: 3.3074097972366455e-05 +solve_time_ms: 0.2175 ```
- See the launch file - -

The launch file is as follows

- - ```python - # file open_optimizer.launch.py - from launch import LaunchDescription - from launch.substitutions import PathJoinSubstitution - from launch_ros.actions import Node - from launch_ros.substitutions import FindPackageShare - - - def generate_launch_description(): - return LaunchDescription([ - Node( - package="parametric_optimizer_ros2", - executable="open_node_ros2", - name="open_node_ros2", - output="screen", - parameters=[PathJoinSubstitution([ - FindPackageShare("parametric_optimizer_ros2"), - "config", - "open_params.yaml", - ])], - ) - ]) + See the specification of `OptimizationResult` + ```msg + # Constants match the enumeration of status codes + uint8 STATUS_CONVERGED=0 + uint8 STATUS_NOT_CONVERGED_ITERATIONS=1 + uint8 STATUS_NOT_CONVERGED_OUT_OF_TIME=2 + uint8 STATUS_NOT_CONVERGED_COST=3 + uint8 STATUS_NOT_CONVERGED_FINITE_COMPUTATION=4 + + float64[] solution # solution + uint8 inner_iterations # number of inner iterations + uint16 outer_iterations # number of outer iterations + uint8 status # status code + float64 cost # cost value at solution + float64 norm_fpr # norm of FPR of last inner problem + float64 penalty # penalty value + float64[] lagrange_multipliers # vector of Lagrange multipliers + float64 infeasibility_f1 # infeasibility wrt F1 + float64 infeasibility_f2 # infeasibility wrt F2 + float64 solve_time_ms # solution time in ms ```
+Instead of starting the node with `ros2 run`, you can also use the generated launch file: + +```bash +ros2 launch parametric_optimizer_ros2 open_optimizer.launch.py +``` + The launch file starts the auto-generated node and loads its parameters from `config/open_params.yaml`, where you can adjust settings such as the input topic, output topic, and node rate. diff --git a/website/src/css/custom.css b/website/src/css/custom.css index 09bce1f7..6e225c5e 100644 --- a/website/src/css/custom.css +++ b/website/src/css/custom.css @@ -197,7 +197,7 @@ body { .homeCodeBlock { background: var(--open-page-surface); border: 1px solid var(--open-page-border); - border-radius: 24px; + border-radius: 15px; box-shadow: var(--open-page-shadow); } @@ -368,6 +368,72 @@ body { width: 100%; } +.homeRos2Promo { + display: grid; + grid-template-columns: minmax(0, 1.05fr) minmax(320px, 0.95fr); + gap: 1.5rem; + align-items: center; + width: min(1100px, calc(100% - 2rem)); + margin: 0 auto; + padding: 2rem; + background: + linear-gradient(145deg, rgba(164, 62, 53, 0.88), rgba(141, 33, 183, 0.92)), + #843129; + border: 1px solid rgba(255, 224, 204, 0.2); + border-radius: 28px; + box-shadow: var(--open-page-shadow); +} + +.homeRos2Promo__content, +.homeRos2Promo__code { + min-width: 0; +} + +.homeRos2Promo__content h2 { + margin: 0 0 0.85rem; + color: #fff8f3; + font-size: clamp(2rem, 4vw, 3rem); + line-height: 1.08; +} + +.homeRos2Promo__content p { + color: rgba(255, 248, 243, 0.88); +} + +.homeRos2Promo__robot { + display: block; + width: 200px; + height: 200px; + margin: 0 auto 1rem; +} + +.homeRos2Promo__attribution { + margin: -0.4rem 0 0.9rem; + text-align: center; + font-size: 0.68rem; + line-height: 1.25; +} + +.homeRos2Promo__attribution a { + color: rgba(255, 248, 243, 0.82); + text-decoration: none; +} + +.homeRos2Promo__attribution a:hover { + color: #fff8f3; + text-decoration: underline; +} + +.homeRos2Promo__codeBlock { + margin-top: 0; + background: rgba(255, 248, 243, 0.96); + 
border-color: rgba(255, 224, 204, 0.3); +} + +.homeRos2Promo__codeBlock .theme-code-block { + margin-bottom: 0; +} + .homeSplit__copy, .homeSplit__media { min-width: 0; @@ -617,7 +683,8 @@ body { max-width: none; } - .homeOcpPromo { + .homeOcpPromo, + .homeRos2Promo { grid-template-columns: 1fr; } } @@ -640,4 +707,8 @@ body { width: 64px; height: 64px; } + + .homeRos2Promo { + padding: 1.5rem; + } } diff --git a/website/src/pages/index.js b/website/src/pages/index.js index 8a0fa34d..4fad6ce5 100644 --- a/website/src/pages/index.js +++ b/website/src/pages/index.js @@ -35,6 +35,15 @@ builder = og.builder.OpEnOptimizerBuilder( ) builder.build()`; +const ros2PromoCode = String.raw`ros2_config = og.config.RosConfiguration() \ + .with_package_name("my_ros_pkg") \ + .with_node_name("open_node_ros2") \ + .with_rate(10.0) + +build_config = og.config.BuildConfiguration() \ + .with_build_directory("my_optimizers") \ + .with_ros2(ros2_config)`; + const heroStats = [ {label: 'Core language', value: 'Rust'}, {label: 'Primary uses', value: 'MPC, MHE, Robotics'}, @@ -112,6 +121,7 @@ export default function Home() { const promoGif = assetUrl('img/open-promo.gif'); const boxLogo = assetUrl('img/box.png'); const ocpStatesImage = assetUrl('img/ocp-states.png'); + const ros2RobotImage = assetUrl('img/ros2-robot.png'); const [zoomedImage, setZoomedImage] = useState(null); useEffect(() => { @@ -352,6 +362,54 @@ export default function Home() { + +
+
+
+

New in opegen 0.11

+

ROS2 packages

+

+ OpEn can now generate ROS2 packages directly from a parametric + optimizer. The generated package includes ROS2 messages, + configuration files, a launch file, and a node that exposes the + solver through topics. +

+

+ This makes it easy to connect optimization-based controllers, + estimators, and planning modules into a modern robotics stack + without writing the ROS2 wrapper code by hand. +

+
+ + Learn more + + + Legacy ROS1 + +
+
+
+ Cartoon robot icon +

+ + Bot icons created by pbig - Flaticon + +

+
+ {ros2PromoCode} +
+
+
+
{zoomedImage ? (
{ + if (lang === 'php') { + require('prismjs/components/prism-markup-templating.js'); + } + require(`prismjs/components/prism-${lang}`); + }); + + registerMsgLanguage(PrismObject); + + delete globalThis.Prism; + if (typeof PrismBefore !== 'undefined') { + globalThis.Prism = PrismObject; + } +} diff --git a/website/static/img/ros2-robot.png b/website/static/img/ros2-robot.png new file mode 100644 index 0000000000000000000000000000000000000000..f5ec4f946a84a49a2f8f0c1b9bfc5dc667d9c129 GIT binary patch literal 14224 zcmbtbWm_Cgu$@I03yVu|CqU2y2<|Sy-8DdP2(H0`yIYXp?(Xgm!3pl}+noT8lQt zWJ{5~C4Vnv5JD~}x!7$a>|2Qst#|Gpfyb0+Epu4@Z|&;ULS}wib&kndX7%vt#S`_g zSX;HQNk_N{@Uy9osI#d+<4Lm2ClOO`##d+tnDghDp77bN)3RQHa0d+*h}t!-yG&>N$GsE z(YQ1|nlEz{*JCGYfb{U)fZS?YOYHpE+m* zoUPl$XI|Ujkod%s^48rC`K_*IlTLMe1PlLk$>iq$lV;h59sST#?_`|WchML39D~{i z7m=Gz=u-Ve-EE^N|(9! zlQf&b z&RodBa4;Bk_-4n|Pp3~OyHl}Wx1;38=@j+cOb4tScNy>h!7XH>^>ZkM&;j9&$McO! z4GD7*ExUIeZ(|oziH-x2#Ka>>%i>ZPpOCsa?n z&8goV_%=%%uNI_&epnTMAVCmqrb=@bSEL?`;^N|AGJdXVmH4EAKtm3K(N)|<_P|B0 z&);>7YX42c8An3x%Aj9WRlYo#)zmOc?glk^vC4SXL|US?y0qZ?;|{fGoA^IbguW`P z1&gqJBq418K2qfag{pnvgegs^Osc;ygD-_(bBgJ5#OG-gDTHZkBIw%#^-Wi?N$bvI z|Mx0EdNCS6W(kg9RSu3I#s;p`fYLGy-503Qz-UO@VQ|4DDAC~t=hOUD1H^=@k{7Ba zcV_b@k+UV~5+P|M2-tW~9Y}p!h;+CzVsZa9m!rYxJK0V4e{Bm%hn^$*{Z^==`yhv0 z286-yZrN3`T2U*CLQPH-TV`O|+jmO{S}RwB$WQ4B6ovp53Nfkz!o=hwpe_Eq=EV-6 zRYV7cAwnO3)sTz9qgK?^DhMTnmg1z)BGd*+GXz|*Ug6&|{y1NVnQSmOu=ujDGFFic z@!{{>`PP=-RURhPv@lG;lh@+X*+Iu;m$yd^Qn~R|i&p(xc&?YT!7c9}GCp`P^dUd1 z!oP5XBu~loWVz00B6F9!U?`4U)pX)^#N|xG#pNi*_1fmCfxyw{b$XTnQCv^jN3Het za1=$~>tBFZ!$~^JL*~G^R%MlI0J%KtNKcKq!cyS^5+iCY48GX+#?9Y*Y%}X37pAC; zltNQeiI!9b;+Pu|07-;++4}2s;W~3E<^Xk|8t}XGX2IJDEks~~>f;AqV{a2fgpfJ* z%vk-WFTa$-QM1EdUhybqtM`9aIWQ_dT-@=mu&zDI8OMJ`K@yPBr zofuoqNBr&qWX(}j zU43;c93cDE~P}VR%+T&a?_tP3=I3# z-cTkiS_OV`T?HaOhrEK9AKK1~_M_n@Y#_5n405&gw@E%9Vk9@2h+OYRIW=ru|aydBC2RmahnfMF8wWL22`e7N(`JU8XJsxR^Rq&>Uh28 zuQLk}(T`Wo$LjR;QqIm;vD@!`-fVi}4{F>^e+NA2Gd;a_kVdsvAL$qofPl}V<){Df 
zY#Ia}Lp61j*>eg{l%oV5w@x}*Z+KVgL8LxNHH{ZB5~mMtYaNnEIY*!!{QTJ`FIP-* z!is$j?^m&39t}1t)h&;tYj_d1p7n`&3r}WLi7|_rWh2D`ZpLjF`vGlO-wMW(+l(IE zN{~OgzP6%#{LAmQT#Cb&6(RB3ShcH6nvS_rr|_`d*wqQ5x%^McM(4s1o8Q-5=FoS@@XRceK|d-b^v zf!BEq!Pb^cCh=lscIkz{>%7tOeC>N6MDVQRq6gk}ea&MPjpplcZr&L4S@Gk6Yof2) zq?5rFioW~j%Nl)oJX2QHnp^g`KXrAQQs^G&Ss}VtukY^=bpui+Y%MG%mBSq+yZ9vg zWW8QSIa$b08?r)v9ER7J!2}u)V6n8{hvGKM!zT~4kTzXt-i*$e4_$^WwAz1qnre6? ztzkV)tVkU#vYFCYYE4aD-gbKvCQwq=Ucrq>nt%r?x9ry+Ixvj*udFeuc+D@oKbenP zt(6Jzk32n;`E1A04)i#G=))86wtkCI2Ea3BV_8z7pd}?0l7?dati1|lB@3HW5ncr* zv}J)R?`2lZToPckTv>P5k8h6(j+`(8@VIMle|d~*E1lP=Ap&|6BX|aU{;@lPhhc!4 z?R)F(IWNT7(Is|J$b!DZ(CgD#TOrMc17*J$9w!l?hyobB`N8fEMQZXrE!O^x%v!Be zPz_H*2V&Ik+EpHk-s>~#;?c~CaCsGgneX)3W}P4`hyB zsed4-g^w0N4KPDI$zri|FrQezfNe?bvZgDF-j}B^)Ob25cKvv^RDsLH!_(xX+JVGc zYvtiFu^v|-%aojazf%HIh`A>6~R2f9?Lwr9<5= zQMat0NbzmGaT}6A=n(spX{Q^l!uVEMnm#cer7WQYouj;SqCkVU`}*f^kE3hb+G%Mh z;-K0(qLTzMxs<61M9h|LyqOWyrB9YbhC1QCxAm|N1NA9hD~rnJck`IMYwnC(^kG~q zO_FiM%ZSk;Ax#Vx?dzK&h%}gkrj-fl95()8{gGfEZUUe)B)xI-kI1hM?=oB{}dN$&}MhboEx1QdI8^p74}ZY~gM&(Izcz*H85q8IXu!quJ=7o?C-bzU_}AQdc`m-TtOulCJ;!%zJuR zyzgOsw7%|Cy?3TeZ1beEfK>X6j=-;@iiW2aW{^B6+s4p_r17YsjmG-O)K-Ag{ znA1UcbzHsTdzH4xU-)snK1sAC*axqy&2P=2r$G4vUQ#|dhW`!ccjqkjVWVf z$zo(X?X4TjQRw+t_S&~T*Z9Yi=q&tX-)PRfKA&vs(5BLTf&iT?;ks|HM@_m6dJQq) zfJ8Qm-0?_;^|{(N$Ms{hwE703wPp_q<;vi!gfDK@G72c;~oD5Z87jLuAXmJ`idt5 z+H8?;C=`%T6+RdkFyEVkTrWte=nfS?wL26jlu4v#!A~J1n5)Bzddp(=95YDQBoj?P z=2CsyfkiY5*1HISYu;rJb7_4&gKjoj7iQJVz0OzkA9h3skUqchEv!B*Zlhz5PMR}j z;|NA-dL7^h(2{@OPr2EX;2>0Z-rlq2S5(uiBdgrBjCay|8fxeuw6fyXO2ZPm5_KNy zoloPxn+r4)D+q2tXy)O$eLjD$pP4G_@?@q6Q>5t+c;7A>`#dUsga+DlgO>Syqq%(} zX|bW@K4h3dkXK4ZdcGrD1%929yuY6v$32CF^U!>dw&LU_1nRc&1n%cc%g3>B4wkVo zheXwvY83dDK9?7pjqDW741==z4Yr!$!Xg(1c z*$URnwLl?5*Kt|Gx%0^MOtVa7L1q8#V8Cvk}VPR|4H_pVYe!A9f znW$1s=k+NFa#-q^cq-sAb*T)US+e|J^@+xd%@+@;upW1TTHsOC2}kmLxR^$tm&f38 zLZl`cr2oRC)K3vL3Yo`?NYP@VEOI3S!G%MRe3fDrFnh>GVv_pD`#(}iN=@$6L!LvV;Si`bngeJBezY#}R+F#BoeKa_= 
zE+b5@v63Y|wp}w#(C3en+Hi*vkncCqX8ySEo=P+>Rf;zrG1|d-bnJ3_Rgn zjfA2Tz7JM=M0}5cp<Y?nwars@}uo(hz zg%m4O3fFa0K!u7#{D-rerE((uI&L-Gmb%k?IRS!CoBjA&pdDXh89d!>`Wt7XQCf;0 znF~(EFTE7kq((&obF$yuE5A=eT`_Zq3TxL|+KQHo{+#=@HR}Dzhmrx?Pjber?66cW z$ddvKwrig27DHduCXo&BiP|k5WbN4=l#$%oK~y1-&;dbF$@JsD(^G4#q=FqtABhXx zu9I$NT3ixUN2d&Zv3p0aT6pgCe69^1eoT~*R<0Bk7p7edu%9I3a)knIOB@f6*2=f!rti>=7uf## zwRvbD?L$dHI@@UNh;fAThx~>1yqfL7)ZO2oRHf05?}4u}&bo3Qj3j4F^!=lDv7h4) zQ_g#lsz*uZI$9sHz>_7FW}#|;=Cu9m3tCDo_)xdOOtvNl z(Jn#3*H3oq2Gju9APOU*qvd-~P}1c!PCu0v$OH^K+iHEmG=1}mA=+l<)y(Ph21_vY zgJ|gV6m2XxGM-^KoaQb5gYaY|QruC?$cJI|#Zb}C4?-_NumE7R$TD`Our_W z+T*lkY9Fbds6H^fwFk*({&UzwqBG#br$~)+Y~nEKf;syEe3n&fdDPPs=Y0&#A^-!4 zOb?|Fs}-zM*_9bMp`EqQd(UZ!@C>=Xy_!1hi6j?%2;e51H@^fRqk~3267=KIa@st& zkn}NptA#n=T&xxoh$mTxJI`JpA zO!;%KwA?)RMn&D99QFx4cqZeGStw(jWvp|Y8CAsfmDecMEPLkD@m*5s$1gPdNZ%(> zwyv{rIxSO8FZuK1&srTRFpg)9#i=m0=JnWRDW<^w!`RvwD59@Xbwlx1+m9;+f`K0sR7t**p9yV-)qb!>5s#TXdWS z7x!YgLa}C&bl*vGAUgXk_qo%E&S4{CYSny42|Ay@wsx+qt}zah3E@5p{j~?^J52#|PUNy|<%L z?OH*rWf~It)fCb1v}b%bbuVu%vzaRn$7Ln>6@LuraB;U{fTI zb)ssHBl*&NETmQPd!!)nB>&|3XE*^H96NjMWC%2K=(3uMk`fjgs{~y-7(EMB8oA=? 
zg9us+vRL3mLG3RxXcRE;jNQl-fn~FQ1C$!C?_;U09H_LuDk>?Q$Mm5zI0oUk8C7X) z-23D?ytZ8qf5!6;a$f144#umkPSLk+x88=uy-3w+= zu2RHA&i?oOZ;y3`JEa~S<%9qv0z0-3aZ%`|_~I2a`itkI##c-?5cLHWgSI!ht|_TN zU`$IxsF?|b;yyD3Qc^A;>!S2}bx!r+U4JXg212s5yJg8!o(LEt)iG=%N_?o^8IYZyYj39|sHCVrHuH%&3OyZu*rT)j|jv z!Vd}`_Ww3vbse|7>S#GU<4%PW3FUVJs5wgP3f$a2ubu7;#$l{49KXWuSE5a;l@%YE zVZXdI_+t3OSb3uxZsAf78`}~GNy1Mgs{x}%s-I6p?KJpY2|)Ok(E2ziTOy zRB%o(gY279Qe#Q_Xa~(SpYP~Nnctz}yf@YYffeOZZjMg%-Rb%l^~$|!;Rk6)hkcON zIiR`y`=L5`GGErVzcbK2E!{45Gt`$6*jR+W_cqoBK|D*g@+f6NCR&QZ>|ud_e_NlC zn?L-gR2u}mT8W|iZE#@8kw+i#h*GhYP^;xZJ18h9@%cm(UV#61JxnkSih~u1?oVlE zj*+o71CX0&z3eE7R|tX6p$CLdzt$rQ`lbd`%IB~b;P{g7CzQV1FkldT0+6$^J$elp z;PSg5MiX{B^iKV?Fupi$tScfgq}kCgsZ&2wU=ab4CRgdnpd1;M|K??*fAnT)%^_bH zH*OX=+W@JBGsg<<9WNH>g77QOL;Hq>8m77_+sm@c-}b%5;5@Zl1@IMfae4A+uWJ=FXP z@=ysVEX2|2U(V3jMNGLp+0ibZHiw}q+~9$M2l#4Pu#ynEQleR&*$$Wx25xnxcF$;W zPHK;{C!uJP;0oDbYrZT4fndVaG5pe>G|^g4O70b@g_dn6^Ib1nAA7B{)t;Tn2Lx!7 z0AM=pV>+`oa?z7FBw%<)rJXM)?CHvNiH&0HUZAWW$*A{(CGGIzl*X(p5(_z8?gcBK zTDaeKuvvYg8N&L{GGhzG8?5tE)EB^ArweW*+s9mWY(SV$s&Ql#tz@O@R@9r z4JPls3!+z=Qe;CmP*S~VAQ$a-gyp68Don%#4ovS9EiWY|+YEMR+eRTmTZ_FVY3*?+ zO<{hqUx8ZhHgo)b&T8&{lpmFU8P`9gK4VyvnI2SN6xmV9XJ5o6B5S* z^&m`~k{QtxUgw%fj1B2Xf=-{P)CXW0SO`yOL(dM^!-ym{f&Z}##;0xt_Og;TSF!*e zH4Ix`+n&XbuLlxPSs?j*>(@kD(1Yo4b|h8J<)=?61k&0dN_pvvf)3}*BeTa_bJ0=e zsJE4%B!>?=7+)_x9vjQxu76DMw6zeCc6*s&;uQjnOzqq32Enn%hw>#&DFj2J6u&P! 
zgDlrRTN=+ebK{kV&^^#5g1+h1p3@_FdueF*us}ld^{oeM88t8%_AV_Ewz(p0kJqJ^ z{bu|x2oTrhO`WM2Ju(#7Fl z1O!`X)m)x+i(I`082~`gOIbN-v*=f-Jy~g)@yfwKrl=ik`p?wA8_V*`Ik@aSRqV;< zJ&U$%A9KvnsV~X&rgEUc;9PvXiK2yDA^W31{@( zF~5h*QKMeKOuBa+zdQ|Gv+0ZE>?n;WJOBV5#(!LZ-+(uZ;6UQXW>u+nkiq&oJ?<~R z@GkdR7B~u4T@EU|d$-#JmC86jE7xknT7~{ara;8#~ei78eNHJyPPk8j%)|+je4VPybX3#e@ zt^V~Unl{O{HdmWl?l_s-G_1gbEF&+&S?ZhIw+|I@1(BreJ+~M4?41?KF%dUmc>Cj7 z1P3L0-w-Zl=I6&3a2a)5bw!I4d89Dl2N3IHl)6|%-<%97Bp0B$FS48)8yZRaO{&;{ zbZ=>TE-A#(fMA`B?6?LM`Nfv+IyYw(KCerN64LWV5&|)rT>^_3Q4?<_Cp&{PYSuM` z258sG1p(c@qLp#=A8ArSO2xBjJ_dUNZC(a@=Dzxsxjg)8JbjCVzp`K_Db#uI%e~M_WMFr3C?o6Cx~xo%H^=rp1sSHCvRXPpjE2e+oTs8vxBT615^`h)5ywe0 z@$6$+G136Ks8Qj_kEv}1-it|7N~+UjD8i>n0r&mv!NZor<#xcTaPB$J`Z-ZK8;frx z1;k^CE{{v$Z^Pqm7{ehsAv1SMI{Co2z;~k0T~n5}KWk$;m&UKgY4e;ij?Pk&6_wm+ z40i%M80$FB?a>kc{29RVRcY_^lXTh=xj?>*UuK(d97usfk!)+ypvMoIdNROr$F16# zbwMLqA;@D)A;>(rE@_Nck$ffBYrje?ZboYPJ!QFn1;dVNtWELtp-3dS{vcW(cb~iy z@?(!@?I}r=4MU15PTB9Z1Whb~_vUmw(#yx(HA_wq8;}HxQpN}&iBQ4iR%r|+Vn}LA z2dH05r97Lbd!a5yi4MEGeAgK(rTS{RUN0OX^3N2fCD5>b&Et&tjD-7O0Kpy10vDev zAolwLrMN$$;+ME_6wK^$%>=IaKb!4uNzbJY-AAZ~uZj@oN+n5}xPW1mDkVu5*Cdl< zMJc3~mKOazb9w65JXvpeigY%ntNJt5`SzFk;^n>CU=8ET|#^DR+Aj5Km{g|)! 
zgkn&diHM*OA3Pw69WgMx#?fB<;hSa*ZRjM=)J~{|=XhR0fe&v&<&oUTJugz#g1zoy z=uS3u|4E)M+ZE!ylb#Qo!uSKj zBO_Tv`k&TnWslJItEwgW*{9b{8M9ZXM6#Q|+)jtSTLD3&Y%mg_vtbLjL3Oe-Gi2h* z=ZgcJ@>@-iyzk@$d0s76o8qySL~#%8Xa`z`b5h2vrGx0HRVa`cj3Sok{H}Q$BT>vw zi9OiZc?bOVhU76=7qF&W4>_&fbIB6w!=yNk5uy)hs#N)QvE$OJaBmU!(PjriShGY1V20aE6 zB1#D{rm~kTnr#hf7a=waI&xc;E=EiASJI*K`dBrWI>I@y6L;c*??C{Kw!)5D%VC8=ZqEYs7%(Az*kER*@XhB zhr>WKrq&TP%r6s$-m51Qt`=j;xO|U~<-%m)O>tNlxfkLDLPS+hsic&kNP==HPzmhG zD)WepQ{Tqf(@%kpFE8i=%wKFVsM;d3)jTp=72QLndsv`Zh}+tYPhY&*qOhB zD&adh8*{o^`>yy`K=wCVc~(r2YO{K&8O9!#nFLx=xnzR!_%-h+bQ@*Q)}fG=v9I>` z9WC;$X`ij}FI)FNS*>ZK2-_LLo0S1UAy7LM3x$PrF|*|s(*w7tH<-|nrNRA1+i29Y zYewzcDMppF3jG?NW=Nu2-j8=Go+K(`sOPhisYucfognIWK*fpSZyQcg2j&p1_|(ad zz1ERont&Myz;Hn?yn^+G7_kYf9VK>dZtDDwpF%bWfizN%M&fj*1sT!-fWa=o3x8aQ zD`flt7+)Gkc%*Se-q4`lp5uw=Wh(uX2uH)xn27TYNe88qD%Sx)48R47ju1V$5-j-i1EgGN2DQ3;R;rf_c zj;ogkNIXhy{wz@SsS>gMwb$k6p1n%?(zmJlK`?|O#i`OTZAVy<)0c9I$XIizsHg4w zmk<`dU%SC*JwGD<7!BK%4?zx+j1DC<#}jmmvYC9zjZ#CuPoTX%XasUqbm!RPY_G!! zho?k&WY(M{!Ya~5JxXp&O70oM%H<}e$mFzn0^5_&VY{~)a>)f5Hu|H^utZ3UBs&X-xC1o` zZ|?)0xHF~)@hNan=!Q{&Kjq~lPu#O^P@)g{^xRy2w|D!d77&ENot83FF6l`~9+*ra zY7%BDE^poki|YM@Ye7!J+1ZJPi=^52o4%6cd%7~CK_X;gnB(O-8K{n2oK8qldHJrf z#FtF8Z|lest7kCTY=s;{C}!QMTp_=;~%jNY1=54;Fn39_XKzJ!fu}P^A7$5{a zv3>T(_M%SqifOtnE(E|e1*(W3v?E*1+oh6J9qqJcvI?4S;ibDaUr%v>XjQmuJhO2+ zh;;!h05sDo+5Qk)LpD=} zpFYLr$Z32-&58c^Px89rZLhLIFtO~0B_Ko}G&ue`-Ug${!Tz4usep~GP*>KJ%;j%0 zApPRDam!2=@LDphttC%Ug8-mn7WCqP&wQ-C(Ka_F`&Z}8vsN_6k#aL zi4Q4BA41>_912blhri*oU2~c!j@RIEw~Y41iZb{@*u1pOCGOu>)hK8DMr^nFkS6ZfQ3JV*0Pt@T zgHFYzhm0C=IQ8pigYjTs>5)kq8Vn2Pe7ykCQ9qhavQ|4GD4q7tN}IgWn5qR(B*qJY zW3Dio1}DyKWT6>ox~4mM*gtzgLD7(719TS`(;i1oI7oUErpkx1aKCL2y_)K9-&E^= z$iW3cZ{zt;rWq)``F`sQz>QXz(Jv{glB{eKy*1R2@yFp_{2ybi?=(Zvh6z~vAQn?@IQ&{PzahcVre7MU-E%#@WVJA0 zvaC=laf>+v5*@c-@CQeM&cOcu{`6UJ9;zdNT_RNSRIpJEEh*;UasL?IV3z^PK!HGs zU?7vL-I3S7e&~E4m89Fo(O{6t%9KvuJo_W@vx%|UpS**TbI~B`+2FuPIJjIKL5SNB zh$}*=J;QMiEHGpvveL7-i%twW 
zCf_T330#o;m#)*l*~aV<2#W6t;7d^Be7ha6Q7_FNPDjaDzwKMl8bGu_aM49qR&;RG zu=Kf_LIY;U;8CG3m4hrubh0!Awc>*OUVvL`ura6%@Z4Z14}t00EHM;H?v9ACygFh} z^NIE&CF5N3ZHf^Kt=;zgZV>|dGp1BvjX$BT13B!WQ-y*CUGh`~lpQ05??dbBYkyhi zA^X8?$Juk9%l4=P1mQXCF+W99D`^0pPuRmp!ol(sys`=EeT zdGKghV6>$?el>-aq+A|St@tA6lPyco)ThN_%tcPB&FU_Q$CY;kg&4GcZdfMFWX6CD z$3OzU%1M3MiAsksnGh;tYs|Y}JJE8-sj2kJ7)o59+4V({>F2V$$)*M}zrjq#*V(0X zFXCODH?(GS3{D|;B5HNC2PAjIbTFK&Tf98(be1Jn?&e!%+@hx3gMu)SqS-u;)8|md zm|rdpr9-1&QsUpn2+;%6EMDB1(f3HOEZ`x6I}r-2s2eSVPF|;@ig2Y7e6O=nGbi!< zR>%32aTMg=@Jry3Fw)`T%kKvi$JOh-U)D83IGz1(&8ZC_;QO_k({DEOet{D1aWYp$C{-gZm$&M@&DNp{L<< zq&sa?3X?6V(!r2T)56|lMhO=B=JQPKYqVxlyn!a)`F3z*KTPd!`?$^IC+WRf0$5L{ z_vX~!bt-s##4X@ix*fcqv z7ar?5!;@7$Y_e@b7ce+Y(SA|#-un#(doPzGpdc&|eCfD{x{3(lz!PRQUwHuSh6=Te zjC|OI(U{Z+V=5tk4kXe@WkLW{w~HA-*H3c!!w#%-nBok+s9%REMzCcNtiK}wGqvM= zzCW5&SQr_;`MLG^8rLK0^fy2xA*I*`_m7UWpP*F5`7&)z?6^gBP-U!UR(J}-`}gk| zpk)i(aGD%$f)XI)kZh=V2L^5h#;eW$WDS6R$+&M|e{dw$5Z7?dkN}i!j7zWwTnEri zQck3MwYTM1zcu(sUPO__OC!vGnG9!yN1EWi6Z*8&BlVV>` zz-0h3*eLVUtTC*dLxR_5GWs{hPV4PAuNbx#UPL)>C=weLEh~ofXag(%P-o5-#Q!M~ z=m-=mI96Y~nE-7ViY>uCJA&|?ekghr>B%gp#l8P;(&<(XTKC6&-|o6)<;|ymkaTu| zR?aTlz$J!b5qBQ!zZm0w0YG4_?IAh$+hd0M!?--Qus2@e_7ec~i@3gR`g|7seH+R# z=5a}naAKV3mNmo;Me4~!=2tsT$pZ9F8e%RMW_2YmS>XVGz3xt0K&=8b4Ci^9iwoY7f+W&kDiz41$xYtSxJaNr(C5C# z_7=Dw&)$Z#r4Bye(eWetlUB(-zyd6cu4PVRpUqm4IzKC3TZ!P7!%cC=iDc*Gbm?T7 zg%z#e$~N9ETaB!&U=^}#WYQHyvX|f%waVwmQ&1$vqEX$e4~xx4x*?T zt_)Nad3LB2k&OYwy znXl@X_!G7&3T`N^6D0@<}~H+B#|GUp9_f*vDa;di(=JY&3xwQcNn+*J{J z_?T=6hQf#Bs31%j|NU^sNCFAT)?2H3mou8@n*}J1_g~K7Nj4>Om8z|beTWPvM^!ASU7QEG%BM$iauY^`e4k` z*)LIZVs5tpAA%e(%z})0&UMg1`PgyRAwFs64S2RB!=E^sL>PR(27(T`eEKrNruG+t z{v#)l2_wImb}>y2yj^k=PVx|tdE8L1gU*L^bVQ|*cw$f)-n*EOp)j|(W;3S7h?v7f zQk>wGK1(U?;faPMB>bzsg6%Xk%O$4JasCN!DA)+Du z#;<5#s=UVqPaSP6=&==XkoRhL>NeYaIKLGtYHuZ@Mghn$xySw)-cSPT`zIPM6W4?W z*rg;2lmG(7>3}0C$%PwRNvfZev?{iL|CnDyuzbO=z~=fmOw*%W0e7H(;2vF$7(Esv z*~?0i$RNn=oJgHJrIj8g3|>ngx%$8&HBd)15bEpEZsLn;+*ukf-vwqIEdKwVDEr?7 dY0BVdA8i*X_F6EY4LwZ Date: Wed, 
25 Mar 2026 23:48:23 +0000 Subject: [PATCH 048/133] [ci skip] update contributing guidelines --- docs/contributing.mdx | 232 ++++++++++++++++++++++++++++++++++++++++++ docs/python-ros.md | 42 +++++--- 2 files changed, 258 insertions(+), 16 deletions(-) create mode 100644 docs/contributing.mdx diff --git a/docs/contributing.mdx b/docs/contributing.mdx new file mode 100644 index 00000000..d1584896 --- /dev/null +++ b/docs/contributing.mdx @@ -0,0 +1,232 @@ +--- +id: contributing +sidebar_label: Contributing +title: Contributing to OpEn +description: How do I contribute to OpEn +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## How can I contribute to OpEn? +Thank you for considering contributing to Optimization Engine (OpEn)! + +OpEn is an open source project and welcomes contributions from the community. + +You can contribute in several ways: + +- Submit an [**issue**](https://github.com/alphaville/optimization-engine/issues): + Often bugs will go unnoticed by the core development team and certain + use cases and user needs will have evaded our attention. + Consider submitting an issue if: + - You would like to report a [bug]; please, use the provided template for reporting + bugs. It is essential to give information about your system (OS, OpEn version) + and outline a sequence of steps to reproduce the error. 
When possible, please + provide a [minimum working example] + - You would like to request a [new feature]; please use the provided template + - You would like to propose modifications in OpEn's documentation, such as + for some concepts to be better elucidated or a request for an additional example +- Share with us a **success story** on [**Discord**](https://discord.gg/mfYpn4V) +- Create a **pull request** (see below) + +or, show us your love: + +- Give us a [**star on gitub**](https://github.com/alphaville/optimization-engine) +- Spread the word on [**Twitter**] + +![Star](https://media.giphy.com/media/ZxblqUVrPVmcqATkC4/giphy.gif) + +## I just have a question! +The easiest and quickest way to ask a question is to reach us on [**Discord**](https://discord.gg/mfYpn4V) or [**Gitter**](https://gitter.im/alphaville/optimization-engine). + +You may also consult the [**frequently asked questions**](/optimization-engine/docs/faq). + + +## Submitting issues +You may submit an issue regarding anything related to **OpEn**, such as: + +- a bug +- insufficient/vague documentation +- request for a feature +- request for an example + +You should, however, make sure that the same - or a very similar - issue is not already open. In that case, you may write a comment in an existing issue. + + +## Contributing code or docs + +In order to contribute code or documentation, you need to [fork] our github repository, make you modifications and submit a pull request. 
You should follow these rules: + +- create one or more [issues on github] that will be associated with your changes +- take it from `master`: fork OpEn and create a branch on `master` + +```console +git checkout -b fix/xyz master +``` + +- read the [style guide](#coding-style-guide) below (and write unit/integration tests) +- create a pull request in which you need to explain the key changes + +## Coding style guide + +Things to keep in mind: + +- **Code**: intuitive structure and variable names, short atomic functions, +- **Comments**: help others better understand your code +- **Docs**: document all functions (even private ones) +- **Tests**: write comprehnsive, exhaustive tests + +### Rust + +*General guidelines:* Read the Rust [API guidelines] and this [API checklist] + +*Naming convention:* We follow the [standard naming convention](https://rust-lang-nursery.github.io/api-guidelines/naming.html) of Rust. + +*Documentation:* We follow [these guidelines](https://rust-lang-nursery.github.io/api-guidelines/documentation.html). Everything should be documented. + +### Python + +We follow [this style guide](https://www.python.org/dev/peps/pep-0008) and its [naming convention](https://www.python.org/dev/peps/pep-0008/#naming-conventions) + + +### Website +This documentation is generated with Docusaurus - read a detailed guide [here](https://github.com/alphaville/optimization-engine/blob/master/website/README.md). + +- All docs are in `docs/` +- Blog entries are in `website/blog/` + +To start the website locally (at [http://localhost:3000/optimization-engine](http://localhost:3000/optimization-engine)) change directory to `website` and run `yarn start`. To update the website, execute `./publish.sh` (you need to be a collaborator on github). 
+ +## Using Git +When using Git, keep in mind the following guidelines: + +- Create simple, atomic, commits +- Write comprehensive commit messages +- Work on a forked repository +- When you're done, submit a pull request to +[`alphaville/optimization-engine`](https://github.com/alphaville/optimization-engine/); +it will be promptly delegated to a reviewer and we will contact you +as soon as possible. + +Branch `master` is protected and all pull requests need to be reviewed by a person +other than their proposer before they can be merged into `master`. + +## Versioning +This project consists of independent modules: +(i) the core Rust library, +(ii) the MATLAB interface, +(iii) the Python interface. +Each module has a different version number (`X.Y.Z`). + +We use the **SemVer** standard - we quote from [semver.org](https://semver.org/): + +Given a version number `MAJOR.MINOR.PATCH`, increment the: + +- `MAJOR` version when you make incompatible API changes, +- `MINOR` version when you add functionality in a backwards-compatible manner, and +- `PATCH` version when you make backwards-compatible bug fixes. + +Additional labels for pre-release and build metadata are available as extensions to the `MAJOR.MINOR.PATCH` format. + +We also keep a [log of changes](https://github.com/alphaville/optimization-engine/blob/master/CHANGELOG.md) where we summarize the main changes since last version. + +## Releasing + +Each time the major or minor number of the Rust library is updated, a new crate should be published on [crates.io](https://crates.io/crates/optimization_engine). + +In order to release a new version make sure that +you have done the following: + + + + +Checklist: + +
    +
  • Updated [CHANGELOG]: bump version, write summary of changes
  • +
  • Updated [Cargo.toml]: bump version
  • +
  • Resolve all associated issues on GitHub
  • +
  • Write new unit tests if necessary
  • +
  • Update the API documentation
  • +
  • Update the information on the website
  • +
  • Merge into master once your pull request has been approved
  • +
+ +Then, create a tag and push it... + + ```bash + git tag -a v0.10.0 -m "v0.10.0" + git push --tags + ``` + +Lastly, update the [docker image](https://github.com/alphaville/optimization-engine/tree/master/docker). +This will have to be a new PR. + +
+ + + + +Checklist: + +
    +
  • Updated [CHANGELOG](https://github.com/alphaville/optimization-engine/blob/master/open-codegen/CHANGELOG.md): bump version, write summary of changes
  • +
  • Updated [VERSION]: bump version
  • +
  • Review [`pyproject.toml`](https://github.com/alphaville/optimization-engine/blob/master/open-codegen/pyproject.toml)
  • +
  • Resolve all associated issues on GitHub
  • +
  • Write new unit tests if necessary
  • +
  • Update the API documentation
  • +
  • Update the information on the website
  • +
  • Merge into master once your pull request has been approved
  • +
+ +Then, create a tag and push it... + + ```bash + git tag -a opengen-v0.10.0 -m "opengen-0.10.0" + git push --tags + ``` + + + +Lastly, update the [docker image](https://github.com/alphaville/optimization-engine/tree/master/docker). +This will have to be a new PR. + +
+ + + + + Update the [Dockerfile](https://github.com/alphaville/optimization-engine/blob/master/docker/Dockerfile). + You may need to bump the versions of open and opengen: + + ```Dockerfile + ARG OPENGEN_VERSION=0.10.0 + ARG OPTIMIZATION_ENGINE_CRATE_VERSION=0.11.0 + ``` + + Update the [CHANGELOG](https://github.com/alphaville/optimization-engine/blob/master/docker/CHANGELOG.md). + Update the [README](https://github.com/alphaville/optimization-engine/blob/master/docker/README.md) file. + Build, test, and push with + + ```bash + docker push alphaville/open:0.7.0 + ``` + + +
+ + +[CHANGELOG]: https://github.com/alphaville/optimization-engine/blob/master/CHANGELOG.md +[VERSION]: https://github.com/alphaville/optimization-engine/blob/master/open-codegen/VERSION +[Cargo.toml]: https://github.com/alphaville/optimization-engine/blob/master/Cargo.toml +[setup.py]: https://github.com/alphaville/optimization-engine/blob/master/open-codegen/setup.py +[release v0.4.0]: https://github.com/alphaville/optimization-engine/releases/tag/v0.4.0 +[bug]: https://github.com/alphaville/optimization-engine/issues/new?template=bug_report.md +[issues on github]: https://github.com/alphaville/optimization-engine/issues +[**Twitter**]: https://twitter.com/intent/tweet?original_referer=https%3A%2F%2Falphaville.github.io%2Foptimization-engine&ref_src=twsrc%5Etfw&text=Fast%20and%20accurate%20embedded%20nonconvex%20optimization%20with%20%23OptimizationEngine&tw_p=tweetbutton&url=https%3A%2F%2Falphaville.github.io%2Foptimization-engine&via=isToxic +[minimum working example]: https://en.wikipedia.org/wiki/Minimal_working_example +[new feature]: https://github.com/alphaville/optimization-engine/issues/new?template=feature_request.md +[fork]: https://github.com/alphaville/optimization-engine +[API guidelines]: https://rust-lang-nursery.github.io/api-guidelines/about.html +[API checklist]: https://rust-lang-nursery.github.io/api-guidelines/checklist.html diff --git a/docs/python-ros.md b/docs/python-ros.md index 43a7f4d8..b594e9d2 100644 --- a/docs/python-ros.md +++ b/docs/python-ros.md @@ -5,6 +5,10 @@ sidebar_label: ROS packages description: Code generation for ROS packages using opengen --- +:::note Info +Opengen now supports [ROS2](./python-ros2). +::: + ## What is ROS The [Robot Operating System](https://www.ros.org/) (ROS) is a collection of tools and libraries, as well as a framework that facilitates the data exchange among them. ROS is popular in the robotics community and is used to design and operate modern robotic systems. 
@@ -21,7 +25,7 @@ OpEn (with opengen version `0.5.0` or newer) can generate ready-to-use ROS packa The input parameters message follows the following specification: -``` +```msg float64[] parameter # parameter p (mandatory) float64[] initial_guess # u0 (optional/recommended) float64[] initial_y # y0 (optional) @@ -40,9 +44,28 @@ initial_y: [] #### Result -A result message (`OptimizationResult`) contains the solution of the parametric optimization problem and details about the solution procedure such as the number of inner/outer iterations and the solution time. The result of an auto-generated OpEn node is a message with the following specification: +A result message (`OptimizationResult`) contains the solution of the parametric optimization problem and details about the solution procedure such as the number of inner/outer iterations and the solution time. +An example of such a message is given below: +```yaml +solution: [0.5317, 0.7975, 0.6761, 0.7760, 0.5214] +inner_iterations: 159 +outer_iterations: 5 +status: 0 +norm_fpr: 2.142283848e-06 +penalty: 111250.0 +lagrange_multipliers: [] +infeasibility_f1: 0.0 +infeasibility_f2: 2.44131958366e-05 +solve_time_ms: 2.665959 ``` + +
+Specification of OptimizationResult + +The message `OptimizationResult` is described by the following message file + +```msg # Constants match the enumeration of status codes uint8 STATUS_CONVERGED=0 uint8 STATUS_NOT_CONVERGED_ITERATIONS=1 @@ -63,20 +86,7 @@ float64 infeasibility_f2 # infeasibility wrt F2 float64 solve_time_ms # solution time in ms ``` -An example of such a message is given below: - -```yaml -solution: [0.5317, 0.7975, 0.6761, 0.7760, 0.5214] -inner_iterations: 159 -outer_iterations: 5 -status: 0 -norm_fpr: 2.142283848e-06 -penalty: 111250.0 -lagrange_multipliers: [] -infeasibility_f1: 0.0 -infeasibility_f2: 2.44131958366e-05 -solve_time_ms: 2.665959 -``` +
### Configuration Parameters From b92fda9598b8661ed63ca1883d430e4a0f256caa Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Thu, 26 Mar 2026 00:13:48 +0000 Subject: [PATCH 049/133] [ci skip] contributing.md + link to open_ros --- docs/contributing.md | 164 ------------------------------------------- docs/python-ros2.mdx | 2 +- 2 files changed, 1 insertion(+), 165 deletions(-) delete mode 100644 docs/contributing.md diff --git a/docs/contributing.md b/docs/contributing.md deleted file mode 100644 index dcae4bae..00000000 --- a/docs/contributing.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -id: contributing -sidebar_label: Contributing -title: Contributing to OpEn -description: How do I contribute to OpEn ---- - -## How can I contribute to OpEn? -Thank you for considering contributing to Optimization Engine (OpEn)! - -OpEn is an open source project and welcomes contributions from the community. - -You can contribute in several ways: - -- Submit an [**issue**](https://github.com/alphaville/optimization-engine/issues): - Often bugs will go unnoticed by the core development team and certain - use cases and user needs will have evaded our attention. - Consider submitting an issue if: - - You would like to report a [bug]; please, use the provided template for reporting - bugs. It is essential to give information about your system (OS, OpEn version) - and outline a sequence of steps to reproduce the error. 
When possible, please - provide a [minimum working example] - - You would like to request a [new feature]; please use the provided template - - You would like to propose modifications in OpEn's documentation, such as - for some concepts to be better elucidated or a request for an additional example -- Share with us a **success story** on [**Discord**](https://discord.gg/mfYpn4V) -- Create a **pull request** (see below) - -or, show us your love: - -- Give us a [**star on gitub**](https://github.com/alphaville/optimization-engine) -- Spread the word on [**Twitter**] - -![Star](https://media.giphy.com/media/ZxblqUVrPVmcqATkC4/giphy.gif) - -## I just have a question! -The easiest and quickest way to ask a question is to reach us on [**Discord**](https://discord.gg/mfYpn4V) or [**Gitter**](https://gitter.im/alphaville/optimization-engine). - -You may also consult the [**frequently asked questions**](/optimization-engine/docs/faq). - - -## Submitting issues -You may submit an issue regarding anything related to **OpEn**, such as: - -- a bug -- insufficient/vague documentation -- request for a feature -- request for an example - -You should, however, make sure that the same - or a very similar - issue is not already open. In that case, you may write a comment in an existing issue. - - -## Contributing code or docs - -In order to contribute code or documentation, you need to [fork] our github repository, make you modifications and submit a pull request. 
You should follow these rules: - -- create one or more [issues on github] that will be associated with your changes -- take it from `master`: fork OpEn and create a branch on `master` - -```console -git checkout -b fix/xyz master -``` - -- read the [style guide](#coding-style-guide) below (and write unit/integration tests) -- create a pull request in which you need to explain the key changes - -## Coding style guide - -Things to keep in mind: - -- **Code**: intuitive structure and variable names, short atomic functions, -- **Comments**: help others better understand your code -- **Docs**: document all functions (even private ones) -- **Tests**: write comprehnsive, exhaustive tests - -### Rust - -*General guidelines:* Read the Rust [API guidelines] and this [API checklist] - -*Naming convention:* We follow the [standard naming convention](https://rust-lang-nursery.github.io/api-guidelines/naming.html) of Rust. - -*Documentation:* We follow [these guidelines](https://rust-lang-nursery.github.io/api-guidelines/documentation.html). Everything should be documented. - -### Python - -We follow [this style guide](https://www.python.org/dev/peps/pep-0008) and its [naming convention](https://www.python.org/dev/peps/pep-0008/#naming-conventions) - - -### Website -This documentation is generated with Docusaurus - read a detailed guide [here](https://github.com/alphaville/optimization-engine/blob/master/website/README.md). - -- All docs are in `docs/` -- Blog entries are in `website/blog/` - -To start the website locally (at [http://localhost:3000/optimization-engine](http://localhost:3000/optimization-engine)) change directory to `website` and run `yarn start`. To update the website, execute `./publish.sh` (you need to be a collaborator on github). 
- -## Using Git -When using Git, keep in mind the following guidelines: - -- Create simple, atomic, commits -- Write comprehensive commit messages -- Work on a forked repository -- When you're done, submit a pull request to -[`alphaville/optimization-engine`](https://github.com/alphaville/optimization-engine/); -it will be promptly delegated to a reviewer and we will contact you -as soon as possible. - -Branch `master` is protected and all pull requests need to be reviewed by a person -other than their proposer before they can be merged into `master`. - -## Versioning -This project consists of independent modules: -(i) the core Rust library, -(ii) the MATLAB interface, -(iii) the Python interface. -Each module has a different version number (`X.Y.Z`). - -We use the **SemVer** standard - we quote from [semver.org](https://semver.org/): - -Given a version number `MAJOR.MINOR.PATCH`, increment the: - -- `MAJOR` version when you make incompatible API changes, -- `MINOR` version when you add functionality in a backwards-compatible manner, and -- `PATCH` version when you make backwards-compatible bug fixes. - -Additional labels for pre-release and build metadata are available as extensions to the `MAJOR.MINOR.PATCH` format. - -We also keep a [log of changes](https://github.com/alphaville/optimization-engine/blob/master/CHANGELOG.md) where we summarize the main changes since last version. - -## Releasing - -Each time the major or minor number of the Rust library is updated, a new crate should be published on [crates.io](https://crates.io/crates/optimization_engine). 
- -In order to release a new version make sure that -you have done the following: - -- Updated [CHANGELOG] -- Updated the version in (SemVer): - - [CHANGELOG] - - [Cargo.toml] - - [setup.py] -- Resolved all associated issues on github (and you have created tests for these) -- Updated the documentation (Rust/Python API docs + website) -- Merged into master (your pull request has been approved) -- All tests pass on Travis CI and Appveyor -- Set `publish=true` in `Cargo.toml` (set it back to `false` for safety) -- Publish `opengen` on PyPI (if necessary) - - before doing so, make sure that the cargo.toml template - points to the correct version of OpEn -- Changed "Unreleased" into the right version in [CHANGELOG] and created - a release on github (example [release v0.4.0]) - -[CHANGELOG]: https://github.com/alphaville/optimization-engine/blob/master/CHANGELOG.md -[Cargo.toml]: https://github.com/alphaville/optimization-engine/blob/master/Cargo.toml -[setup.py]: https://github.com/alphaville/optimization-engine/blob/master/open-codegen/setup.py -[release v0.4.0]: https://github.com/alphaville/optimization-engine/releases/tag/v0.4.0 -[bug]: https://github.com/alphaville/optimization-engine/issues/new?template=bug_report.md -[issues on github]: https://github.com/alphaville/optimization-engine/issues -[**Twitter**]: https://twitter.com/intent/tweet?original_referer=https%3A%2F%2Falphaville.github.io%2Foptimization-engine&ref_src=twsrc%5Etfw&text=Fast%20and%20accurate%20embedded%20nonconvex%20optimization%20with%20%23OptimizationEngine&tw_p=tweetbutton&url=https%3A%2F%2Falphaville.github.io%2Foptimization-engine&via=isToxic -[minimum working example]: https://en.wikipedia.org/wiki/Minimal_working_example -[new feature]: https://github.com/alphaville/optimization-engine/issues/new?template=feature_request.md -[fork]: https://github.com/alphaville/optimization-engine -[API guidelines]: https://rust-lang-nursery.github.io/api-guidelines/about.html -[API checklist]: 
https://rust-lang-nursery.github.io/api-guidelines/checklist.html diff --git a/docs/python-ros2.mdx b/docs/python-ros2.mdx index 73a7b000..a81bff42 100644 --- a/docs/python-ros2.mdx +++ b/docs/python-ros2.mdx @@ -75,7 +75,7 @@ Note the use of `with_ros2` and note that `RosConfiguration` is the same config class as in [ROS1](./python-ros). This generates the optimizer in `my_optimizers/rosenbrock_ros2`, and the ROS2 package is created inside that directory as `parametric_optimizer_ros2`. - +You can inspect the auto-generated ROS2 package [here](https://github.com/alphaville/open_ros/tree/master/ros2). ## Use the auto-generated ROS2 package From a19f33347e83ec39c6769299eb51c513c65c2b47 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Thu, 26 Mar 2026 00:27:03 +0000 Subject: [PATCH 050/133] [ci skip] update website docs - update main page: promote docker - python-c.mdx: fix typo --- docs/python-c.mdx | 2 +- website/src/css/custom.css | 42 ++++++++++++++++++++++++++++++++++++- website/src/pages/index.js | 43 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+), 2 deletions(-) diff --git a/docs/python-c.mdx b/docs/python-c.mdx index bbe387e8..721b5bf9 100644 --- a/docs/python-c.mdx +++ b/docs/python-c.mdx @@ -270,4 +270,4 @@ iterations = 69 outer iterations = 5 solve time = 0.140401 ms ``` -``` + diff --git a/website/src/css/custom.css b/website/src/css/custom.css index 6e225c5e..94344066 100644 --- a/website/src/css/custom.css +++ b/website/src/css/custom.css @@ -434,6 +434,45 @@ body { margin-bottom: 0; } +.homeDockerPromo { + display: grid; + grid-template-columns: minmax(0, 1.02fr) minmax(320px, 0.98fr); + gap: 1.5rem; + align-items: center; + width: min(1100px, calc(100% - 2rem)); + margin: 0 auto; +} + +.homeDockerPromo__content, +.homeDockerPromo__visual { + min-width: 0; +} + +.homeDockerPromo__content h2 { + margin: 0 0 0.85rem; + color: #2f1a14; + font-size: clamp(2rem, 4vw, 3rem); + line-height: 1.08; +} + +.homeDockerPromo__content 
p { + color: var(--open-page-muted); +} + +.homeDockerPromo__image { + display: block; + width: min(100%, 280px); + margin: 0 auto 1rem; +} + +.homeDockerPromo__codeBlock { + margin-top: 0; +} + +.homeDockerPromo__codeBlock .theme-code-block { + margin-bottom: 0; +} + .homeSplit__copy, .homeSplit__media { min-width: 0; @@ -684,7 +723,8 @@ body { } .homeOcpPromo, - .homeRos2Promo { + .homeRos2Promo, + .homeDockerPromo { grid-template-columns: 1fr; } } diff --git a/website/src/pages/index.js b/website/src/pages/index.js index 4fad6ce5..fb458b1e 100644 --- a/website/src/pages/index.js +++ b/website/src/pages/index.js @@ -44,6 +44,8 @@ build_config = og.config.BuildConfiguration() \ .with_build_directory("my_optimizers") \ .with_ros2(ros2_config)`; +const dockerPromoCode = String.raw`docker pull alphaville/open:0.7.0` + const heroStats = [ {label: 'Core language', value: 'Rust'}, {label: 'Primary uses', value: 'MPC, MHE, Robotics'}, @@ -120,6 +122,7 @@ export default function Home() { const assetUrl = (path) => `${baseUrl}${path.replace(/^\//, '')}`; const promoGif = assetUrl('img/open-promo.gif'); const boxLogo = assetUrl('img/box.png'); + const dockerGif = assetUrl('img/docker.gif'); const ocpStatesImage = assetUrl('img/ocp-states.png'); const ros2RobotImage = assetUrl('img/ros2-robot.png'); const [zoomedImage, setZoomedImage] = useState(null); @@ -410,6 +413,46 @@ export default function Home() {
+ +
+
+
+

Docker image

+

Run OpEn in a ready-made container

+

+ OpEn ships with a Docker image that gets you straight into a + working environment with Jupyter, Python, and the tooling needed + to explore examples without local setup friction. +

+

+ It is a convenient way to try the Python interface, browse the + notebooks, and experiment with the OCP workflows in a clean, + reproducible environment. +

+
+ + Learn more + + + Docker Hub + +
+
+
+ OpEn running inside the Docker image with Jupyter +
+ {dockerPromoCode} +
+
+
+
{zoomedImage ? (
Date: Thu, 26 Mar 2026 00:31:58 +0000 Subject: [PATCH 051/133] [docit] build api docs From 7bc3bc9d83a10e111b5685e8a61ec72416187484 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Thu, 26 Mar 2026 03:17:05 +0000 Subject: [PATCH 052/133] [ci skip] website: make mobile-friendly - fix issue with sidebar - modify colours --- docs/contributing.mdx | 25 ++++++++++++++++++++++++- website/src/css/custom.css | 35 ++++++++++++++++++++++++++++++++++- 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/docs/contributing.mdx b/docs/contributing.mdx index d1584896..25cf7cee 100644 --- a/docs/contributing.mdx +++ b/docs/contributing.mdx @@ -137,6 +137,8 @@ Each time the major or minor number of the Rust library is updated, a new crate In order to release a new version make sure that you have done the following: +--- + @@ -178,12 +180,13 @@ Checklist:
  • Update the API documentation
  • Update the information on the website
  • Merge into master once your pull request has been approved
  • +
  • Update the API docs
  • Then, create a tag and push it... ```bash - git tag -a opengen-v0.10.0 -m "opengen-0.10.0" + git tag -a opengen-0.10.0 -m "opengen-0.10.0" git push --tags ``` @@ -213,9 +216,29 @@ This will have to be a new PR. docker push alphaville/open:0.7.0 ``` + Update the [website docs](./docker) and the promo on the [main page](..) +
    +--- + +To update the website, run +```bash +GIT_USER=alphaville \ + CURRENT_BRANCH=master \ + USE_SSH=true \ + yarn deploy +``` +from within `website/`. Then, update the opengen API docs too; +just push a commit with message starting with `[docit]`. +You can also issue a commit without git-add. Run + +```bash +git commit -m '[docit] update api docs' --allow-empty +``` + + [CHANGELOG]: https://github.com/alphaville/optimization-engine/blob/master/CHANGELOG.md [VERSION]: https://github.com/alphaville/optimization-engine/blob/master/open-codegen/VERSION diff --git a/website/src/css/custom.css b/website/src/css/custom.css index 94344066..568b4152 100644 --- a/website/src/css/custom.css +++ b/website/src/css/custom.css @@ -32,7 +32,6 @@ body { } .navbar { - backdrop-filter: blur(16px); box-shadow: 0 10px 30px rgba(86, 44, 28, 0.12); } @@ -729,6 +728,40 @@ body { } } +@media (min-width: 997px) { + .navbar { + backdrop-filter: blur(16px); + } +} + +@media (max-width: 996px) { + .navbar-sidebar, + .navbar-sidebar__items, + .navbar-sidebar__item.menu { + background: #f8eee7; + } + + .navbar-sidebar__brand, + .navbar-sidebar__back, + .navbar-sidebar__close, + .navbar-sidebar .menu__link, + .navbar-sidebar .menu__caret, + .navbar-sidebar .menu__link--sublist::after { + color: #221714; + } + + .navbar-sidebar .menu__link { + font-weight: 500; + } + + .navbar-sidebar .menu__link:hover, + .navbar-sidebar .menu__link--active, + .navbar-sidebar .menu__list-item-collapsible:hover { + background: rgba(122, 31, 31, 0.08); + color: #221714; + } +} + @media (max-width: 640px) { .homeHero { padding-top: 2rem; From 6807d1dbda4cfe7b5c252a19b6d98a43c012f82f Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Thu, 26 Mar 2026 03:19:04 +0000 Subject: [PATCH 053/133] [docit] update api docs From a99374365c29febf3931ac035968c64bee555c0b Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Thu, 26 Mar 2026 15:39:13 +0000 Subject: [PATCH 054/133] addressing #403 --- 
.github/workflows/ci.yml | 6 ++++++ open-codegen/test/test_ros2.py | 32 ++++++++++++++++++++++++++++++-- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 44e55725..f91661b0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -139,6 +139,12 @@ jobs: python-version: "3.12" - name: Setup ROS 2 + # `ros-tooling/setup-ros@v0.7` still runs as a Node.js 20 action. + # Force it onto Node 24 now so CI keeps working as GitHub deprecates + # Node 20, and upgrade `setup-ros` to a Node 24-compatible release + # when one becomes available. + env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true uses: ros-tooling/setup-ros@v0.7 with: required-ros-distributions: jazzy diff --git a/open-codegen/test/test_ros2.py b/open-codegen/test/test_ros2.py index af5617aa..9390ac8f 100644 --- a/open-codegen/test/test_ros2.py +++ b/open-codegen/test/test_ros2.py @@ -125,11 +125,16 @@ def test_custom_ros2_configuration_is_rendered_into_generated_files(self): """Custom ROS2 config values should appear in the generated package files.""" ros2_dir = self.ros2_package_dir() + # The package metadata should reflect the user-provided ROS2 package name + # and description, not the defaults from the templates. with open(os.path.join(ros2_dir, "package.xml"), encoding="utf-8") as f: package_xml = f.read() self.assertIn(f"{self.PACKAGE_NAME}", package_xml) self.assertIn(f"{self.DESCRIPTION}", package_xml) + # `open_optimizer.hpp` is where the generated node constants are wired in. + # These assertions make sure the custom topic names, node name, rate, and + # queue sizes are propagated into the generated C++ code. 
with open(os.path.join(ros2_dir, "include", "open_optimizer.hpp"), encoding="utf-8") as f: optimizer_header = f.read() self.assertIn(f'#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_NODE_NAME "{self.NODE_NAME}"', @@ -147,12 +152,16 @@ def test_custom_ros2_configuration_is_rendered_into_generated_files(self): f"#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_PARAMS_TOPIC_QUEUE_SIZE {self.PARAMS_QUEUE_SIZE}", optimizer_header) + # The runtime YAML configuration should carry the custom topic names and + # timer rate so the launched node uses the intended ROS2 parameters. with open(os.path.join(ros2_dir, "config", "open_params.yaml"), encoding="utf-8") as f: params_yaml = f.read() self.assertIn(f'result_topic: "{self.RESULT_TOPIC}"', params_yaml) self.assertIn(f'params_topic: "{self.PARAMS_TOPIC}"', params_yaml) self.assertIn(f"rate: {self.RATE}", params_yaml) + # The generated launch file should point to the correct package and + # executable so `ros2 launch` can start the generated node. with open(os.path.join(ros2_dir, "launch", "open_optimizer.launch.py"), encoding="utf-8") as f: launch_file = f.read() self.assertIn(f'package="{self.PACKAGE_NAME}"', launch_file) @@ -244,7 +253,11 @@ def ros2_test_env(cls): env = os.environ.copy() ros2_dir = cls.ros2_package_dir() os.makedirs(os.path.join(ros2_dir, ".ros_log"), exist_ok=True) + # Keep ROS2 logs inside the generated package directory so the tests do + # not depend on a global writable log location. env["ROS_LOG_DIR"] = os.path.join(ros2_dir, ".ros_log") + # Fast DDS is the most reliable middleware choice in our CI/local test + # setup when checking node discovery from separate processes. 
env.setdefault("RMW_IMPLEMENTATION", "rmw_fastrtps_cpp") env.pop("ROS_LOCALHOST_ONLY", None) return env @@ -337,6 +350,9 @@ def _wait_for_node_and_topics(self, ros2_dir, env): node_result = None topic_result = None for _ in range(6): + # `ros2 node list` confirms that the process joined the ROS graph, + # while `ros2 topic list` confirms that the expected interfaces are + # actually being advertised. node_result = self._run_shell( f"source {setup_script} && " "ros2 node list --no-daemon --spin-time 5", @@ -364,13 +380,15 @@ def _wait_for_node_and_topics(self, ros2_dir, env): def _assert_result_message(self, echo_stdout): """Assert that the echoed result message indicates a successful solve.""" + # We do not compare the full numeric solution here; instead, we check + # that the generated node returned a structurally valid result and that + # the solver reported convergence. self.assertIn("solution", echo_stdout) - # A bit of integration testing: check whether the solver was able to - # solve the problem successfully. self.assertRegex( echo_stdout, r"solution:\s*\n(?:- .+\n)+", msg=f"Expected a non-empty solution vector in result output:\n{echo_stdout}") + # `status: 0` matches `STATUS_CONVERGED` in the generated result message. self.assertIn("status: 0", echo_stdout) self.assertRegex( echo_stdout, @@ -389,10 +407,12 @@ def _assert_result_message(self, echo_stdout): def _exercise_running_optimizer(self, ros2_dir, env): """Publish one request and verify that one valid result message is returned.""" _, setup_script = self.ros2_shell() + # Start listening before publishing so the single response is not missed. echo_process = self._spawn_ros_process("ros2 topic echo /result --once", ros2_dir, env) try: time.sleep(1) + # Send one concrete request through the generated ROS2 interface. 
self._run_shell( f"source {setup_script} && " "ros2 topic pub --once /parameters " @@ -411,6 +431,8 @@ def _exercise_running_optimizer(self, ros2_dir, env): def test_ros2_package_generation(self): """Verify the ROS2 package files are generated.""" ros2_dir = self.ros2_package_dir() + # This is a lightweight smoke test for the generator itself before we + # attempt the slower build/run integration tests below. self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "package.xml"))) self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "CMakeLists.txt"))) self.assertTrue(os.path.isfile( @@ -420,6 +442,9 @@ def test_generated_ros2_package_works(self): """Build, run, and call the generated ROS2 package.""" ros2_dir = self.ros2_package_dir() env = self.ros2_test_env() + + # First validate the plain `ros2 run` path, which exercises the + # generated executable directly without going through the launch file. self._build_generated_package(ros2_dir, env) node_process = self._spawn_ros_process( @@ -438,6 +463,9 @@ def test_generated_ros2_launch_file_works(self): """Build the package, launch the node, and verify the launch file works.""" ros2_dir = self.ros2_package_dir() env = self.ros2_test_env() + + # Then validate the generated launch description, which should bring up + # the exact same node and parameters via `ros2 launch`. 
self._build_generated_package(ros2_dir, env) launch_process = self._spawn_ros_process( From a433c5c41dabeda998924fdde5b72d324f7a179c Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Thu, 26 Mar 2026 16:17:01 +0000 Subject: [PATCH 055/133] draft version of new matlab API - implement OpEnTcpOptimizer class - some documentation - old MATLAB codegen is now legacy --- matlab/api/OpEnTcpOptimizer.m | 344 ++++++++++++++++++ matlab/api/createOpEnTcpOptimizer.m | 23 ++ .../@OpEnConstraints/OpEnConstraints.m | 0 .../@OpEnOptimizer/OpEnOptimizer.m | 0 .../OpEnOptimizerBuilder.m | 0 .../@OpEnOptimizerBuilder/build.m | 0 matlab/{ => legacy}/README.md | 0 matlab/{ => legacy}/examples/example_open_1.m | 0 .../{ => legacy}/examples/example_open_lv.m | 0 .../{ => legacy}/examples/example_open_nav.m | 0 .../helpers/casadi_generate_c_code.m | 0 matlab/{ => legacy}/helpers/rosenbrock.m | 0 matlab/{ => legacy}/matlab_open_root.m | 0 .../private/codegen_get_cache.txt | 0 matlab/{ => legacy}/private/codegen_head.txt | 0 .../{ => legacy}/private/codegen_main_2.txt | 0 .../{ => legacy}/private/codegen_main_3.txt | 0 .../private/codegen_main_fn_def.txt | 0 matlab/{ => legacy}/setup_open.m | 0 19 files changed, 367 insertions(+) create mode 100644 matlab/api/OpEnTcpOptimizer.m create mode 100644 matlab/api/createOpEnTcpOptimizer.m rename matlab/{ => legacy}/@OpEnConstraints/OpEnConstraints.m (100%) rename matlab/{ => legacy}/@OpEnOptimizer/OpEnOptimizer.m (100%) rename matlab/{ => legacy}/@OpEnOptimizerBuilder/OpEnOptimizerBuilder.m (100%) rename matlab/{ => legacy}/@OpEnOptimizerBuilder/build.m (100%) rename matlab/{ => legacy}/README.md (100%) rename matlab/{ => legacy}/examples/example_open_1.m (100%) rename matlab/{ => legacy}/examples/example_open_lv.m (100%) rename matlab/{ => legacy}/examples/example_open_nav.m (100%) rename matlab/{ => legacy}/helpers/casadi_generate_c_code.m (100%) rename matlab/{ => legacy}/helpers/rosenbrock.m (100%) rename matlab/{ => 
legacy}/matlab_open_root.m (100%) rename matlab/{ => legacy}/private/codegen_get_cache.txt (100%) rename matlab/{ => legacy}/private/codegen_head.txt (100%) rename matlab/{ => legacy}/private/codegen_main_2.txt (100%) rename matlab/{ => legacy}/private/codegen_main_3.txt (100%) rename matlab/{ => legacy}/private/codegen_main_fn_def.txt (100%) rename matlab/{ => legacy}/setup_open.m (100%) diff --git a/matlab/api/OpEnTcpOptimizer.m b/matlab/api/OpEnTcpOptimizer.m new file mode 100644 index 00000000..92e1a709 --- /dev/null +++ b/matlab/api/OpEnTcpOptimizer.m @@ -0,0 +1,344 @@ +classdef OpEnTcpOptimizer < handle + %OPENTCPOPTIMIZER TCP client for Python-generated OpEn optimizers. + % CLIENT = OPENTCPOPTIMIZER(PORT) creates a client that connects to a + % TCP-enabled optimizer running on 127.0.0.1:PORT. + % + % CLIENT = OPENTCPOPTIMIZER(PORT, IP) connects to the optimizer at + % the specified IP address and TCP port. + % + % CLIENT = OPENTCPOPTIMIZER(IP, PORT) is also accepted for callers + % who prefer to provide the endpoint in IP/port order. + % + % CLIENT = OPENTCPOPTIMIZER(..., 'Timeout', T) sets the socket + % connect/read timeout in seconds. + % + % CLIENT = OPENTCPOPTIMIZER(..., 'MaxResponseBytes', N) limits the + % maximum number of bytes accepted from the optimizer. + % + % This interface is intended for optimizers generated in Python with + % the TCP interface enabled. The optimizer server must already be + % running; this class only communicates with it. + % + % Example: + % client = OpEnTcpOptimizer(3301); + % response = client.solve([2.0, 10.0]); + % if response.ok + % disp(response.solution); + % else + % error('OpEn:RemoteError', '%s', response.message); + % end + + properties (SetAccess = private) + %IP IPv4 address or host name of the optimizer server. + ip + + %PORT TCP port of the optimizer server. + port + + %TIMEOUT Connect and read timeout in seconds. + timeout + + %MAXRESPONSEBYTES Safety limit for incoming payload size. 
+ maxResponseBytes + end + + methods + function obj = OpEnTcpOptimizer(arg1, arg2, varargin) + %OPENTCPOPTIMIZER Construct a TCP client for a generated optimizer. + % + % OBJ = OPENTCPOPTIMIZER(PORT) uses 127.0.0.1. + % OBJ = OPENTCPOPTIMIZER(PORT, IP) uses the provided IP. + % OBJ = OPENTCPOPTIMIZER(IP, PORT) is also supported. + % + % Name-value pairs: + % 'Timeout' Socket timeout in seconds (default 10) + % 'MaxResponseBytes' Maximum response size in bytes + % (default 1048576) + + if nargin < 1 + error('OpEnTcpOptimizer:NotEnoughInputs', ... + 'You must provide at least a TCP port.'); + end + + if nargin < 2 + arg2 = []; + end + + [port, ip] = OpEnTcpOptimizer.normalizeEndpointArguments(arg1, arg2); + + parser = inputParser(); + parser.FunctionName = 'OpEnTcpOptimizer'; + addRequired(parser, 'port', @OpEnTcpOptimizer.isValidPort); + addRequired(parser, 'ip', @OpEnTcpOptimizer.isTextScalar); + addParameter(parser, 'Timeout', 10, @OpEnTcpOptimizer.isValidTimeout); + addParameter(parser, 'MaxResponseBytes', 1048576, @OpEnTcpOptimizer.isValidMaxResponseBytes); + parse(parser, port, ip, varargin{:}); + + obj.port = double(parser.Results.port); + obj.ip = OpEnTcpOptimizer.textToChar(parser.Results.ip); + obj.timeout = double(parser.Results.Timeout); + obj.maxResponseBytes = double(parser.Results.MaxResponseBytes); + end + + function response = ping(obj) + %PING Check whether the optimizer server is reachable. + % RESPONSE = PING(OBJ) sends {"Ping":1} and returns the + % decoded JSON response, typically a struct with field "Pong". + response = obj.sendRequest('{"Ping":1}', true); + end + + function kill(obj) + %KILL Ask the optimizer server to stop gracefully. + % KILL(OBJ) sends {"Kill":1}. The server closes the + % connection without returning a JSON payload. + obj.sendRequest('{"Kill":1}', false); + end + + function response = solve(obj, parameter, varargin) + %SOLVE Run the optimizer for the given parameter vector. 
+ % RESPONSE = SOLVE(OBJ, PARAMETER) sends PARAMETER to the + % optimizer and returns a struct with field RESPONSE.ok. + % + % RESPONSE = SOLVE(..., 'InitialGuess', U0, + % 'InitialLagrangeMultipliers', Y0, 'InitialPenalty', C0) + % mirrors the TCP options supported by the generated server. + % + % On success, RESPONSE contains the solver fields returned by + % the server, plus the aliases: + % ok = true + % raw = original decoded JSON response + % f1_infeasibility = delta_y_norm_over_c + % + % On failure, RESPONSE contains: + % ok = false + % raw = original decoded JSON response + % code, message + + parser = inputParser(); + parser.FunctionName = 'OpEnTcpOptimizer.solve'; + addRequired(parser, 'parameter', @OpEnTcpOptimizer.isVectorNumeric); + addParameter(parser, 'InitialGuess', [], @OpEnTcpOptimizer.isOptionalVectorNumeric); + addParameter(parser, 'InitialLagrangeMultipliers', [], @OpEnTcpOptimizer.isOptionalVectorNumeric); + addParameter(parser, 'InitialPenalty', [], @OpEnTcpOptimizer.isOptionalScalarNumeric); + parse(parser, parameter, varargin{:}); + + request = struct(); + request.Run = struct(); + request.Run.parameter = OpEnTcpOptimizer.toRowVector(parser.Results.parameter, 'parameter'); + + if ~isempty(parser.Results.InitialGuess) + request.Run.initial_guess = OpEnTcpOptimizer.toRowVector( ... + parser.Results.InitialGuess, 'InitialGuess'); + end + + if ~isempty(parser.Results.InitialLagrangeMultipliers) + request.Run.initial_lagrange_multipliers = OpEnTcpOptimizer.toRowVector( ... + parser.Results.InitialLagrangeMultipliers, 'InitialLagrangeMultipliers'); + end + + if ~isempty(parser.Results.InitialPenalty) + request.Run.initial_penalty = double(parser.Results.InitialPenalty); + end + + rawResponse = obj.sendRequest(jsonencode(request), true); + response = OpEnTcpOptimizer.normalizeSolverResponse(rawResponse); + end + + function response = call(obj, parameter, varargin) + %CALL Alias for SOLVE to match the Python TCP interface. 
+ response = obj.solve(parameter, varargin{:}); + end + + function response = consume(obj, parameter, varargin) + %CONSUME Alias for SOLVE to ease migration from older MATLAB code. + response = obj.solve(parameter, varargin{:}); + end + end + + methods (Access = private) + function response = sendRequest(obj, requestText, expectReply) + %SENDREQUEST Send a JSON request and optionally decode a JSON reply. + % + % The generated Rust server reads until the client closes its + % write side. We therefore use Java sockets so we can call + % shutdownOutput() after transmitting the JSON payload. + + socket = []; + cleanup = []; + + try + socket = java.net.Socket(); + timeoutMs = max(1, round(1000 * obj.timeout)); + socket.connect(java.net.InetSocketAddress(obj.ip, obj.port), timeoutMs); + socket.setSoTimeout(timeoutMs); + cleanup = onCleanup(@() OpEnTcpOptimizer.closeSocketQuietly(socket)); + + outputStream = socket.getOutputStream(); + requestBytes = int8(unicode2native(char(requestText), 'UTF-8')); + outputStream.write(requestBytes); + outputStream.flush(); + socket.shutdownOutput(); + + if ~expectReply + return; + end + + inputStream = socket.getInputStream(); + responseBytes = obj.readFully(inputStream); + if isempty(responseBytes) + error('OpEnTcpOptimizer:EmptyResponse', ... + 'The optimizer server closed the connection without sending a response.'); + end + + responseText = native2unicode(responseBytes, 'UTF-8'); + response = jsondecode(responseText); + catch err + % Ensure the socket is closed before rethrowing transport errors. + clear cleanup; + OpEnTcpOptimizer.closeSocketQuietly(socket); + rethrow(err); + end + + clear cleanup; + end + + function bytes = readFully(obj, inputStream) + %READFULLY Read the complete server reply until EOF. + % + % The server sends a single JSON document per connection and + % closes the connection afterwards, so EOF marks the end of + % the response payload. 
+ + byteStream = java.io.ByteArrayOutputStream(); + + while true + nextByte = inputStream.read(); + if nextByte == -1 + break; + end + + byteStream.write(nextByte); + if byteStream.size() > obj.maxResponseBytes + error('OpEnTcpOptimizer:ResponseTooLarge', ... + 'The optimizer response exceeded %d bytes.', obj.maxResponseBytes); + end + end + + rawBytes = uint8(mod(double(byteStream.toByteArray()), 256)); + bytes = reshape(rawBytes, 1, []); + end + end + + methods (Static, Access = private) + function response = normalizeSolverResponse(rawResponse) + %NORMALIZESOLVERRESPONSE Add MATLAB-friendly fields to server data. + + response = rawResponse; + response.raw = rawResponse; + + if isfield(rawResponse, 'type') && strcmp(rawResponse.type, 'Error') + response.ok = false; + return; + end + + response.ok = true; + if isfield(rawResponse, 'delta_y_norm_over_c') + response.f1_infeasibility = rawResponse.delta_y_norm_over_c; + end + end + + function vector = toRowVector(value, argumentName) + %TOROWVECTOR Validate a numeric vector and serialize it as a row. + if ~OpEnTcpOptimizer.isVectorNumeric(value) + error('OpEnTcpOptimizer:InvalidVector', ... + '%s must be a numeric vector.', argumentName); + end + + vector = reshape(double(value), 1, []); + end + + function closeSocketQuietly(socket) + %CLOSESOCKETQUIETLY Best-effort socket close for cleanup paths. + if isempty(socket) + return; + end + + try + socket.close(); + catch + % Ignore close errors during cleanup. + end + end + + function tf = isValidPort(value) + %ISVALIDPORT Validate a TCP port number. + tf = isnumeric(value) && isscalar(value) && isfinite(value) ... + && value == fix(value) && value >= 1 && value <= 65535; + end + + function tf = isValidTimeout(value) + %ISVALIDTIMEOUT Validate a positive timeout. + tf = isnumeric(value) && isscalar(value) && isfinite(value) && value > 0; + end + + function tf = isValidMaxResponseBytes(value) + %ISVALIDMAXRESPONSEBYTES Validate the maximum response size. 
+ tf = isnumeric(value) && isscalar(value) && isfinite(value) ... + && value == fix(value) && value > 0; + end + + function tf = isTextScalar(value) + %ISTEXTSCALAR True for character vectors and string scalars. + tf = ischar(value) || (isstring(value) && isscalar(value)); + end + + function [port, ip] = normalizeEndpointArguments(arg1, arg2) + %NORMALIZEENDPOINTARGUMENTS Support both (port, ip) and (ip, port). + defaultIp = '127.0.0.1'; + + if isempty(arg2) + port = arg1; + ip = defaultIp; + return; + end + + if OpEnTcpOptimizer.isValidPort(arg1) && OpEnTcpOptimizer.isTextScalar(arg2) + port = arg1; + ip = arg2; + return; + end + + if OpEnTcpOptimizer.isTextScalar(arg1) && OpEnTcpOptimizer.isValidPort(arg2) + port = arg2; + ip = arg1; + return; + end + + error('OpEnTcpOptimizer:InvalidEndpoint', ... + ['Specify the endpoint as (port), (port, ip), or (ip, port), ' ... + 'where port is an integer in [1, 65535].']); + end + + function value = textToChar(value) + %TEXTTOCHAR Convert a MATLAB text scalar to a character vector. + if isstring(value) + value = char(value); + end + end + + function tf = isVectorNumeric(value) + %ISVECTORNUMERIC True for finite numeric vectors. + tf = isnumeric(value) && isvector(value) && all(isfinite(value)); + end + + function tf = isOptionalVectorNumeric(value) + %ISOPTIONALVECTORNUMERIC True for [] or a numeric vector. + tf = isempty(value) || OpEnTcpOptimizer.isVectorNumeric(value); + end + + function tf = isOptionalScalarNumeric(value) + %ISOPTIONALSCALARNUMERIC True for [] or a finite numeric scalar. 
+ tf = isempty(value) || (isnumeric(value) && isscalar(value) && isfinite(value)); + end + end +end diff --git a/matlab/api/createOpEnTcpOptimizer.m b/matlab/api/createOpEnTcpOptimizer.m new file mode 100644 index 00000000..c90d0bbc --- /dev/null +++ b/matlab/api/createOpEnTcpOptimizer.m @@ -0,0 +1,23 @@ +function client = createOpEnTcpOptimizer(arg1, arg2, varargin) +%CREATEOPENTCPOPTIMIZER creates a MATLAB TCP client for an OpEn optimizer. +% CLIENT = CREATEOPENTCPOPTIMIZER(PORT) connects to a TCP-enabled +% generated optimizer on 127.0.0.1:PORT. +% +% CLIENT = CREATEOPENTCPOPTIMIZER(PORT, IP) connects to the specified +% IP address and port. +% +% CLIENT = CREATEOPENTCPOPTIMIZER(IP, PORT) is also accepted. +% +% CLIENT = CREATEOPENTCPOPTIMIZER(..., Name, Value) forwards all +% remaining name-value pairs to the OPENTCPOPTIMIZER constructor. See +% "help OpEnTcpOptimizer" for the supported options and methods. +% +% This helper keeps the public API lightweight while the implementation +% lives in the documented OpEnTcpOptimizer class. 
+ + if nargin < 2 + arg2 = []; + end + + client = OpEnTcpOptimizer(arg1, arg2, varargin{:}); +end diff --git a/matlab/@OpEnConstraints/OpEnConstraints.m b/matlab/legacy/@OpEnConstraints/OpEnConstraints.m similarity index 100% rename from matlab/@OpEnConstraints/OpEnConstraints.m rename to matlab/legacy/@OpEnConstraints/OpEnConstraints.m diff --git a/matlab/@OpEnOptimizer/OpEnOptimizer.m b/matlab/legacy/@OpEnOptimizer/OpEnOptimizer.m similarity index 100% rename from matlab/@OpEnOptimizer/OpEnOptimizer.m rename to matlab/legacy/@OpEnOptimizer/OpEnOptimizer.m diff --git a/matlab/@OpEnOptimizerBuilder/OpEnOptimizerBuilder.m b/matlab/legacy/@OpEnOptimizerBuilder/OpEnOptimizerBuilder.m similarity index 100% rename from matlab/@OpEnOptimizerBuilder/OpEnOptimizerBuilder.m rename to matlab/legacy/@OpEnOptimizerBuilder/OpEnOptimizerBuilder.m diff --git a/matlab/@OpEnOptimizerBuilder/build.m b/matlab/legacy/@OpEnOptimizerBuilder/build.m similarity index 100% rename from matlab/@OpEnOptimizerBuilder/build.m rename to matlab/legacy/@OpEnOptimizerBuilder/build.m diff --git a/matlab/README.md b/matlab/legacy/README.md similarity index 100% rename from matlab/README.md rename to matlab/legacy/README.md diff --git a/matlab/examples/example_open_1.m b/matlab/legacy/examples/example_open_1.m similarity index 100% rename from matlab/examples/example_open_1.m rename to matlab/legacy/examples/example_open_1.m diff --git a/matlab/examples/example_open_lv.m b/matlab/legacy/examples/example_open_lv.m similarity index 100% rename from matlab/examples/example_open_lv.m rename to matlab/legacy/examples/example_open_lv.m diff --git a/matlab/examples/example_open_nav.m b/matlab/legacy/examples/example_open_nav.m similarity index 100% rename from matlab/examples/example_open_nav.m rename to matlab/legacy/examples/example_open_nav.m diff --git a/matlab/helpers/casadi_generate_c_code.m b/matlab/legacy/helpers/casadi_generate_c_code.m similarity index 100% rename from 
matlab/helpers/casadi_generate_c_code.m rename to matlab/legacy/helpers/casadi_generate_c_code.m diff --git a/matlab/helpers/rosenbrock.m b/matlab/legacy/helpers/rosenbrock.m similarity index 100% rename from matlab/helpers/rosenbrock.m rename to matlab/legacy/helpers/rosenbrock.m diff --git a/matlab/matlab_open_root.m b/matlab/legacy/matlab_open_root.m similarity index 100% rename from matlab/matlab_open_root.m rename to matlab/legacy/matlab_open_root.m diff --git a/matlab/private/codegen_get_cache.txt b/matlab/legacy/private/codegen_get_cache.txt similarity index 100% rename from matlab/private/codegen_get_cache.txt rename to matlab/legacy/private/codegen_get_cache.txt diff --git a/matlab/private/codegen_head.txt b/matlab/legacy/private/codegen_head.txt similarity index 100% rename from matlab/private/codegen_head.txt rename to matlab/legacy/private/codegen_head.txt diff --git a/matlab/private/codegen_main_2.txt b/matlab/legacy/private/codegen_main_2.txt similarity index 100% rename from matlab/private/codegen_main_2.txt rename to matlab/legacy/private/codegen_main_2.txt diff --git a/matlab/private/codegen_main_3.txt b/matlab/legacy/private/codegen_main_3.txt similarity index 100% rename from matlab/private/codegen_main_3.txt rename to matlab/legacy/private/codegen_main_3.txt diff --git a/matlab/private/codegen_main_fn_def.txt b/matlab/legacy/private/codegen_main_fn_def.txt similarity index 100% rename from matlab/private/codegen_main_fn_def.txt rename to matlab/legacy/private/codegen_main_fn_def.txt diff --git a/matlab/setup_open.m b/matlab/legacy/setup_open.m similarity index 100% rename from matlab/setup_open.m rename to matlab/legacy/setup_open.m From 2969e6a6778b1f88e74c71ee4abf3124b9bfc182 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Thu, 26 Mar 2026 16:35:20 +0000 Subject: [PATCH 056/133] [ci skip] support for OCP solvers --- matlab/api/OpEnTcpOptimizer.m | 756 ++++++++++++++++++++++++---- matlab/api/createOpEnTcpOptimizer.m | 17 +- 2 files 
changed, 662 insertions(+), 111 deletions(-) diff --git a/matlab/api/OpEnTcpOptimizer.m b/matlab/api/OpEnTcpOptimizer.m index 92e1a709..a0eceb5d 100644 --- a/matlab/api/OpEnTcpOptimizer.m +++ b/matlab/api/OpEnTcpOptimizer.m @@ -9,24 +9,33 @@ % CLIENT = OPENTCPOPTIMIZER(IP, PORT) is also accepted for callers % who prefer to provide the endpoint in IP/port order. % - % CLIENT = OPENTCPOPTIMIZER(..., 'Timeout', T) sets the socket - % connect/read timeout in seconds. + % CLIENT = OPENTCPOPTIMIZER(..., 'ManifestPath', MANIFESTPATH) loads + % an OCP optimizer manifest created by Python's ``ocp`` module. Once + % a manifest is loaded, the client also supports named-parameter + % calls such as: % - % CLIENT = OPENTCPOPTIMIZER(..., 'MaxResponseBytes', N) limits the - % maximum number of bytes accepted from the optimizer. + % response = client.solve(x0=[1; 0], xref=[0; 0]); + % + % Name-value pairs: + % 'ManifestPath' Path to optimizer_manifest.json + % 'Timeout' Socket timeout in seconds (default 10) + % 'MaxResponseBytes' Maximum response size in bytes + % (default 1048576) + % + % If only a manifest path is provided, the constructor attempts to + % read ``optimizer.yml`` next to the manifest and use its TCP/IP + % endpoint automatically. % % This interface is intended for optimizers generated in Python with % the TCP interface enabled. The optimizer server must already be % running; this class only communicates with it. % - % Example: + % Examples: % client = OpEnTcpOptimizer(3301); % response = client.solve([2.0, 10.0]); - % if response.ok - % disp(response.solution); - % else - % error('OpEn:RemoteError', '%s', response.message); - % end + % + % client = OpEnTcpOptimizer('ManifestPath', 'optimizer_manifest.json'); + % response = client.solve(x0=[1.0, -1.0], xref=[0.0, 0.0]); properties (SetAccess = private) %IP IPv4 address or host name of the optimizer server. @@ -40,44 +49,91 @@ %MAXRESPONSEBYTES Safety limit for incoming payload size. 
maxResponseBytes + + %MANIFESTPATH Absolute path to an optional OCP manifest. + manifestPath + + %MANIFEST Decoded OCP manifest data. + manifest end methods - function obj = OpEnTcpOptimizer(arg1, arg2, varargin) + function obj = OpEnTcpOptimizer(varargin) %OPENTCPOPTIMIZER Construct a TCP client for a generated optimizer. % - % OBJ = OPENTCPOPTIMIZER(PORT) uses 127.0.0.1. - % OBJ = OPENTCPOPTIMIZER(PORT, IP) uses the provided IP. - % OBJ = OPENTCPOPTIMIZER(IP, PORT) is also supported. - % - % Name-value pairs: - % 'Timeout' Socket timeout in seconds (default 10) - % 'MaxResponseBytes' Maximum response size in bytes - % (default 1048576) + % Supported call patterns: + % OpEnTcpOptimizer(port) + % OpEnTcpOptimizer(port, ip) + % OpEnTcpOptimizer(ip, port) + % OpEnTcpOptimizer(..., 'ManifestPath', path) + % OpEnTcpOptimizer('ManifestPath', path) + + obj.manifestPath = ''; + obj.manifest = []; + + [endpointArgs, options] = OpEnTcpOptimizer.parseConstructorInputs(varargin); - if nargin < 1 - error('OpEnTcpOptimizer:NotEnoughInputs', ... - 'You must provide at least a TCP port.'); + if ~isempty(options.ManifestPath) + obj.loadManifest(options.ManifestPath); end - if nargin < 2 - arg2 = []; + [port, ip] = obj.resolveEndpoint(endpointArgs); + + obj.port = double(port); + obj.ip = OpEnTcpOptimizer.textToChar(ip); + obj.timeout = double(options.Timeout); + obj.maxResponseBytes = double(options.MaxResponseBytes); + end + + function obj = loadManifest(obj, manifestPath) + %LOADMANIFEST Load an OCP optimizer manifest. + % OBJ = LOADMANIFEST(OBJ, MANIFESTPATH) loads an + % ``optimizer_manifest.json`` file created by the Python OCP + % module. After loading the manifest, the client accepts + % named-parameter calls such as ``solve(x0=..., xref=...)``. + + if ~OpEnTcpOptimizer.isTextScalar(manifestPath) + error('OpEnTcpOptimizer:InvalidManifestPath', ... 
+ 'ManifestPath must be a character vector or string scalar.'); end + manifestPath = OpEnTcpOptimizer.textToChar(manifestPath); + manifestPath = OpEnTcpOptimizer.validateManifestPath(manifestPath); + manifestText = fileread(manifestPath); + manifestData = jsondecode(manifestText); + + if ~isstruct(manifestData) + error('OpEnTcpOptimizer:InvalidManifest', ... + 'The manifest must decode to a MATLAB struct.'); + end + + if ~isfield(manifestData, 'parameters') + error('OpEnTcpOptimizer:InvalidManifest', ... + 'The manifest does not contain a "parameters" field.'); + end + + OpEnTcpOptimizer.validateManifestParameters(manifestData.parameters); - [port, ip] = OpEnTcpOptimizer.normalizeEndpointArguments(arg1, arg2); + obj.manifestPath = manifestPath; + obj.manifest = manifestData; + end + + function tf = hasManifest(obj) + %HASMANIFEST True if an OCP manifest has been loaded. + tf = ~isempty(obj.manifest); + end - parser = inputParser(); - parser.FunctionName = 'OpEnTcpOptimizer'; - addRequired(parser, 'port', @OpEnTcpOptimizer.isValidPort); - addRequired(parser, 'ip', @OpEnTcpOptimizer.isTextScalar); - addParameter(parser, 'Timeout', 10, @OpEnTcpOptimizer.isValidTimeout); - addParameter(parser, 'MaxResponseBytes', 1048576, @OpEnTcpOptimizer.isValidMaxResponseBytes); - parse(parser, port, ip, varargin{:}); + function names = parameterNames(obj) + %PARAMETERNAMES Return the ordered OCP parameter names. 
+ if ~obj.hasManifest() + names = {}; + return; + end - obj.port = double(parser.Results.port); - obj.ip = OpEnTcpOptimizer.textToChar(parser.Results.ip); - obj.timeout = double(parser.Results.Timeout); - obj.maxResponseBytes = double(parser.Results.MaxResponseBytes); + definitions = OpEnTcpOptimizer.manifestParametersAsCell(obj.manifest.parameters); + names = cell(size(definitions)); + for i = 1:numel(definitions) + names{i} = definitions{i}.name; + end end function response = ping(obj) @@ -94,68 +150,289 @@ function kill(obj) obj.sendRequest('{"Kill":1}', false); end - function response = solve(obj, parameter, varargin) - %SOLVE Run the optimizer for the given parameter vector. - % RESPONSE = SOLVE(OBJ, PARAMETER) sends PARAMETER to the - % optimizer and returns a struct with field RESPONSE.ok. + function response = solve(obj, varargin) + %SOLVE Run a parametric or OCP optimizer over TCP. + % RESPONSE = SOLVE(OBJ, P) sends the flat parameter vector P + % to a standard parametric optimizer. % - % RESPONSE = SOLVE(..., 'InitialGuess', U0, - % 'InitialLagrangeMultipliers', Y0, 'InitialPenalty', C0) - % mirrors the TCP options supported by the generated server. + % RESPONSE = SOLVE(OBJ, x0=..., xref=..., ...) packs the named + % parameter blocks declared in the loaded OCP manifest and + % sends the resulting flat parameter vector to the solver. 
+ % + % In both modes, the optional solver warm-start arguments are: + % InitialGuess + % InitialLagrangeMultipliers + % InitialPenalty % % On success, RESPONSE contains the solver fields returned by - % the server, plus the aliases: + % the server, plus: % ok = true % raw = original decoded JSON response % f1_infeasibility = delta_y_norm_over_c % - % On failure, RESPONSE contains: - % ok = false - % raw = original decoded JSON response - % code, message + % For OCP solves, RESPONSE also contains: + % packed_parameter = flat parameter vector sent to server + % inputs = stage-wise control inputs, when available + % states = state trajectory for multiple shooting OCPs + + [parameterVector, solverOptions, solveMode] = obj.prepareSolveInputs(varargin); + rawResponse = obj.runSolveRequest(parameterVector, solverOptions); + response = OpEnTcpOptimizer.normalizeSolverResponse(rawResponse); + + if strcmp(solveMode, 'ocp') + response.packed_parameter = parameterVector; + if response.ok + response = obj.enrichOcpResponse(response, parameterVector); + end + end + end + + function response = call(obj, varargin) + %CALL Alias for SOLVE to match the Python TCP interface. + response = obj.solve(varargin{:}); + end + + function response = consume(obj, varargin) + %CONSUME Alias for SOLVE to ease migration from older MATLAB code. + response = obj.solve(varargin{:}); + end + end + + methods (Access = private) + function [parameterVector, solverOptions, solveMode] = prepareSolveInputs(obj, inputArgs) + %PREPARESOLVEINPUTS Parse parametric or OCP solve inputs. + + if isempty(inputArgs) + error('OpEnTcpOptimizer:MissingSolveArguments', ... 
+ 'Provide either a flat parameter vector or named OCP parameters.'); + end + + if OpEnTcpOptimizer.isVectorNumeric(inputArgs{1}) + solveMode = 'parametric'; + parameterVector = OpEnTcpOptimizer.toRowVector(inputArgs{1}, 'parameter'); + solverOptions = OpEnTcpOptimizer.parseSolverOptions(inputArgs(2:end)); + return; + end + + if ~obj.hasManifest() + error('OpEnTcpOptimizer:ManifestRequired', ... + ['Named parameter solves require an OCP manifest. Load one with ' ... + 'loadManifest(...) or the constructor option ''ManifestPath''.']); + end + + [parameterVector, solverOptions] = obj.packOcpParameters(inputArgs); + solveMode = 'ocp'; + end + + function [parameterVector, solverOptions] = packOcpParameters(obj, inputArgs) + %PACKOCPPARAMETERS Pack named OCP parameters using the manifest. + + pairs = OpEnTcpOptimizer.normalizeNameValuePairs(inputArgs, 'OpEnTcpOptimizer.solve'); + solverOptions = OpEnTcpOptimizer.emptySolverOptions(); + providedValues = containers.Map('KeyType', 'char', 'ValueType', 'any'); + + for i = 1:size(pairs, 1) + name = pairs{i, 1}; + value = pairs{i, 2}; + lowerName = lower(name); + + switch lowerName + case 'initialguess' + solverOptions.InitialGuess = value; + case 'initiallagrangemultipliers' + solverOptions.InitialLagrangeMultipliers = value; + case 'initialpenalty' + solverOptions.InitialPenalty = value; + otherwise + if isKey(providedValues, name) + error('OpEnTcpOptimizer:DuplicateParameter', ... 
+ 'Parameter "%s" was provided more than once.', name); + end + providedValues(name) = value; + end + end + + solverOptions = OpEnTcpOptimizer.validateSolverOptions(solverOptions); + + definitions = OpEnTcpOptimizer.manifestParametersAsCell(obj.manifest.parameters); + parameterVector = []; + missing = {}; + + for i = 1:numel(definitions) + definition = definitions{i}; + if isKey(providedValues, definition.name) + value = providedValues(definition.name); + remove(providedValues, definition.name); + else + value = definition.default; + end + + if isempty(value) + missing{end + 1} = definition.name; %#ok + continue; + end + + parameterVector = [parameterVector, ... %#ok + OpEnTcpOptimizer.normalizeParameterBlock( ... + value, definition.size, definition.name)]; + end + + if ~isempty(missing) + error('OpEnTcpOptimizer:MissingOcpParameters', ... + 'Missing values for parameters: %s.', strjoin(missing, ', ')); + end + + remainingNames = sort(keys(providedValues)); + if ~isempty(remainingNames) + error('OpEnTcpOptimizer:UnknownOcpParameters', ... + 'Unknown OCP parameter(s): %s.', strjoin(remainingNames, ', ')); + end + end - parser = inputParser(); - parser.FunctionName = 'OpEnTcpOptimizer.solve'; - addRequired(parser, 'parameter', @OpEnTcpOptimizer.isVectorNumeric); - addParameter(parser, 'InitialGuess', [], @OpEnTcpOptimizer.isOptionalVectorNumeric); - addParameter(parser, 'InitialLagrangeMultipliers', [], @OpEnTcpOptimizer.isOptionalVectorNumeric); - addParameter(parser, 'InitialPenalty', [], @OpEnTcpOptimizer.isOptionalScalarNumeric); - parse(parser, parameter, varargin{:}); + function response = enrichOcpResponse(obj, response, packedParameters) + %ENRICHOCPRESPONSE Add OCP-oriented views to a successful solve. + + response.inputs = obj.extractOcpInputs(response.solution); + + if strcmp(obj.manifest.shooting, 'multiple') + response.states = obj.extractMultipleShootingStates( ... 
+ response.solution, packedParameters); + end + end + + function inputs = extractOcpInputs(obj, flatSolution) + %EXTRACTOCPINPUTS Extract stage-wise input blocks from the solution. + + flatSolution = OpEnTcpOptimizer.toRowVector(flatSolution, 'solution'); + + if strcmp(obj.manifest.shooting, 'single') + nu = double(obj.manifest.nu); + horizon = double(obj.manifest.horizon); + inputs = cell(1, horizon); + + for stageIdx = 1:horizon + startIdx = (stageIdx - 1) * nu + 1; + stopIdx = stageIdx * nu; + inputs{stageIdx} = flatSolution(startIdx:stopIdx); + end + return; + end + + sliceMatrix = obj.manifest.input_slices; + inputs = cell(1, size(sliceMatrix, 1)); + for i = 1:size(sliceMatrix, 1) + startIdx = sliceMatrix(i, 1) + 1; + stopIdx = sliceMatrix(i, 2); + inputs{i} = flatSolution(startIdx:stopIdx); + end + end + + function states = extractMultipleShootingStates(obj, flatSolution, packedParameters) + %EXTRACTMULTIPLESHOOTINGSTATES Extract the state trajectory. + % + % For multiple shooting OCPs the manifest contains the state + % slices directly, so no extra CasADi dependency is needed in + % MATLAB to reconstruct the state trajectory. + + flatSolution = OpEnTcpOptimizer.toRowVector(flatSolution, 'solution'); + packedParameters = OpEnTcpOptimizer.toRowVector(packedParameters, 'packedParameters'); + + stateSlices = obj.manifest.state_slices; + states = cell(1, size(stateSlices, 1) + 1); + states{1} = obj.extractParameterByName(packedParameters, 'x0'); + + for i = 1:size(stateSlices, 1) + startIdx = stateSlices(i, 1) + 1; + stopIdx = stateSlices(i, 2); + states{i + 1} = flatSolution(startIdx:stopIdx); + end + end + + function value = extractParameterByName(obj, packedParameters, parameterName) + %EXTRACTPARAMETERBYNAME Extract one named parameter block. 
+ + definitions = OpEnTcpOptimizer.manifestParametersAsCell(obj.manifest.parameters); + offset = 0; + + for i = 1:numel(definitions) + definition = definitions{i}; + nextOffset = offset + definition.size; + if strcmp(definition.name, parameterName) + value = packedParameters(offset + 1:nextOffset); + return; + end + offset = nextOffset; + end + + error('OpEnTcpOptimizer:MissingManifestParameter', ... + 'The manifest does not define a parameter named "%s".', parameterName); + end + + function rawResponse = runSolveRequest(obj, parameterVector, solverOptions) + %RUNSOLVEREQUEST Serialize and send a solver execution request. request = struct(); request.Run = struct(); - request.Run.parameter = OpEnTcpOptimizer.toRowVector(parser.Results.parameter, 'parameter'); + request.Run.parameter = OpEnTcpOptimizer.toRowVector(parameterVector, 'parameter'); - if ~isempty(parser.Results.InitialGuess) + if ~isempty(solverOptions.InitialGuess) request.Run.initial_guess = OpEnTcpOptimizer.toRowVector( ... - parser.Results.InitialGuess, 'InitialGuess'); + solverOptions.InitialGuess, 'InitialGuess'); end - if ~isempty(parser.Results.InitialLagrangeMultipliers) + if ~isempty(solverOptions.InitialLagrangeMultipliers) request.Run.initial_lagrange_multipliers = OpEnTcpOptimizer.toRowVector( ... - parser.Results.InitialLagrangeMultipliers, 'InitialLagrangeMultipliers'); + solverOptions.InitialLagrangeMultipliers, 'InitialLagrangeMultipliers'); end - if ~isempty(parser.Results.InitialPenalty) - request.Run.initial_penalty = double(parser.Results.InitialPenalty); + if ~isempty(solverOptions.InitialPenalty) + request.Run.initial_penalty = double(solverOptions.InitialPenalty); end rawResponse = obj.sendRequest(jsonencode(request), true); - response = OpEnTcpOptimizer.normalizeSolverResponse(rawResponse); end - function response = call(obj, parameter, varargin) - %CALL Alias for SOLVE to match the Python TCP interface. 
- response = obj.solve(parameter, varargin{:}); - end + function [port, ip] = resolveEndpoint(obj, endpointArgs) + %RESOLVEENDPOINT Resolve the TCP endpoint from inputs or manifest. - function response = consume(obj, parameter, varargin) - %CONSUME Alias for SOLVE to ease migration from older MATLAB code. - response = obj.solve(parameter, varargin{:}); + if isempty(endpointArgs) + if ~obj.hasManifest() + error('OpEnTcpOptimizer:MissingEndpoint', ... + 'Provide a TCP endpoint or a manifest with a matching optimizer.yml file.'); + end + + tcpDefaults = OpEnTcpOptimizer.readTcpDefaultsFromManifest(obj.manifestPath); + if isempty(tcpDefaults) + error('OpEnTcpOptimizer:MissingEndpoint', ... + ['No TCP endpoint was provided and no TCP settings could be read from ' ... + 'optimizer.yml next to the manifest.']); + end + + port = tcpDefaults.port; + ip = tcpDefaults.ip; + return; + end + + if numel(endpointArgs) == 1 + if ~OpEnTcpOptimizer.isValidPort(endpointArgs{1}) + error('OpEnTcpOptimizer:InvalidEndpoint', ... + 'A single endpoint argument must be a TCP port.'); + end + port = endpointArgs{1}; + ip = '127.0.0.1'; + return; + end + + if numel(endpointArgs) == 2 + [port, ip] = OpEnTcpOptimizer.normalizeEndpointArguments( ... + endpointArgs{1}, endpointArgs{2}); + return; + end + + error('OpEnTcpOptimizer:InvalidEndpoint', ... + 'Specify the endpoint as (port), (port, ip), or (ip, port).'); end - end - methods (Access = private) function response = sendRequest(obj, requestText, expectReply) %SENDREQUEST Send a JSON request and optionally decode a JSON reply. % @@ -230,6 +507,170 @@ function kill(obj) end methods (Static, Access = private) + function [endpointArgs, options] = parseConstructorInputs(inputArgs) + %PARSECONSTRUCTORINPUTS Split constructor endpoint and options. + + endpointArgs = {}; + options = struct( ... + 'ManifestPath', '', ... + 'Timeout', 10, ... 
+ 'MaxResponseBytes', 1048576); + + idx = 1; + while idx <= numel(inputArgs) + token = inputArgs{idx}; + if OpEnTcpOptimizer.isRecognizedConstructorOption(token) + if idx == numel(inputArgs) + error('OpEnTcpOptimizer:InvalidConstructorInput', ... + 'Missing value for option "%s".', OpEnTcpOptimizer.textToChar(token)); + end + + name = lower(OpEnTcpOptimizer.textToChar(token)); + value = inputArgs{idx + 1}; + switch name + case 'manifestpath' + if ~OpEnTcpOptimizer.isTextScalar(value) + error('OpEnTcpOptimizer:InvalidManifestPath', ... + 'ManifestPath must be a character vector or string scalar.'); + end + options.ManifestPath = OpEnTcpOptimizer.textToChar(value); + case 'timeout' + options.Timeout = value; + case 'maxresponsebytes' + options.MaxResponseBytes = value; + end + idx = idx + 2; + else + endpointArgs{end + 1} = token; %#ok + idx = idx + 1; + end + end + + if numel(endpointArgs) == 1 && OpEnTcpOptimizer.isManifestPathToken(endpointArgs{1}) + options.ManifestPath = OpEnTcpOptimizer.textToChar(endpointArgs{1}); + endpointArgs = {}; + end + + if ~(OpEnTcpOptimizer.isValidTimeout(options.Timeout)) + error('OpEnTcpOptimizer:InvalidTimeout', ... + 'Timeout must be a positive scalar.'); + end + + if ~(OpEnTcpOptimizer.isValidMaxResponseBytes(options.MaxResponseBytes)) + error('OpEnTcpOptimizer:InvalidMaxResponseBytes', ... + 'MaxResponseBytes must be a positive integer.'); + end + end + + function tf = isRecognizedConstructorOption(token) + %ISRECOGNIZEDCONSTRUCTOROPTION True for constructor option names. + tf = OpEnTcpOptimizer.isTextScalar(token) && any(strcmpi( ... + OpEnTcpOptimizer.textToChar(token), {'ManifestPath', 'Timeout', 'MaxResponseBytes'})); + end + + function tf = isManifestPathToken(token) + %ISMANIFESTPATHTOKEN Heuristic for a positional manifest path. 
+ if ~OpEnTcpOptimizer.isTextScalar(token) + tf = false; + return; + end + + token = OpEnTcpOptimizer.textToChar(token); + [~, ~, ext] = fileparts(token); + tf = strcmpi(ext, '.json') && isfile(token); + end + + function manifestPath = validateManifestPath(manifestPath) + %VALIDATEMANIFESTPATH Validate and absolutize a manifest path. + if ~isfile(manifestPath) + error('OpEnTcpOptimizer:ManifestNotFound', ... + 'Manifest file not found: %s', manifestPath); + end + + [folder, name, ext] = fileparts(manifestPath); + if ~strcmpi(ext, '.json') + error('OpEnTcpOptimizer:InvalidManifestPath', ... + 'The manifest path must point to a JSON file.'); + end + + manifestPath = fullfile(folder, [name, ext]); + end + + function validateManifestParameters(parameters) + %VALIDATEMANIFESTPARAMETERS Validate manifest parameter entries. + definitions = OpEnTcpOptimizer.manifestParametersAsCell(parameters); + for i = 1:numel(definitions) + definition = definitions{i}; + if ~isfield(definition, 'name') || ~isfield(definition, 'size') + error('OpEnTcpOptimizer:InvalidManifest', ... + 'Each manifest parameter needs "name" and "size" fields.'); + end + if ~ischar(definition.name) && ~isstring(definition.name) + error('OpEnTcpOptimizer:InvalidManifest', ... + 'Manifest parameter names must be text values.'); + end + if ~OpEnTcpOptimizer.isValidPositiveInteger(definition.size) + error('OpEnTcpOptimizer:InvalidManifest', ... + 'Manifest parameter sizes must be positive integers.'); + end + end + end + + function defaults = readTcpDefaultsFromManifest(manifestPath) + %READTCPDEFAULTSFROMMANIFEST Read TCP defaults from optimizer.yml. + defaults = []; + optimizerDir = fileparts(manifestPath); + yamlPath = fullfile(optimizerDir, 'optimizer.yml'); + + if ~isfile(yamlPath) + return; + end + + defaults = OpEnTcpOptimizer.parseOptimizerYaml(yamlPath); + end + + function defaults = parseOptimizerYaml(yamlPath) + %PARSEOPTIMIZERYAML Read the tcp.ip and tcp.port fields. 
+ defaults = []; + yamlText = fileread(yamlPath); + lines = regexp(yamlText, '\r\n|\n|\r', 'split'); + + inTcpBlock = false; + ip = ''; + port = []; + + for i = 1:numel(lines) + line = lines{i}; + trimmed = strtrim(line); + + if isempty(trimmed) + continue; + end + + if strcmp(trimmed, 'tcp:') + inTcpBlock = true; + continue; + end + + if inTcpBlock && ~isempty(line) && ~isspace(line(1)) + break; + end + + if inTcpBlock + if startsWith(trimmed, 'ip:') + ip = strtrim(extractAfter(trimmed, 3)); + elseif startsWith(trimmed, 'port:') + portText = strtrim(extractAfter(trimmed, 5)); + port = str2double(portText); + end + end + end + + if ~isempty(ip) && OpEnTcpOptimizer.isValidPort(port) + defaults = struct('ip', ip, 'port', port); + end + end + function response = normalizeSolverResponse(rawResponse) %NORMALIZESOLVERRESPONSE Add MATLAB-friendly fields to server data. @@ -247,6 +688,122 @@ function kill(obj) end end + function solverOptions = parseSolverOptions(inputArgs) + %PARSESOLVEROPTIONS Parse warm-start related name-value pairs. + pairs = OpEnTcpOptimizer.normalizeNameValuePairs(inputArgs, 'OpEnTcpOptimizer.solve'); + solverOptions = OpEnTcpOptimizer.emptySolverOptions(); + + for i = 1:size(pairs, 1) + name = lower(pairs{i, 1}); + value = pairs{i, 2}; + + switch name + case 'initialguess' + solverOptions.InitialGuess = value; + case 'initiallagrangemultipliers' + solverOptions.InitialLagrangeMultipliers = value; + case 'initialpenalty' + solverOptions.InitialPenalty = value; + otherwise + error('OpEnTcpOptimizer:UnknownSolveOption', ... + 'Unknown solve option "%s".', pairs{i, 1}); + end + end + + solverOptions = OpEnTcpOptimizer.validateSolverOptions(solverOptions); + end + + function solverOptions = validateSolverOptions(solverOptions) + %VALIDATESOLVEROPTIONS Validate optional warm-start inputs. + if ~OpEnTcpOptimizer.isOptionalVectorNumeric(solverOptions.InitialGuess) + error('OpEnTcpOptimizer:InvalidInitialGuess', ... 
+ 'InitialGuess must be a numeric vector or [].'); + end + + if ~OpEnTcpOptimizer.isOptionalVectorNumeric(solverOptions.InitialLagrangeMultipliers) + error('OpEnTcpOptimizer:InvalidInitialLagrangeMultipliers', ... + 'InitialLagrangeMultipliers must be a numeric vector or [].'); + end + + if ~OpEnTcpOptimizer.isOptionalScalarNumeric(solverOptions.InitialPenalty) + error('OpEnTcpOptimizer:InvalidInitialPenalty', ... + 'InitialPenalty must be a numeric scalar or [].'); + end + end + + function solverOptions = emptySolverOptions() + %EMPTYSOLVEROPTIONS Return the default solve option bundle. + solverOptions = struct( ... + 'InitialGuess', [], ... + 'InitialLagrangeMultipliers', [], ... + 'InitialPenalty', []); + end + + function pairs = normalizeNameValuePairs(inputArgs, functionName) + %NORMALIZENAMEVALUEPAIRS Validate and normalize name-value pairs. + if isempty(inputArgs) + pairs = cell(0, 2); + return; + end + + if mod(numel(inputArgs), 2) ~= 0 + error('OpEnTcpOptimizer:InvalidNameValueInput', ... + '%s expects name-value arguments in pairs.', functionName); + end + + pairs = cell(numel(inputArgs) / 2, 2); + pairIdx = 1; + for i = 1:2:numel(inputArgs) + name = inputArgs{i}; + if ~OpEnTcpOptimizer.isTextScalar(name) + error('OpEnTcpOptimizer:InvalidNameValueInput', ... + 'Expected a text parameter name at argument position %d.', i); + end + + pairs{pairIdx, 1} = OpEnTcpOptimizer.textToChar(name); + pairs{pairIdx, 2} = inputArgs{i + 1}; + pairIdx = pairIdx + 1; + end + end + + function blocks = manifestParametersAsCell(parameters) + %MANIFESTPARAMETERSASCELL Normalize decoded parameter definitions. + if isempty(parameters) + blocks = {}; + return; + end + + if isstruct(parameters) + blocks = cell(1, numel(parameters)); + for i = 1:numel(parameters) + blocks{i} = parameters(i); + end + return; + end + + error('OpEnTcpOptimizer:InvalidManifest', ... 
+ 'Manifest parameters must decode to a struct array.'); + end + + function vector = normalizeParameterBlock(value, expectedSize, parameterName) + %NORMALIZEPARAMETERBLOCK Normalize one OCP parameter block. + if expectedSize == 1 && isnumeric(value) && isscalar(value) && isfinite(value) + vector = double(value); + return; + end + + if ~OpEnTcpOptimizer.isVectorNumeric(value) + error('OpEnTcpOptimizer:InvalidOcpParameter', ... + 'Parameter "%s" must be a numeric vector.', parameterName); + end + + vector = reshape(double(value), 1, []); + if numel(vector) ~= double(expectedSize) + error('OpEnTcpOptimizer:InvalidOcpParameterDimension', ... + 'Parameter "%s" must have length %d.', parameterName, double(expectedSize)); + end + end + function vector = toRowVector(value, argumentName) %TOROWVECTOR Validate a numeric vector and serialize it as a row. if ~OpEnTcpOptimizer.isVectorNumeric(value) @@ -270,10 +827,29 @@ function closeSocketQuietly(socket) end end + function [port, ip] = normalizeEndpointArguments(arg1, arg2) + %NORMALIZEENDPOINTARGUMENTS Support both (port, ip) and (ip, port). + if OpEnTcpOptimizer.isValidPort(arg1) && OpEnTcpOptimizer.isTextScalar(arg2) + port = arg1; + ip = arg2; + return; + end + + if OpEnTcpOptimizer.isTextScalar(arg1) && OpEnTcpOptimizer.isValidPort(arg2) + port = arg2; + ip = arg1; + return; + end + + error('OpEnTcpOptimizer:InvalidEndpoint', ... + ['Specify the endpoint as (port), (port, ip), or (ip, port), ' ... + 'where port is an integer in [1, 65535].']); + end + function tf = isValidPort(value) %ISVALIDPORT Validate a TCP port number. - tf = isnumeric(value) && isscalar(value) && isfinite(value) ... - && value == fix(value) && value >= 1 && value <= 65535; + tf = OpEnTcpOptimizer.isValidPositiveInteger(value) ... 
+ && double(value) >= 1 && double(value) <= 65535; end function tf = isValidTimeout(value) @@ -283,6 +859,11 @@ function closeSocketQuietly(socket) function tf = isValidMaxResponseBytes(value) %ISVALIDMAXRESPONSEBYTES Validate the maximum response size. + tf = OpEnTcpOptimizer.isValidPositiveInteger(value); + end + + function tf = isValidPositiveInteger(value) + %ISVALIDPOSITIVEINTEGER Validate a positive integer scalar. tf = isnumeric(value) && isscalar(value) && isfinite(value) ... && value == fix(value) && value > 0; end @@ -292,33 +873,6 @@ function closeSocketQuietly(socket) tf = ischar(value) || (isstring(value) && isscalar(value)); end - function [port, ip] = normalizeEndpointArguments(arg1, arg2) - %NORMALIZEENDPOINTARGUMENTS Support both (port, ip) and (ip, port). - defaultIp = '127.0.0.1'; - - if isempty(arg2) - port = arg1; - ip = defaultIp; - return; - end - - if OpEnTcpOptimizer.isValidPort(arg1) && OpEnTcpOptimizer.isTextScalar(arg2) - port = arg1; - ip = arg2; - return; - end - - if OpEnTcpOptimizer.isTextScalar(arg1) && OpEnTcpOptimizer.isValidPort(arg2) - port = arg2; - ip = arg1; - return; - end - - error('OpEnTcpOptimizer:InvalidEndpoint', ... - ['Specify the endpoint as (port), (port, ip), or (ip, port), ' ... - 'where port is an integer in [1, 65535].']); - end - function value = textToChar(value) %TEXTTOCHAR Convert a MATLAB text scalar to a character vector. if isstring(value) diff --git a/matlab/api/createOpEnTcpOptimizer.m b/matlab/api/createOpEnTcpOptimizer.m index c90d0bbc..fd1fb57a 100644 --- a/matlab/api/createOpEnTcpOptimizer.m +++ b/matlab/api/createOpEnTcpOptimizer.m @@ -1,5 +1,5 @@ -function client = createOpEnTcpOptimizer(arg1, arg2, varargin) -%CREATEOPENTCPOPTIMIZER creates a MATLAB TCP client for an OpEn optimizer. +function client = createOpEnTcpOptimizer(varargin) +%CREATEOPENTCPOPTIMIZER Create a MATLAB TCP client for an OpEn optimizer. 
% CLIENT = CREATEOPENTCPOPTIMIZER(PORT) connects to a TCP-enabled % generated optimizer on 127.0.0.1:PORT. % @@ -8,16 +8,13 @@ % % CLIENT = CREATEOPENTCPOPTIMIZER(IP, PORT) is also accepted. % +% CLIENT = CREATEOPENTCPOPTIMIZER('ManifestPath', MANIFESTPATH) creates +% a manifest-aware OCP TCP client and tries to read the endpoint from the +% sibling ``optimizer.yml`` file. +% % CLIENT = CREATEOPENTCPOPTIMIZER(..., Name, Value) forwards all % remaining name-value pairs to the OPENTCPOPTIMIZER constructor. See % "help OpEnTcpOptimizer" for the supported options and methods. -% -% This helper keeps the public API lightweight while the implementation -% lives in the documented OpEnTcpOptimizer class. - - if nargin < 2 - arg2 = []; - end - client = OpEnTcpOptimizer(arg1, arg2, varargin{:}); + client = OpEnTcpOptimizer(varargin{:}); end From 3aaa4bfa7870d995c9f4b35fdb89a8d03af0c19a Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Thu, 26 Mar 2026 16:44:41 +0000 Subject: [PATCH 057/133] add changelog in matlab toolbox --- matlab/CHANGELOG.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 matlab/CHANGELOG.md diff --git a/matlab/CHANGELOG.md b/matlab/CHANGELOG.md new file mode 100644 index 00000000..c116abcd --- /dev/null +++ b/matlab/CHANGELOG.md @@ -0,0 +1,24 @@ +# Change Log + +All notable changes to the MATLAB interface will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/) +and this project adheres to [Semantic Versioning](http://semver.org/). + +Note: This is the Changelog file for the MATLAB interface of OpEn. + + +## [0.1.0] - Unreleased + +### Added + +- New MATLAB TCP client in `matlab/api/OpEnTcpOptimizer.m` for TCP-enabled optimizers generated in Python. +- Convenience constructor helper `matlab/api/createOpEnTcpOptimizer.m`. +- Support for parametric optimizers over TCP using calls of the form `response = client.solve(p)`. 
+- Support for OCP optimizers over TCP by loading `optimizer_manifest.json` and allowing named-parameter calls such as `response = client.solve('x0', x0, 'xref', xref)`. +- Automatic packing of named OCP parameter blocks according to the manifest order, including support for manifest defaults. +- MATLAB-side helpers for `ping`, `kill`, warm-start options, and normalized solver responses. + +### Changed + +- Added a dedicated MATLAB API area under `matlab/api` for the current interface, separate from the legacy MATLAB code. From a6434cbb6eeed10017509c80af67d235fb3cc8c3 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Thu, 26 Mar 2026 16:56:39 +0000 Subject: [PATCH 058/133] matlab api: readme file --- matlab/README.md | 205 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 205 insertions(+) create mode 100644 matlab/README.md diff --git a/matlab/README.md b/matlab/README.md new file mode 100644 index 00000000..5a5ae63d --- /dev/null +++ b/matlab/README.md @@ -0,0 +1,205 @@ +# OpEn MATLAB API + +This directory contains the MATLAB interface of **Optimization Engine (OpEn)**. + +The current MATLAB API lives in [`matlab/api`](./api). It communicates with +optimizers generated in Python that expose a TCP server interface. + +The legacy MATLAB code is preserved in [`matlab/legacy`](./legacy). 
+ +## Capabilities + +The current MATLAB toolbox supports: + +- Connecting to TCP-enabled optimizers generated in Python +- Calling standard parametric optimizers using a flat parameter vector +- Calling OCP-generated optimizers using named parameter blocks from + `optimizer_manifest.json` +- Loading OCP manifests and, when available, automatically reading the TCP + endpoint from the sibling `optimizer.yml` +- Sending `ping` and `kill` requests to the optimizer server +- Providing optional warm-start data through: + - `InitialGuess` + - `InitialLagrangeMultipliers` + - `InitialPenalty` +- Returning normalized solver responses with an `ok` flag and solver + diagnostics +- Returning stage-wise `inputs` for OCP optimizers and `states` for + multiple-shooting OCP optimizers + +The main entry points are: + +- [`matlab/api/OpEnTcpOptimizer.m`](./api/OpEnTcpOptimizer.m) +- [`matlab/api/createOpEnTcpOptimizer.m`](./api/createOpEnTcpOptimizer.m) + +## Getting Started + +Add the MATLAB API folder to your path: + +```matlab +addpath(fullfile(pwd, 'matlab', 'api')); +``` + +Make sure the target optimizer TCP server is already running. + +## Simple Optimizers + +### Connect to a parametric optimizer + +Use a TCP port directly. The IP defaults to `127.0.0.1`. + +```matlab +client = OpEnTcpOptimizer(3301); +pong = client.ping(); +disp(pong.Pong); +``` + +You can also specify the endpoint explicitly: + +```matlab +client = OpEnTcpOptimizer('127.0.0.1', 3301); +``` + +### Solve a parametric optimizer + +For a standard parametric optimizer, pass the flat parameter vector: + +```matlab +response = client.solve([2.0, 10.0]); + +if response.ok + disp(response.solution); + disp(response.cost); +else + error('OpEn:SolverError', '%s', response.message); +end +``` + +### Solve with warm-start information + +```matlab +response1 = client.solve([2.0, 10.0]); + +response2 = client.solve( ... + [2.0, 10.0], ... + 'InitialGuess', response1.solution, ... 
+ 'InitialLagrangeMultipliers', response1.lagrange_multipliers, ... + 'InitialPenalty', response1.penalty); +``` + +### Stop the server + +```matlab +client.kill(); +``` + +## OCP Optimizers + +For OCP-generated optimizers, MATLAB uses **name-value pairs** to provide the +parameter blocks listed in `optimizer_manifest.json`. + +### Load an OCP optimizer from its manifest + +If `optimizer_manifest.json` and `optimizer.yml` are in the same generated +optimizer directory, the client can infer the TCP endpoint automatically: + +```matlab +manifestPath = fullfile( ... + pwd, ... + 'open-codegen', ... + '.python_test_build_ocp', ... + 'ocp_single_tcp', ... + 'optimizer_manifest.json'); + +client = OpEnTcpOptimizer('ManifestPath', manifestPath); +disp(client.parameterNames()); +``` + +You can also override the endpoint explicitly: + +```matlab +client = OpEnTcpOptimizer(3391, 'ManifestPath', manifestPath); +``` + +### Solve a single-shooting OCP optimizer + +The following example matches the OCP manifest in +`open-codegen/.python_test_build_ocp/ocp_single_tcp`: + +```matlab +response = client.solve( ... + 'x0', [1.0, -1.0], ... + 'xref', [0.0, 0.0]); + +if response.ok + disp(response.solution); + disp(response.inputs); + disp(response.exit_status); +else + error('OpEn:SolverError', '%s', response.message); +end +``` + +If the manifest defines default values for some parameters, you only need to +provide the required ones: + +```matlab +manifestPath = fullfile( ... + pwd, ... + 'open-codegen', ... + '.python_test_build_ocp', ... + 'ocp_manifest_bindings', ... + 'optimizer_manifest.json'); + +client = OpEnTcpOptimizer('ManifestPath', manifestPath); +response = client.solve('x0', [1.0, 0.0]); +``` + +### Solve a multiple-shooting OCP optimizer + +For multiple-shooting OCPs, the MATLAB client also returns the state +trajectory reconstructed from the manifest slices: + +```matlab +manifestPath = fullfile( ... + pwd, ... + 'open-codegen', ... + '.python_test_build_ocp', ... 
+ 'ocp_multiple_tcp', ... + 'optimizer_manifest.json'); + +client = OpEnTcpOptimizer('ManifestPath', manifestPath); + +response = client.solve( ... + 'x0', [1.0, -1.0], ... + 'xref', [0.0, 0.0]); + +disp(response.inputs); +disp(response.states); +``` + +### OCP warm-start example + +Warm-start options can be combined with named OCP parameters: + +```matlab +response1 = client.solve( ... + 'x0', [1.0, -1.0], ... + 'xref', [0.0, 0.0]); + +response2 = client.solve( ... + 'x0', [1.0, -1.0], ... + 'xref', [0.0, 0.0], ... + 'InitialGuess', response1.solution, ... + 'InitialLagrangeMultipliers', response1.lagrange_multipliers, ... + 'InitialPenalty', response1.penalty); +``` + +## Notes + +- The MATLAB API does not start the optimizer server; it connects to a server + that is already running. +- For plain parametric optimizers, use `client.solve(p)`. +- For OCP optimizers, use `client.solve('name1', value1, 'name2', value2, ...)`. +- The helper function `createOpEnTcpOptimizer(...)` is a thin wrapper around + `OpEnTcpOptimizer(...)`. From b6fead3fa53a994bf01fb1c7641ff3d258f85c5f Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 12:29:37 +0000 Subject: [PATCH 059/133] affine space: impl. try_new --- src/constraints/affine_space.rs | 68 ++++++++++++++++++++++++++++----- 1 file changed, 58 insertions(+), 10 deletions(-) diff --git a/src/constraints/affine_space.rs b/src/constraints/affine_space.rs index fe705340..616df6b2 100644 --- a/src/constraints/affine_space.rs +++ b/src/constraints/affine_space.rs @@ -1,6 +1,6 @@ use super::Constraint; use crate::matrix_operations; -use crate::CholeskyFactorizer; +use crate::{CholeskyError, CholeskyFactorizer}; use ndarray::{ArrayView1, ArrayView2, LinalgScalar}; use num::Float; @@ -17,6 +17,18 @@ pub struct AffineSpace { n_cols: usize, } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +/// Errors that can arise when constructing an [`AffineSpace`]. +pub enum AffineSpaceError { + /// The vector `b` is empty. 
+ EmptyB, + /// The dimensions of `A` and `b` are incompatible. + IncompatibleDimensions, + /// The matrix `AA^T` is not positive definite, which typically means + /// that `A` does not have full row rank. + NotFullRowRank, +} + impl AffineSpace where T: Float + LinalgScalar + 'static, @@ -32,25 +44,61 @@ where /// ## Returns /// New Affine Space structure /// + /// ## Panics + /// + /// Panics if: + /// + /// - `b` is empty, + /// - `A` and `b` have incompatible dimensions, + /// - `A` does not have full row rank. + /// + /// Use [`AffineSpace::try_new`] if you want to handle these conditions + /// without panicking. + /// pub fn new(a: Vec, b: Vec) -> Self { + Self::try_new(a, b).expect("invalid affine space data") + } + + /// Construct a new affine space given the matrix $A\in\mathbb{R}^{m\times n}$ + /// and the vector $b\in\mathbb{R}^m$. + /// + /// ## Arguments + /// + /// - `a`: matrix $A$, row-wise data + /// - `b`: vector $b$ + /// + /// ## Returns + /// + /// Returns a new [`AffineSpace`] on success, or an [`AffineSpaceError`] if + /// the provided data are invalid. 
+ pub fn try_new(a: Vec, b: Vec) -> Result { let n_rows = b.len(); let n_elements_a = a.len(); - assert!(n_rows > 0, "b must not be empty"); - assert!( - n_elements_a.is_multiple_of(n_rows), - "A and b have incompatible dimensions" - ); + if n_rows == 0 { + return Err(AffineSpaceError::EmptyB); + } + if !n_elements_a.is_multiple_of(n_rows) { + return Err(AffineSpaceError::IncompatibleDimensions); + } let n_cols = n_elements_a / n_rows; - let aat = matrix_operations::mul_a_at(&a, n_rows, n_cols).unwrap(); + let aat = matrix_operations::mul_a_at(&a, n_rows, n_cols) + .map_err(|_| AffineSpaceError::IncompatibleDimensions)?; let mut factorizer = CholeskyFactorizer::new(n_rows); - factorizer.factorize(&aat).unwrap(); - AffineSpace { + factorizer + .factorize(&aat) + .map_err(|err| match err { + CholeskyError::NotPositiveDefinite => AffineSpaceError::NotFullRowRank, + CholeskyError::DimensionMismatch | CholeskyError::NotFactorized => { + AffineSpaceError::IncompatibleDimensions + } + })?; + Ok(AffineSpace { a_mat: a, b_vec: b, factorizer, n_rows, n_cols, - } + }) } } From 175c4c8e1fdc994e7ba4f7cd0c7cb94d68cb9056 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 12:30:07 +0000 Subject: [PATCH 060/133] sprinkle some #[must_use] here and there --- src/alm/alm_optimizer.rs | 12 +++++++++ src/alm/alm_problem.rs | 8 ++++++ src/constraints/cartesian_product.rs | 1 + src/constraints/mod.rs | 2 +- src/constraints/no_constraints.rs | 1 + src/constraints/tests.rs | 40 ++++++++++++++++++++++++++++ src/constraints/zero.rs | 1 + src/core/fbs/fbs_optimizer.rs | 4 +++ src/core/panoc/panoc_cache.rs | 1 + src/core/panoc/panoc_optimizer.rs | 5 ++++ src/lipschitz_estimator.rs | 2 ++ 11 files changed, 76 insertions(+), 1 deletion(-) diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs index 93ac5dcf..2d26d98b 100644 --- a/src/alm/alm_optimizer.rs +++ b/src/alm/alm_optimizer.rs @@ -264,6 +264,7 @@ where /// .with_max_outer_iterations(10); ///``` /// + 
#[must_use] pub fn new( alm_cache: &'life mut AlmCache, alm_problem: AlmProblem< @@ -315,6 +316,7 @@ where /// The method panics if the specified number of outer iterations is zero /// /// + #[must_use] pub fn with_max_outer_iterations(mut self, max_outer_iterations: usize) -> Self { assert!( max_outer_iterations > 0, @@ -340,6 +342,7 @@ where /// The method panics if the specified number of inner iterations is zero /// /// + #[must_use] pub fn with_max_inner_iterations(mut self, max_inner_iterations: usize) -> Self { assert!( max_inner_iterations > 0, @@ -362,6 +365,7 @@ where /// /// Returns the current mutable and updated instance of the provided object /// + #[must_use] pub fn with_max_duration(mut self, max_duration: std::time::Duration) -> Self { self.max_duration = Some(max_duration); self @@ -381,6 +385,7 @@ where /// /// The method panics if the specified tolerance is not positive /// + #[must_use] pub fn with_delta_tolerance(mut self, delta_tolerance: T) -> Self { assert!( delta_tolerance > T::zero(), @@ -404,6 +409,7 @@ where /// /// The method panics if the specified tolerance is not positive /// + #[must_use] pub fn with_epsilon_tolerance(mut self, epsilon_tolerance: T) -> Self { assert!( epsilon_tolerance > T::zero(), @@ -432,6 +438,7 @@ where /// The method panics if the update factor is not larger than `1.0 + T::epsilon()` /// /// + #[must_use] pub fn with_penalty_update_factor(mut self, penalty_update_factor: T) -> Self { assert!( penalty_update_factor > T::one() + T::epsilon(), @@ -461,6 +468,7 @@ where /// The method panics if the specified tolerance update factor is not in the /// interval from `T::epsilon()` to `1.0 - T::epsilon()`. /// + #[must_use] pub fn with_inner_tolerance_update_factor(mut self, inner_tolerance_update_factor: T) -> Self { assert!( inner_tolerance_update_factor > T::epsilon() @@ -494,6 +502,7 @@ where /// `with_inner_tolerance` to do so before invoking `with_initial_inner_tolerance`. 
///
 ///
+ #[must_use]
 pub fn with_initial_inner_tolerance(mut self, initial_inner_tolerance: T) -> Self {
 assert!(
 initial_inner_tolerance >= self.epsilon_tolerance,
@@ -526,6 +535,7 @@ where
 /// The method panics if the specified sufficient decrease coefficient is not
 /// in the range `(T::epsilon(), 1.0 - T::epsilon())`
 ///
+ #[must_use]
 pub fn with_sufficient_decrease_coefficient(
 mut self,
 sufficient_decrease_coefficient: T,
@@ -554,6 +564,7 @@ where
 ///
 /// The method will panic if the length of `y_init` is not equal to `n1`
 ///
+ #[must_use]
 pub fn with_initial_lagrange_multipliers(mut self, y_init: &[T]) -> Self {
 let cache = &mut self.alm_cache;
 assert!(
@@ -584,6 +595,7 @@ where
 /// The method panics if the specified initial penalty parameter is not
 /// larger than `T::epsilon()`
 ///
+ #[must_use]
 pub fn with_initial_penalty(self, c0: T) -> Self {
 assert!(
 c0 > T::epsilon(),
diff --git a/src/alm/alm_problem.rs b/src/alm/alm_problem.rs
index e806d8c6..98d24955 100644
--- a/src/alm/alm_problem.rs
+++ b/src/alm/alm_problem.rs
@@ -74,6 +74,12 @@ pub struct AlmProblem<
 pub(crate) n1: usize,
 /// number of PM-type parameters (range dim of F2)
 pub(crate) n2: usize,
+ /// This phantom data object is used because all other attributes
+ /// are not tied to the type T directly. T appears in some
+ /// trait bounds (e.g., MappingAlm, ParametricCostType, etc), but this
+ /// is not enough for the struct layout/type system.
+ /// Without this, Rust gives a bunch of errors. Moreover, this is a zero-size
+ /// object.
marker: PhantomData, } @@ -148,6 +154,8 @@ where /// ); /// ``` /// + #[allow(clippy::too_many_arguments)] + #[must_use] pub fn new( constraints: ConstraintsType, alm_set_c: Option, diff --git a/src/constraints/cartesian_product.rs b/src/constraints/cartesian_product.rs index bbca4f33..fe12f194 100644 --- a/src/constraints/cartesian_product.rs +++ b/src/constraints/cartesian_product.rs @@ -123,6 +123,7 @@ impl<'a, T> CartesianProduct<'a, T> { /// ``` /// The method will panic if any of the associated projections panics. /// + #[must_use] pub fn add_constraint(mut self, ni: usize, constraint: impl Constraint + 'a) -> Self { assert!( self.dimension() < ni, diff --git a/src/constraints/mod.rs b/src/constraints/mod.rs index 31586257..aed5a22e 100644 --- a/src/constraints/mod.rs +++ b/src/constraints/mod.rs @@ -25,7 +25,7 @@ mod soc; mod sphere2; mod zero; -pub use affine_space::AffineSpace; +pub use affine_space::{AffineSpace, AffineSpaceError}; pub use ball1::Ball1; pub use ball2::Ball2; pub use ballinf::BallInf; diff --git a/src/constraints/no_constraints.rs b/src/constraints/no_constraints.rs index 53188676..7263a9b1 100644 --- a/src/constraints/no_constraints.rs +++ b/src/constraints/no_constraints.rs @@ -7,6 +7,7 @@ pub struct NoConstraints {} impl NoConstraints { /// Constructs new instance of `NoConstraints` /// + #[must_use] pub fn new() -> NoConstraints { NoConstraints {} } diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index c20bfea4..a42aed11 100644 --- a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -1201,6 +1201,46 @@ fn t_affine_space_single_row() { unit_test_utils::assert_nearly_equal(1., s, 1e-12, 1e-14, "wrong sum"); } +#[test] +fn t_affine_space_try_new() { + let a = vec![ + 0.5, 0.1, 0.2, -0.3, -0.6, 0.3, 0., 0.5, 1.0, 0.1, -1.0, -0.4, + ]; + let b = vec![1., 2., -0.5]; + let affine_set = AffineSpace::try_new(a, b); + assert!(affine_set.is_ok(), "try_new should succeed on valid data"); +} + +#[test] +fn 
t_affine_space_try_new_empty_b() { + let a = vec![1.0, 2.0]; + let b = vec![]; + let affine_set = AffineSpace::::try_new(a, b); + assert!(matches!(affine_set, Err(AffineSpaceError::EmptyB))); +} + +#[test] +fn t_affine_space_try_new_wrong_dimensions() { + let a = vec![0.5, 0.1, 0.2, -0.3, -0.6, 0.3, 0., 0.5, 1.0, 0.1, -1.0]; + let b = vec![1., 2., -0.5]; + let affine_set = AffineSpace::try_new(a, b); + assert!(matches!( + affine_set, + Err(AffineSpaceError::IncompatibleDimensions) + )); +} + +#[test] +fn t_affine_space_try_new_rank_deficient() { + let a = vec![1.0, 2.0, 2.0, 4.0]; + let b = vec![1.0, 2.0]; + let affine_set = AffineSpace::try_new(a, b); + assert!(matches!( + affine_set, + Err(AffineSpaceError::NotFullRowRank) + )); +} + #[test] #[should_panic] fn t_affine_space_wrong_dimensions() { diff --git a/src/constraints/zero.rs b/src/constraints/zero.rs index f1539cfd..76998510 100644 --- a/src/constraints/zero.rs +++ b/src/constraints/zero.rs @@ -7,6 +7,7 @@ pub struct Zero {} impl Zero { /// Constructs new instance of `Zero` + #[must_use] pub fn new() -> Self { Zero {} } diff --git a/src/core/fbs/fbs_optimizer.rs b/src/core/fbs/fbs_optimizer.rs index 24b6a7a5..a576112e 100644 --- a/src/core/fbs/fbs_optimizer.rs +++ b/src/core/fbs/fbs_optimizer.rs @@ -49,6 +49,7 @@ where /// /// - `problem`: problem definition /// - `cache`: instance of `FBSCache` + #[must_use] pub fn new( problem: Problem<'a, GradientType, ConstraintType, CostType, T>, cache: &'a mut FBSCache, @@ -65,6 +66,7 @@ where /// ## Panics /// /// The method panics if the specified tolerance is not positive + #[must_use] pub fn with_tolerance( self, tolerance: T, @@ -76,6 +78,7 @@ where } /// Sets the maximum number of iterations + #[must_use] pub fn with_max_iter( mut self, max_iter: usize, @@ -85,6 +88,7 @@ where } /// Sets the maximum number of iterations + #[must_use] pub fn with_max_duration( mut self, max_duration: time::Duration, diff --git a/src/core/panoc/panoc_cache.rs 
b/src/core/panoc/panoc_cache.rs index b491ae43..326e38e1 100644 --- a/src/core/panoc/panoc_cache.rs +++ b/src/core/panoc/panoc_cache.rs @@ -245,6 +245,7 @@ where /// The method panics if alpha or epsilon are nonpositive and if sy_epsilon /// is negative. /// + #[must_use] pub fn with_cbfgs_parameters(mut self, alpha: T, epsilon: T, sy_epsilon: T) -> Self { self.lbfgs = self .lbfgs diff --git a/src/core/panoc/panoc_optimizer.rs b/src/core/panoc/panoc_optimizer.rs index 8c7018ce..fa995f9a 100644 --- a/src/core/panoc/panoc_optimizer.rs +++ b/src/core/panoc/panoc_optimizer.rs @@ -48,6 +48,7 @@ where /// ## Panic /// /// Does not panic + #[must_use] pub fn new( problem: Problem<'a, GradientType, ConstraintType, CostType, T>, cache: &'a mut PANOCCache, @@ -67,6 +68,7 @@ where /// ## Panics /// /// The method panics if the specified tolerance is not positive + #[must_use] pub fn with_tolerance(self, tolerance: T) -> Self { assert!(tolerance > T::zero(), "tolerance must be larger than 0"); @@ -95,6 +97,7 @@ where /// The method panics if the provided value of the AKKT-specific tolerance is /// not positive. 
/// + #[must_use] pub fn with_akkt_tolerance(self, akkt_tolerance: T) -> Self { assert!( akkt_tolerance > T::zero(), @@ -109,6 +112,7 @@ where /// ## Panics /// /// Panics if the provided number of iterations is equal to zero + #[must_use] pub fn with_max_iter(mut self, max_iter: usize) -> Self { assert!(max_iter > 0, "max_iter must be larger than 0"); @@ -117,6 +121,7 @@ where } /// Sets the maximum solution time, useful in real-time applications + #[must_use] pub fn with_max_duration(mut self, max_duation: time::Duration) -> Self { self.max_duration = Some(max_duation); self diff --git a/src/lipschitz_estimator.rs b/src/lipschitz_estimator.rs index 1906ddc1..a39ceab9 100644 --- a/src/lipschitz_estimator.rs +++ b/src/lipschitz_estimator.rs @@ -138,6 +138,7 @@ where /// # Panics /// The method will panic if `delta` is non positive /// + #[must_use] pub fn with_delta(mut self, delta: T) -> Self { assert!(delta > T::zero()); self.delta_lip = delta; @@ -155,6 +156,7 @@ where /// # Panics /// The method will panic if `epsilon` is non positive /// + #[must_use] pub fn with_epsilon(mut self, epsilon: T) -> Self { assert!(epsilon > T::zero()); self.epsilon_lip = epsilon; From 857a1314275d42c3373073bf169adb8af9d70834 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 12:36:08 +0000 Subject: [PATCH 061/133] clean up all the T::from(..).expect(..) 
- introduced numeric.rs (crate access) - function cast() - unit tests --- src/alm/alm_cache.rs | 4 ++-- src/alm/alm_factory.rs | 10 +++----- src/alm/alm_optimizer.rs | 20 +++++++--------- src/constraints/ballp.rs | 18 +++++++------- src/constraints/epigraph_squared_norm.rs | 6 +---- src/constraints/simplex.rs | 8 ++++--- src/constraints/sphere2.rs | 4 +++- src/core/panoc/panoc_cache.rs | 5 ++-- src/core/panoc/panoc_engine.rs | 30 ++++++++++++------------ src/lib.rs | 1 + src/lipschitz_estimator.rs | 6 ++--- src/mocks.rs | 6 +---- src/numeric.rs | 10 ++++++++ 13 files changed, 64 insertions(+), 64 deletions(-) create mode 100644 src/numeric.rs diff --git a/src/alm/alm_cache.rs b/src/alm/alm_cache.rs index e17400d3..8113fbf4 100644 --- a/src/alm/alm_cache.rs +++ b/src/alm/alm_cache.rs @@ -1,10 +1,10 @@ -use crate::panoc::PANOCCache; +use crate::{numeric::cast, panoc::PANOCCache}; use lbfgs::LbfgsPrecision; use num::Float; use std::iter::Sum; fn default_initial_penalty() -> T { - T::from(10.0).expect("10.0 must be representable") + cast::(10.0) } /// Cache for `AlmOptimizer` (to be allocated once) diff --git a/src/alm/alm_factory.rs b/src/alm/alm_factory.rs index e118e90e..fef9d9b1 100644 --- a/src/alm/alm_factory.rs +++ b/src/alm/alm_factory.rs @@ -5,15 +5,11 @@ /* prepares psi and d_psi, which can be used to define an AlmOptimizer */ /* ---------------------------------------------------------------------------- */ -use crate::{constraints::Constraint, matrix_operations, FunctionCallResult}; +use crate::{constraints::Constraint, matrix_operations, numeric::cast, FunctionCallResult}; use num::Float; use std::marker::PhantomData; use std::{iter::Sum, ops::AddAssign}; -fn half() -> T { - T::from(0.5).expect("0.5 must be representable") -} - /// Prepares function $\psi$ and its gradient given the problem data: $f$, $\nabla{}f$, /// and optionally $F_1$, $JF_1$, $C$ and $F_2$ /// @@ -264,7 +260,7 @@ where s.copy_from_slice(&f1_u_plus_y_over_c); set_c.project(&mut s); let 
dist_sq: T = matrix_operations::norm2_squared_diff(&f1_u_plus_y_over_c, &s); - let scaling: T = half::() * penalty_parameter; + let scaling: T = cast::(0.5) * penalty_parameter; *cost += scaling * dist_sq; } if let Some(f2) = &self.mapping_f2 { @@ -272,7 +268,7 @@ where let mut z = vec![T::zero(); self.n2]; f2(u, &mut z)?; let norm_sq: T = matrix_operations::norm2_squared(&z); - let scaling: T = half::() * c; + let scaling: T = cast::(0.5) * c; *cost += scaling * norm_sq; } Ok(()) diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs index 2d26d98b..cc798059 100644 --- a/src/alm/alm_optimizer.rs +++ b/src/alm/alm_optimizer.rs @@ -2,7 +2,7 @@ use crate::{ alm::*, constraints, core::{panoc::PANOCOptimizer, ExitStatus, Problem, SolverStatus}, - matrix_operations, FunctionCallResult, SolverError, + matrix_operations, numeric::cast, FunctionCallResult, SolverError, }; use lbfgs::LbfgsPrecision; use num::Float; @@ -11,10 +11,6 @@ use std::{iter::Sum, ops::AddAssign}; const DEFAULT_MAX_OUTER_ITERATIONS: usize = 50; const DEFAULT_MAX_INNER_ITERATIONS: usize = 5000; -fn float(value: f64) -> T { - T::from(value).expect("floating-point constant must be representable") -} - /// Internal/private structure used by method AlmOptimizer.step /// to return some minimal information about the inner problem struct InnerProblemStatus { @@ -281,19 +277,19 @@ where // set the initial value of the inner tolerance; this step is // not necessary, however, because we set the initial tolerance // in #solve (see below) - alm_cache.panoc_cache.set_akkt_tolerance(float(0.1)); + alm_cache.panoc_cache.set_akkt_tolerance(cast::(0.1)); AlmOptimizer { alm_cache, alm_problem, max_outer_iterations: DEFAULT_MAX_OUTER_ITERATIONS, max_inner_iterations: DEFAULT_MAX_INNER_ITERATIONS, max_duration: None, - epsilon_tolerance: float(1e-6), - delta_tolerance: float(1e-4), - penalty_update_factor: float(5.0), - epsilon_update_factor: float(0.1), - sufficient_decrease_coeff: float(0.1), - 
epsilon_inner_initial: float(0.1), + epsilon_tolerance: cast::(1e-6), + delta_tolerance: cast::(1e-4), + penalty_update_factor: cast::(5.0), + epsilon_update_factor: cast::(0.1), + sufficient_decrease_coeff: cast::(0.1), + epsilon_inner_initial: cast::(0.1), } } diff --git a/src/constraints/ballp.rs b/src/constraints/ballp.rs index 7428f0f3..4095a7a8 100644 --- a/src/constraints/ballp.rs +++ b/src/constraints/ballp.rs @@ -1,3 +1,5 @@ +use crate::numeric::cast; + use super::Constraint; use num::Float; @@ -181,15 +183,14 @@ impl<'a, T: Float> BallP<'a, T> { let mut lambda_hi = T::one(); while radius_error(lambda_hi) > T::zero() { - lambda_hi = lambda_hi * T::from(2.0).expect("2.0 must be representable"); - if lambda_hi > T::from(1e20).expect("1e20 must be representable") { + lambda_hi = lambda_hi * cast::(2.0); + if lambda_hi > cast::(1e20) { panic!("Failed to bracket the Lagrange multiplier"); } } for _ in 0..max_iter { - let lambda_mid = - T::from(0.5).expect("0.5 must be representable") * (lambda_lo + lambda_hi); + let lambda_mid = cast::(0.5) * (lambda_lo + lambda_hi); let err = radius_error(lambda_mid); if err.abs() <= tol { @@ -205,8 +206,7 @@ impl<'a, T: Float> BallP<'a, T> { } } - let lambda_star = - T::from(0.5).expect("0.5 must be representable") * (lambda_lo + lambda_hi); + let lambda_star = cast::(0.5) * (lambda_lo + lambda_hi); x.iter_mut().zip(abs_x.iter()).for_each(|(xi, &a)| { let u = Self::solve_coordinate_newton(a, lambda_star, p, tol, max_iter); @@ -253,11 +253,11 @@ impl<'a, T: Float> BallP<'a, T> { + lambda * p * (p - T::one()) - * u.powf(p - T::from(2.0).expect("2.0 must be representable")); + * u.powf(p - cast::(2.0)); let mut candidate = u - f / df; if !candidate.is_finite() || candidate <= lo || candidate >= hi { - candidate = T::from(0.5).expect("0.5 must be representable") * (lo + hi); + candidate = cast::(0.5) * (lo + hi); } if (candidate - u).abs() <= tol * (T::one() + u.abs()) { @@ -267,7 +267,7 @@ impl<'a, T: Float> BallP<'a, T> { u = 
candidate; } - T::from(0.5).expect("0.5 must be representable") * (lo + hi) + cast::(0.5) * (lo + hi) } } diff --git a/src/constraints/epigraph_squared_norm.rs b/src/constraints/epigraph_squared_norm.rs index 1ed9874e..2db267ae 100644 --- a/src/constraints/epigraph_squared_norm.rs +++ b/src/constraints/epigraph_squared_norm.rs @@ -1,14 +1,10 @@ -use crate::matrix_operations; +use crate::{matrix_operations, numeric::cast}; use super::Constraint; use num::Float; use roots::FloatType; use std::iter::Sum; -fn cast(value: f64) -> T { - T::from(value).expect("constant must be representable") -} - #[derive(Copy, Clone, Default)] /// The epigraph of the squared Euclidean norm, that is, /// $$ diff --git a/src/constraints/simplex.rs b/src/constraints/simplex.rs index 4cd9c0f1..fcf7cb4e 100644 --- a/src/constraints/simplex.rs +++ b/src/constraints/simplex.rs @@ -1,3 +1,5 @@ +use crate::numeric::cast; + use super::Constraint; use num::Float; @@ -39,7 +41,7 @@ impl Constraint for Simplex { // ---- step 2 x.iter().skip(1).for_each(|x_n| { if *x_n > rho { - rho = rho + (*x_n - rho) / T::from(v.len() + 1).expect("usize must fit in T"); + rho = rho + (*x_n - rho) / cast(v.len() + 1); if rho > *x_n - *a { v.push(*x_n); } else { @@ -55,7 +57,7 @@ impl Constraint for Simplex { v_tilde.iter().for_each(|v_t_n| { if *v_t_n > rho { v.push(*v_t_n); - rho = rho + (*v_t_n - rho) / T::from(v.len()).expect("usize must fit in T"); + rho = rho + (*v_t_n - rho) / cast(v.len()); } }); } @@ -69,7 +71,7 @@ impl Constraint for Simplex { if *v_n <= rho { hit_list.push(n); current_len_v -= 1; - rho = rho + (rho - *v_n) / T::from(current_len_v).expect("i64 must fit in T"); + rho = rho + (rho - *v_n) / cast(current_len_v); } }); hit_list.iter().rev().for_each(|target| { diff --git a/src/constraints/sphere2.rs b/src/constraints/sphere2.rs index 790450a4..f56f6e0c 100644 --- a/src/constraints/sphere2.rs +++ b/src/constraints/sphere2.rs @@ -1,3 +1,5 @@ +use crate::numeric::cast; + use super::Constraint; 
use num::Float; use std::iter::Sum; @@ -51,7 +53,7 @@ where /// `center` have incompatible dimensions. /// fn project(&self, x: &mut [T]) { - let epsilon = T::from(1e-12).expect("1e-12 must be representable"); + let epsilon = cast::(1e-12); assert!(!x.is_empty(), "x must be nonempty"); if let Some(center) = &self.center { assert_eq!( diff --git a/src/core/panoc/panoc_cache.rs b/src/core/panoc/panoc_cache.rs index 326e38e1..4d4f7ffa 100644 --- a/src/core/panoc/panoc_cache.rs +++ b/src/core/panoc/panoc_cache.rs @@ -1,13 +1,14 @@ +use crate::numeric::cast; use lbfgs::LbfgsPrecision; use num::Float; use std::iter::Sum; fn default_sy_epsilon() -> T { - T::from(1e-10).expect("1e-10 must be representable") + cast::(1e-10) } fn default_cbfgs_epsilon() -> T { - T::from(1e-8).expect("1e-8 must be representable") + cast::(1e-8) } fn default_cbfgs_alpha() -> T { diff --git a/src/core/panoc/panoc_engine.rs b/src/core/panoc/panoc_engine.rs index eb4780c5..7e2334b6 100644 --- a/src/core/panoc/panoc_engine.rs +++ b/src/core/panoc/panoc_engine.rs @@ -1,39 +1,39 @@ use crate::{ constraints, core::{panoc::PANOCCache, AlgorithmEngine, Problem}, - matrix_operations, FunctionCallResult, SolverError, + matrix_operations, numeric::cast, FunctionCallResult, SolverError, }; use lbfgs::LbfgsPrecision; use num::Float; use std::iter::Sum; fn min_l_estimate() -> T { - T::from(1e-10).expect("1e-10 must be representable") + cast::(1e-10) } fn gamma_l_coeff() -> T { - T::from(0.95).expect("0.95 must be representable") + cast::(0.95) } //const SIGMA_COEFF: f64 = 0.49; fn delta_lipschitz() -> T { - T::from(1e-12).expect("1e-12 must be representable") + cast::(1e-12) } fn epsilon_lipschitz() -> T { - T::from(1e-6).expect("1e-6 must be representable") + cast::(1e-6) } fn lipschitz_update_epsilon() -> T { - T::from(1e-6).expect("1e-6 must be representable") + cast::(1e-6) } /// Maximum iterations of updating the Lipschitz constant const MAX_LIPSCHITZ_UPDATE_ITERATIONS: usize = 10; fn 
max_lipschitz_constant() -> T { - T::from(1e9).expect("1e9 must be representable") + cast::(1e9) } fn norm2_squared_diff(a: &[T], b: &[T]) -> T { @@ -188,7 +188,7 @@ where // rhs ← cost + LIP_EPS * |f| - + (L/2/gamma) ||gamma_fpr||^2 cost_value + lipschitz_update_epsilon::() * cost_value.abs() - inner_prod_grad_fpr - + (gamma_l_coeff::() / (T::from(2.0).expect("2.0 must be representable") * gamma)) + + (gamma_l_coeff::() / (cast::(2.0) * gamma)) * cache.norm_gamma_fpr * cache.norm_gamma_fpr } @@ -211,8 +211,8 @@ where // update L, sigma and gamma... self.cache.lipschitz_constant = - self.cache.lipschitz_constant * T::from(2.0).expect("2.0 must be representable"); - self.cache.gamma = self.cache.gamma / T::from(2.0).expect("2.0 must be representable"); + self.cache.lipschitz_constant * cast::(2.0); + self.cache.gamma = self.cache.gamma / cast::(2.0); // recompute the half step... self.gradient_step(u_current); // updates self.cache.gradient_step @@ -227,7 +227,7 @@ where it_lipschitz_search += 1; } self.cache.sigma = (T::one() - gamma_l_coeff::()) - / (T::from(4.0).expect("4.0 must be representable") * self.cache.gamma); + / (cast::(4.0) * self.cache.gamma); Ok(()) } @@ -252,7 +252,7 @@ where /// Computes the RHS of the linesearch condition fn compute_rhs_ls(&mut self) { let cache = &mut self.cache; - let half = T::from(0.5).expect("0.5 must be representable"); + let half = cast::(0.5); // dist squared ← norm(gradient step - u half step)^2 // rhs_ls ← f - (gamma/2) * norm(gradf)^2 @@ -268,7 +268,7 @@ where /// returns `true` if and only if lhs > rhs (when the line search should continue) fn line_search_condition(&mut self, u: &[T]) -> Result { let gamma = self.cache.gamma; - let half = T::from(0.5).expect("0.5 must be representable"); + let half = cast::(0.5); // u_plus ← u - (1-tau)*gamma_fpr + tau*direction self.compute_u_plus(u); @@ -309,7 +309,7 @@ where self.cache.tau = T::one(); // initialise tau ← 1.0 let mut num_ls_iters = 0; while 
self.line_search_condition(u_current)? && num_ls_iters < MAX_LINESEARCH_ITERATIONS { - self.cache.tau = self.cache.tau / T::from(2.0).expect("2.0 must be representable"); + self.cache.tau = self.cache.tau / cast::(2.0); num_ls_iters += 1; } if num_ls_iters == MAX_LINESEARCH_ITERATIONS { @@ -391,7 +391,7 @@ where self.cache.gamma = gamma_l_coeff::() / self.cache.lipschitz_constant.max(min_l_estimate()); self.cache.sigma = (T::one() - gamma_l_coeff::()) - / (T::from(4.0).expect("4.0 must be representable") * self.cache.gamma); + / (cast::(4.0) * self.cache.gamma); self.gradient_step(u_current); // updated self.cache.gradient_step self.half_step(); // updates self.cache.u_half_step diff --git a/src/lib.rs b/src/lib.rs index e0f61bb0..ece5f024 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -58,6 +58,7 @@ pub mod constraints; pub mod core; pub mod lipschitz_estimator; pub mod matrix_operations; +mod numeric; pub use crate::cholesky_factorizer::{CholeskyError, CholeskyFactorizer}; pub use crate::core::fbs; diff --git a/src/lipschitz_estimator.rs b/src/lipschitz_estimator.rs index a39ceab9..f06fc942 100644 --- a/src/lipschitz_estimator.rs +++ b/src/lipschitz_estimator.rs @@ -41,15 +41,15 @@ //! ``` //! 
-use crate::SolverError; +use crate::{numeric::cast, SolverError}; use num::Float; fn default_delta() -> T { - T::from(1e-6).expect("1e-6 must be representable") + cast::(1e-6) } fn default_epsilon() -> T { - T::from(1e-6).expect("1e-6 must be representable") + cast::(1e-6) } fn norm2(a: &[T]) -> T { diff --git a/src/mocks.rs b/src/mocks.rs index 797b8bc9..41b2105c 100644 --- a/src/mocks.rs +++ b/src/mocks.rs @@ -1,12 +1,8 @@ -use crate::{matrix_operations, SolverError}; +use crate::{matrix_operations, numeric::cast, SolverError}; use num::Float; use std::iter::Sum; use std::ops::Mul; -fn cast(value: f64) -> T { - T::from(value).expect("floating-point constant must be representable") -} - pub fn solution_a() -> [T; 2] { [ cast::(-0.148_959_718_255_77), diff --git a/src/numeric.rs b/src/numeric.rs new file mode 100644 index 00000000..a7978b91 --- /dev/null +++ b/src/numeric.rs @@ -0,0 +1,10 @@ +use num::{Float, ToPrimitive}; + +/// Convert a numeric literal or integer index into the target float type. 
+#[inline] +pub(crate) fn cast(value: impl ToPrimitive) -> T +where + T: Float, +{ + T::from(value).expect("numeric constant must be representable") +} From 5d89d2e64453f082040adb87199954140c5255f4 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 12:44:07 +0000 Subject: [PATCH 062/133] tighter unit testing (f32/f64) --- src/constraints/epigraph_squared_norm.rs | 8 +- src/constraints/tests.rs | 264 ++++++++++++++++++++++- 2 files changed, 267 insertions(+), 5 deletions(-) diff --git a/src/constraints/epigraph_squared_norm.rs b/src/constraints/epigraph_squared_norm.rs index 2db267ae..12844773 100644 --- a/src/constraints/epigraph_squared_norm.rs +++ b/src/constraints/epigraph_squared_norm.rs @@ -95,7 +95,7 @@ where let cubic_poly_roots = roots::find_roots_cubic(a3, a2, a1, a0); - let root_tol = cast::(1e-6); + let root_tol = cast::(10.0) * num::Float::sqrt(T::epsilon()); let mut right_root: Option = None; // Pick the first admissible real root @@ -117,7 +117,7 @@ where // Newton refinement let newton_max_iters: usize = 5; - let newton_eps = cast::(1e-14); + let newton_eps = cast::(10.0) * T::epsilon(); for _ in 0..newton_max_iters { let zsol_sq = zsol * zsol; @@ -130,7 +130,7 @@ where let dp_z = cast::(3.0) * a3 * zsol_sq + cast::(2.0) * a2 * zsol + a1; assert!( - num::Float::abs(dp_z) > cast::(1e-15), + num::Float::abs(dp_z) > cast::(10.0) * T::epsilon(), "EpigraphSquaredNorm::project: Newton derivative too small" ); @@ -141,7 +141,7 @@ where let scaling = cast::(1.0) + cast::(2.0) * (right_root - t); assert!( - num::Float::abs(scaling) > cast::(1e-15), + num::Float::abs(scaling) > cast::(10.0) * T::epsilon(), "EpigraphSquaredNorm::project: scaling factor too small" ); diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index a42aed11..7a17f4ab 100644 --- a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -1,6 +1,7 @@ -use crate::matrix_operations; +use crate::{matrix_operations, numeric::cast}; use super::*; +use
num::{Float, ToPrimitive}; use rand; use rand::RngExt; use rand_distr::{Distribution, Gamma}; @@ -1375,6 +1376,247 @@ fn is_norm_p_projection( true } +fn is_norm_p_projection_with_tol( + x: &[f64], + x_candidate_proj: &[f64], + p: f64, + radius: f64, + sample_points: usize, + feasibility_tol: f64, + inner_prod_tol: f64, +) -> bool { + let n = x.len(); + assert_eq!(n, x_candidate_proj.len()); + + let norm_proj = x_candidate_proj + .iter() + .map(|xi| xi.abs().powf(p)) + .sum::() + .powf(1.0 / p); + if norm_proj > radius + feasibility_tol { + return false; + } + + let e: Vec = x + .iter() + .zip(x_candidate_proj.iter()) + .map(|(xi, yi)| xi - yi) + .collect(); + let samples = sample_lp_sphere(sample_points, n, p, radius); + for xi in samples.iter() { + let w: Vec = x_candidate_proj + .iter() + .zip(xi.iter()) + .map(|(xproj_i, xi_i)| xproj_i - xi_i) + .collect(); + let inner = matrix_operations::inner_product(&w, &e); + if inner < -inner_prod_tol { + return false; + } + } + true +} + +fn as_f64_vec(x: &[T]) -> Vec { + x.iter() + .map(|xi| { + xi.to_f64() + .expect("test float values must be convertible to f64") + }) + .collect() +} + +fn lp_norm_generic(x: &[T], p: T) -> T { + x.iter() + .map(|xi| xi.abs().powf(p)) + .fold(T::zero(), |sum, xi| sum + xi) + .powf(T::one() / p) +} + +fn random_vec(rng: &mut impl rand::Rng, len: usize, lower: f64, upper: f64) -> Vec { + (0..len) + .map(|_| cast::(rng.random_range(lower..upper))) + .collect() +} + +fn run_ballp_random_properties() +where + T: Float + ToPrimitive, +{ + let mut rng = rand::rng(); + let solver_tol = if T::epsilon() > cast::(1e-10) { + cast::(1e-5) + } else { + cast::(1e-12) + }; + let feasibility_tol = if T::epsilon() > cast::(1e-10) { + cast::(5e-3) + } else { + cast::(1e-8) + }; + let idempotence_tol = if T::epsilon() > cast::(1e-10) { + cast::(2e-4) + } else { + cast::(1e-10) + }; + let inner_prod_tol = if T::epsilon() > cast::(1e-10) { + 5e-3 + } else { + 1e-8 + }; + + for &(dim, p_f64, radius_f64, 
with_center) in &[ + (3_usize, 1.7_f64, 1.1_f64, false), + (4_usize, 2.5_f64, 0.9_f64, true), + (5_usize, 3.4_f64, 1.4_f64, true), + ] { + for _ in 0..40 { + let center = with_center.then(|| random_vec::(&mut rng, dim, -1.5, 1.5)); + let mut x = random_vec::(&mut rng, dim, -4.0, 4.0); + let x_before = x.clone(); + let p = cast::(p_f64); + let radius = cast::(radius_f64); + let ball = BallP::new(center.as_deref(), radius, p, solver_tol, 300); + ball.project(&mut x); + + let shifted_projection: Vec = if let Some(center) = center.as_ref() { + x.iter() + .zip(center.iter()) + .map(|(xi, ci)| *xi - *ci) + .collect() + } else { + x.clone() + }; + let proj_norm = lp_norm_generic(&shifted_projection, p); + assert!( + proj_norm <= radius + feasibility_tol, + "projected point is not feasible for BallP" + ); + + let mut reproj = x.clone(); + ball.project(&mut reproj); + let max_reproj_diff = reproj + .iter() + .zip(x.iter()) + .fold(T::zero(), |acc, (a, b)| acc.max((*a - *b).abs())); + assert!( + max_reproj_diff <= idempotence_tol, + "BallP projection is not idempotent within tolerance" + ); + + let shifted_x_before: Vec = if let Some(center) = center.as_ref() { + x_before + .iter() + .zip(center.iter()) + .map(|(xi, ci)| { + (*xi - *ci) + .to_f64() + .expect("test float values must be convertible to f64") + }) + .collect() + } else { + as_f64_vec(&x_before) + }; + let shifted_projection_f64 = as_f64_vec(&shifted_projection); + assert!( + is_norm_p_projection_with_tol( + &shifted_x_before, + &shifted_projection_f64, + p_f64, + radius_f64, + 500, + feasibility_tol + .to_f64() + .expect("test float values must be convertible to f64"), + inner_prod_tol, + ), + "BallP projection failed sampled optimality check" + ); + } + } +} + +fn run_epigraph_squared_norm_random_properties() +where + T: Float + roots::FloatType + std::iter::Sum + ToPrimitive, +{ + let mut rng = rand::rng(); + let feasibility_tol = if T::epsilon() > cast::(1e-10) { + cast::(2e-4) + } else { + cast::(1e-10) + }; 
+ let idempotence_tol = if T::epsilon() > cast::(1e-10) { + cast::(2e-4) + } else { + cast::(1e-10) + }; + let vi_tol = if T::epsilon() > cast::(1e-10) { + 2e-3 + } else { + 1e-8 + }; + let epi = EpigraphSquaredNorm::new(); + + for dim in 2..=5 { + for _ in 0..50 { + let mut x = random_vec::(&mut rng, dim, -3.0, 3.0); + x.push(cast::(rng.random_range(-2.0..4.0))); + let x_before = as_f64_vec(&x); + + epi.project(&mut x); + + let z = &x[..dim]; + let t = x[dim]; + let norm_z_sq = matrix_operations::norm2_squared(z); + assert!( + norm_z_sq <= t + feasibility_tol, + "Epigraph projection is not feasible" + ); + + let mut reproj = x.clone(); + epi.project(&mut reproj); + let max_reproj_diff = reproj + .iter() + .zip(x.iter()) + .fold(T::neg_infinity(), |acc, (a, b)| { + acc.max(num::Float::abs(*a - *b)) + }); + assert!( + max_reproj_diff <= idempotence_tol, + "Epigraph projection is not idempotent within tolerance" + ); + + let proj_f64 = as_f64_vec(&x); + let residual: Vec = x_before + .iter() + .zip(proj_f64.iter()) + .map(|(xb, xp)| xb - xp) + .collect(); + + for _ in 0..150 { + let z_feasible: Vec = (0..dim) + .map(|_| rng.random_range(-3.0..3.0)) + .collect(); + let norm_z_sq_feasible = z_feasible.iter().map(|zi| zi * zi).sum::(); + let t_feasible = norm_z_sq_feasible + rng.random_range(0.0..3.0); + let mut y = z_feasible; + y.push(t_feasible); + let diff: Vec = proj_f64 + .iter() + .zip(y.iter()) + .map(|(xp, yi)| xp - yi) + .collect(); + let inner = matrix_operations::inner_product(&diff, &residual); + assert!( + inner >= -vi_tol, + "Epigraph projection failed sampled variational inequality" + ); + } + } + } +} + #[test] fn t_ballp_at_origin_projection() { let radius = 0.8; @@ -1457,3 +1699,23 @@ fn t_ballp_at_xc_projection_f32() { assert!((x[0] - proj_expected[0]).abs() < 1e-4_f32); assert!((x[1] - proj_expected[1]).abs() < 1e-4_f32); } + +#[test] +fn t_ballp_random_properties_f64() { + run_ballp_random_properties::(); +} + +#[test] +fn 
t_ballp_random_properties_f32() { + run_ballp_random_properties::(); +} + +#[test] +fn t_epigraph_squared_norm_random_properties_f64() { + run_epigraph_squared_norm_random_properties::(); +} + +#[test] +fn t_epigraph_squared_norm_random_properties_f32() { + run_epigraph_squared_norm_random_properties::(); +} From f262a7639dbedb1bd31dc9b3f9378117a28c7948 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 12:57:10 +0000 Subject: [PATCH 063/133] cargo fmt --- src/alm/alm_optimizer.rs | 4 +++- src/constraints/affine_space.rs | 14 ++++++-------- src/constraints/ballp.rs | 6 +----- src/constraints/tests.rs | 9 ++------- src/core/panoc/panoc_engine.rs | 13 ++++++------- 5 files changed, 18 insertions(+), 28 deletions(-) diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs index cc798059..b7492543 100644 --- a/src/alm/alm_optimizer.rs +++ b/src/alm/alm_optimizer.rs @@ -2,7 +2,9 @@ use crate::{ alm::*, constraints, core::{panoc::PANOCOptimizer, ExitStatus, Problem, SolverStatus}, - matrix_operations, numeric::cast, FunctionCallResult, SolverError, + matrix_operations, + numeric::cast, + FunctionCallResult, SolverError, }; use lbfgs::LbfgsPrecision; use num::Float; diff --git a/src/constraints/affine_space.rs b/src/constraints/affine_space.rs index 616df6b2..84b38855 100644 --- a/src/constraints/affine_space.rs +++ b/src/constraints/affine_space.rs @@ -84,14 +84,12 @@ where let aat = matrix_operations::mul_a_at(&a, n_rows, n_cols) .map_err(|_| AffineSpaceError::IncompatibleDimensions)?; let mut factorizer = CholeskyFactorizer::new(n_rows); - factorizer - .factorize(&aat) - .map_err(|err| match err { - CholeskyError::NotPositiveDefinite => AffineSpaceError::NotFullRowRank, - CholeskyError::DimensionMismatch | CholeskyError::NotFactorized => { - AffineSpaceError::IncompatibleDimensions - } - })?; + factorizer.factorize(&aat).map_err(|err| match err { + CholeskyError::NotPositiveDefinite => AffineSpaceError::NotFullRowRank, + 
CholeskyError::DimensionMismatch | CholeskyError::NotFactorized => { + AffineSpaceError::IncompatibleDimensions + } + })?; Ok(AffineSpace { a_mat: a, b_vec: b, diff --git a/src/constraints/ballp.rs b/src/constraints/ballp.rs index 4095a7a8..1d2930b4 100644 --- a/src/constraints/ballp.rs +++ b/src/constraints/ballp.rs @@ -249,11 +249,7 @@ impl<'a, T: Float> BallP<'a, T> { lo = u; } - let df = T::one() - + lambda - * p - * (p - T::one()) - * u.powf(p - cast::(2.0)); + let df = T::one() + lambda * p * (p - T::one()) * u.powf(p - cast::(2.0)); let mut candidate = u - f / df; if !candidate.is_finite() || candidate <= lo || candidate >= hi { diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index e078046e..55c97bee 100644 --- a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -1295,10 +1295,7 @@ fn t_affine_space_try_new_rank_deficient() { let a = vec![1.0, 2.0, 2.0, 4.0]; let b = vec![1.0, 2.0]; let affine_set = AffineSpace::try_new(a, b); - assert!(matches!( - affine_set, - Err(AffineSpaceError::NotFullRowRank) - )); + assert!(matches!(affine_set, Err(AffineSpaceError::NotFullRowRank))); } #[test] @@ -1654,9 +1651,7 @@ where .collect(); for _ in 0..150 { - let z_feasible: Vec = (0..dim) - .map(|_| rng.random_range(-3.0..3.0)) - .collect(); + let z_feasible: Vec = (0..dim).map(|_| rng.random_range(-3.0..3.0)).collect(); let norm_z_sq_feasible = z_feasible.iter().map(|zi| zi * zi).sum::(); let t_feasible = norm_z_sq_feasible + rng.random_range(0.0..3.0); let mut y = z_feasible; diff --git a/src/core/panoc/panoc_engine.rs b/src/core/panoc/panoc_engine.rs index 7e2334b6..f021117c 100644 --- a/src/core/panoc/panoc_engine.rs +++ b/src/core/panoc/panoc_engine.rs @@ -1,7 +1,9 @@ use crate::{ constraints, core::{panoc::PANOCCache, AlgorithmEngine, Problem}, - matrix_operations, numeric::cast, FunctionCallResult, SolverError, + matrix_operations, + numeric::cast, + FunctionCallResult, SolverError, }; use lbfgs::LbfgsPrecision; use num::Float; @@ -210,8 
+212,7 @@ where self.cache.lbfgs.reset(); // invalidate the L-BFGS buffer // update L, sigma and gamma... - self.cache.lipschitz_constant = - self.cache.lipschitz_constant * cast::(2.0); + self.cache.lipschitz_constant = self.cache.lipschitz_constant * cast::(2.0); self.cache.gamma = self.cache.gamma / cast::(2.0); // recompute the half step... @@ -226,8 +227,7 @@ where self.compute_fpr(u_current); it_lipschitz_search += 1; } - self.cache.sigma = (T::one() - gamma_l_coeff::()) - / (cast::(4.0) * self.cache.gamma); + self.cache.sigma = (T::one() - gamma_l_coeff::()) / (cast::(4.0) * self.cache.gamma); Ok(()) } @@ -390,8 +390,7 @@ where self.cache_gradient_norm(); self.cache.gamma = gamma_l_coeff::() / self.cache.lipschitz_constant.max(min_l_estimate()); - self.cache.sigma = (T::one() - gamma_l_coeff::()) - / (cast::(4.0) * self.cache.gamma); + self.cache.sigma = (T::one() - gamma_l_coeff::()) / (cast::(4.0) * self.cache.gamma); self.gradient_step(u_current); // updated self.cache.gradient_step self.half_step(); // updates self.cache.u_half_step From 65f52493fc66c53a79fb41f1cfadcab0417824ef Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 14:13:20 +0000 Subject: [PATCH 064/133] add #[allow(clippy::too_many_arguments)] --- src/alm/alm_factory.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/alm/alm_factory.rs b/src/alm/alm_factory.rs index fef9d9b1..4ed65e42 100644 --- a/src/alm/alm_factory.rs +++ b/src/alm/alm_factory.rs @@ -174,6 +174,7 @@ where /// ); /// ``` /// + #[allow(clippy::too_many_arguments)] pub fn new( f: Cost, df: CostGradient, From 45b3673ea72bc1e63cdf376b7234e91a70058b1c Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 14:20:31 +0000 Subject: [PATCH 065/133] fix issues in docs of AlmFactory --- src/alm/alm_factory.rs | 89 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 84 insertions(+), 5 deletions(-) diff --git a/src/alm/alm_factory.rs b/src/alm/alm_factory.rs index 4ed65e42..ffc55a4c 
100644 --- a/src/alm/alm_factory.rs +++ b/src/alm/alm_factory.rs @@ -10,8 +10,11 @@ use num::Float; use std::marker::PhantomData; use std::{iter::Sum, ops::AddAssign}; -/// Prepares function $\psi$ and its gradient given the problem data: $f$, $\nabla{}f$, -/// and optionally $F_1$, $JF_1$, $C$ and $F_2$ +/// Prepares the ALM/PM merit function $\psi$ and its gradient from the problem data. +/// +/// This is a low-level helper used by the ALM implementation to assemble the +/// augmented cost seen by the inner solver from the user-provided cost, +/// gradient, mappings, Jacobian-vector products, and set data. /// /// # Types /// @@ -132,7 +135,7 @@ where JacobianMappingF2Trans: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, // jf2(u, d, result) SetC: Constraint, { - /// Construct a new instance of `MockFactory` + /// Construct a new instance of [`AlmFactory`]. /// /// # Arguments /// - `f` cost function $f$ @@ -146,6 +149,16 @@ where /// /// The scalar type `T` is inferred from the supplied functions and set. /// + /// # Panics + /// + /// This constructor panics if: + /// + /// - `mapping_f2` is provided but `n2 == 0`, + /// - `n2 > 0` but `mapping_f2` is not provided, + /// - `mapping_f2` and `jacobian_mapping_f2_trans` are not provided together, + /// - `mapping_f1` and `jacobian_mapping_f1_trans` are not provided together, + /// - `mapping_f1` and `set_c` are not provided together. 
+ /// /// # Example /// /// This example uses `f64` for simplicity, but the same API also works with @@ -214,7 +227,7 @@ where } } - /// Computes function $\psi$ given by + /// Computes the function $\psi$ given by /// /// $$\psi(u) = f(u) + \tfrac{c}{2}\left[\mathrm{dist}_C^2\left(F_1(u) + \bar{c}^{-1}y\right) /// + \Vert F_2(u) \Vert^2\right],$$ @@ -231,11 +244,24 @@ where /// - `xi` is the vector $\xi = (c, y) \in \mathbb{R}^{n_1 + 1}$ /// - `cost`: stores the value of $\psi(u; \xi)$ on exit /// + /// If `F1` is present, `xi` must contain the penalty parameter `c` in + /// `xi[0]` followed by the Lagrange multiplier vector `y`. + /// + /// If only `F2` is present, `xi` must still contain at least the penalty + /// parameter `c` as its first entry. + /// + /// If neither `F1` nor `F2` is present, `xi` may be empty. + /// /// # Returns /// /// This method returns `Ok(())` if the computation is successful or an appropriate /// `SolverError` otherwise. /// + /// # Panics + /// + /// This method may panic if the supplied slices are inconsistent with the + /// dimensions expected by the provided mappings or set projection. + /// pub fn psi(&self, u: &[T], xi: &[T], cost: &mut T) -> FunctionCallResult { (self.f)(u, cost)?; let ny = if !xi.is_empty() { xi.len() - 1 } else { 0 }; @@ -275,7 +301,7 @@ where Ok(()) } - /// Computes the gradient of $\psi$ + /// Computes the gradient of $\psi$. /// /// The gradient of `psi` is given by /// @@ -290,11 +316,64 @@ where /// - `xi` is the vector $\xi = (c, y) \in \mathbb{R}^{n_1 + 1}$ /// - `grad`: stores the value of $\nabla \psi(u; \xi)$ on exit /// + /// As with [`AlmFactory::psi`], `xi` must contain the penalty parameter + /// `c` as its first entry whenever `F1` or `F2` is active. + /// /// # Returns /// /// This method returns `Ok(())` if the computation is successful or an appropriate /// `SolverError` otherwise. 
/// + /// # Panics + /// + /// This method may panic if the supplied slices are inconsistent with the + /// dimensions expected by the provided mappings, Jacobian-vector products, + /// or set projection. + /// + /// # Example + /// + /// This example uses `f64` for simplicity, but the same API also works with + /// `f32`. + /// + /// ```rust + /// use optimization_engine::{constraints::Ball2, alm::*, FunctionCallResult}; + /// + /// let set_c = Ball2::new(None, 1.0); + /// + /// let f = |_u: &[f64], cost: &mut f64| -> FunctionCallResult { + /// *cost = 0.0; + /// Ok(()) + /// }; + /// let df = |_u: &[f64], grad: &mut [f64]| -> FunctionCallResult { + /// grad.fill(0.0); + /// Ok(()) + /// }; + /// let f1 = |u: &[f64], f1u: &mut [f64]| -> FunctionCallResult { + /// f1u[0] = u[0]; + /// Ok(()) + /// }; + /// let jf1_tr = |_u: &[f64], d: &[f64], res: &mut [f64]| -> FunctionCallResult { + /// res[0] = d[0]; + /// Ok(()) + /// }; + /// + /// let factory = AlmFactory::new( + /// f, + /// df, + /// Some(f1), + /// Some(jf1_tr), + /// NO_MAPPING, + /// NO_JACOBIAN_MAPPING, + /// Some(set_c), + /// 0, + /// ); + /// + /// let u = [0.5_f64]; + /// let xi = [2.0_f64, 0.1_f64]; + /// let mut grad = [0.0_f64]; + /// factory.d_psi(&u, &xi, &mut grad).unwrap(); + /// ``` + /// pub fn d_psi(&self, u: &[T], xi: &[T], grad: &mut [T]) -> FunctionCallResult { let nu = u.len(); From 7a2095c9baeae1082a5eae46b53f42962ab9552f Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 14:25:54 +0000 Subject: [PATCH 066/133] AlmCache: update docs - add examples in docs --- src/alm/alm_cache.rs | 71 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 56 insertions(+), 15 deletions(-) diff --git a/src/alm/alm_cache.rs b/src/alm/alm_cache.rs index 8113fbf4..e65911d2 100644 --- a/src/alm/alm_cache.rs +++ b/src/alm/alm_cache.rs @@ -7,14 +7,13 @@ fn default_initial_penalty() -> T { cast::(10.0) } -/// Cache for `AlmOptimizer` (to be allocated once) +/// Cache and mutable 
state for `AlmOptimizer` /// -/// This is a cache structure that contains all the data that make -/// up the "state" of the ALM/PM algorithm, i.e., all those data that -/// the algorithm *updates*. +/// `AlmCache` stores the data that the outer ALM/PM loop updates from one +/// iteration to the next, together with the [`PANOCCache`] used to solve the +/// inner problems. /// -/// On the other hand, the problem data are provided in an instance -/// of `AlmProblem`. +/// The problem definition itself is stored separately in `AlmProblem`. /// /// The scalar type `T` is generic and is typically `f64` or `f32`. The default /// is `f64`. @@ -60,19 +59,43 @@ impl AlmCache where T: Float + LbfgsPrecision + Sum, { - /// Construct a new instance of `AlmCache` + /// Constructs a new `AlmCache` /// /// # Arguments /// - /// - `panoc_cache`: an instance of `PANOCCache` that will be used by - /// the inner problem - /// - `n1`, `n2`: range dimensions of mappings `F1` and `F2` respectively + /// - `panoc_cache`: cache used by the inner PANOC solver + /// - `n1`: dimension of the ALM mapping `F1` + /// - `n2`: dimension of the PM mapping `F2` /// - /// The scalar type `T` is inferred from `panoc_cache`. + /// The scalar type `T` is inferred from `panoc_cache`. 
Depending on the + /// values of `n1` and `n2`, this constructor allocates the auxiliary + /// vectors needed by the ALM and PM updates: /// - /// # Panics + /// - `y_plus` and `w_alm_aux` are allocated only when `n1 > 0` + /// - `w_pm` is allocated only when `n2 > 0` + /// - `xi` is allocated when `n1 + n2 > 0` and is initialized as + /// `xi = (c^0, y^0)`, where `c^0` is the default initial penalty and + /// `y^0` is the zero vector in `R^{n1}` /// - /// Does not panic + /// # Examples + /// + /// Using the default scalar type (`f64`): + /// + /// ``` + /// use optimization_engine::{alm::AlmCache, panoc::PANOCCache}; + /// + /// let panoc_cache = PANOCCache::new(4, 1e-6, 8); + /// let _alm_cache = AlmCache::new(panoc_cache, 2, 1); + /// ``` + /// + /// Using `f32` explicitly: + /// + /// ``` + /// use optimization_engine::{alm::AlmCache, panoc::PANOCCache}; + /// + /// let panoc_cache = PANOCCache::new(4, 1e-5_f32, 8); + /// let _alm_cache = AlmCache::<f32>::new(panoc_cache, 2, 1); + /// ``` /// pub fn new(panoc_cache: PANOCCache<T>, n1: usize, n2: usize) -> Self { AlmCache { @@ -114,9 +137,27 @@ where } } - /// Resets the cache to its virgin state, and resets the stored instance - /// of `PANOCCache` + /// Resets the cache to its initial iteration state + /// + /// This method: + /// + /// - resets the stored [`PANOCCache`] + /// - clears the outer iteration counters + /// - resets the stored infeasibility and fixed-point-residual related norms + /// + /// The allocated work vectors remain allocated so the cache can be reused + /// without additional memory allocations.
+ /// + /// # Examples + /// + /// ``` + /// use optimization_engine::{alm::AlmCache, panoc::PANOCCache}; + /// + /// let panoc_cache = PANOCCache::new(3, 1e-6, 5); + /// let mut alm_cache = AlmCache::new(panoc_cache, 1, 1); /// + /// alm_cache.reset(); + /// ``` pub fn reset(&mut self) { self.panoc_cache.reset(); self.iteration = 0; From 00e3787a06f1d73914485f730299b7649c5df795 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 14:30:24 +0000 Subject: [PATCH 067/133] examples in constraints docs --- src/constraints/affine_space.rs | 13 +++++++++++++ src/constraints/ball1.rs | 10 ++++++++++ src/constraints/ball2.rs | 10 ++++++++++ src/constraints/ballinf.rs | 11 ++++++++++- src/constraints/cartesian_product.rs | 15 +++++++++++++++ src/constraints/epigraph_squared_norm.rs | 10 ++++++++++ src/constraints/no_constraints.rs | 10 ++++++++++ src/constraints/rectangle.rs | 12 ++++++++++++ src/constraints/simplex.rs | 10 ++++++++++ src/constraints/soc.rs | 10 ++++++++++ src/constraints/sphere2.rs | 10 ++++++++++ src/constraints/zero.rs | 10 ++++++++++ 12 files changed, 130 insertions(+), 1 deletion(-) diff --git a/src/constraints/affine_space.rs b/src/constraints/affine_space.rs index 84b38855..bfa8c304 100644 --- a/src/constraints/affine_space.rs +++ b/src/constraints/affine_space.rs @@ -71,6 +71,19 @@ where /// /// Returns a new [`AffineSpace`] on success, or an [`AffineSpaceError`] if /// the provided data are invalid. 
+ /// + /// ## Example + /// + /// ```rust + /// use optimization_engine::constraints::{AffineSpace, Constraint}; + /// + /// let a = vec![1.0, 1.0, 1.0, -1.0]; + /// let b = vec![1.0, 0.0]; + /// let affine_space = AffineSpace::try_new(a, b).unwrap(); + /// + /// let mut x = [2.0, 2.0]; + /// affine_space.project(&mut x); + /// ``` pub fn try_new(a: Vec, b: Vec) -> Result { let n_rows = b.len(); let n_elements_a = a.len(); diff --git a/src/constraints/ball1.rs b/src/constraints/ball1.rs index 3f605229..de2ee721 100644 --- a/src/constraints/ball1.rs +++ b/src/constraints/ball1.rs @@ -15,6 +15,16 @@ pub struct Ball1<'a, T = f64> { impl<'a, T: Float> Ball1<'a, T> { /// Construct a new ball-1 with given center and radius. /// If no `center` is given, then it is assumed to be in the origin + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Ball1, Constraint}; + /// + /// let ball = Ball1::new(None, 1.0); + /// let mut x = [2.0, -0.5]; + /// ball.project(&mut x); + /// ``` pub fn new(center: Option<&'a [T]>, radius: T) -> Self { assert!(radius > T::zero()); let simplex = Simplex::new(radius); diff --git a/src/constraints/ball2.rs b/src/constraints/ball2.rs index c360f681..347654ad 100644 --- a/src/constraints/ball2.rs +++ b/src/constraints/ball2.rs @@ -13,6 +13,16 @@ pub struct Ball2<'a, T = f64> { impl<'a, T: Float> Ball2<'a, T> { /// Construct a new Euclidean ball with given center and radius /// If no `center` is given, then it is assumed to be in the origin + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Ball2, Constraint}; + /// + /// let ball = Ball2::new(None, 1.0); + /// let mut x = [2.0, 0.0]; + /// ball.project(&mut x); + /// ``` pub fn new(center: Option<&'a [T]>, radius: T) -> Self { assert!(radius > T::zero()); diff --git a/src/constraints/ballinf.rs b/src/constraints/ballinf.rs index 4b528764..50b8fa37 100644 --- a/src/constraints/ballinf.rs +++ b/src/constraints/ballinf.rs @@ -14,7 +14,16 
@@ pub struct BallInf<'a, T = f64> { impl<'a, T: Float> BallInf<'a, T> { /// Construct a new infinity-norm ball with given center and radius /// If no `center` is given, then it is assumed to be in the origin - /// + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{BallInf, Constraint}; + /// + /// let ball = BallInf::new(None, 1.0); + /// let mut x = [2.0, -0.2, -3.0]; + /// ball.project(&mut x); + /// ``` pub fn new(center: Option<&'a [T]>, radius: T) -> Self { assert!(radius > T::zero()); BallInf { center, radius } diff --git a/src/constraints/cartesian_product.rs b/src/constraints/cartesian_product.rs index fe12f194..2e7d12b0 100644 --- a/src/constraints/cartesian_product.rs +++ b/src/constraints/cartesian_product.rs @@ -36,6 +36,21 @@ impl<'a, T> CartesianProduct<'a, T> { /// when possible (provided you have an estimate of the number of sets /// your Cartesian product will consist of). /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Ball2, CartesianProduct, Constraint, Rectangle}; + /// + /// let xmin = [-1.0, -1.0]; + /// let xmax = [1.0, 1.0]; + /// let cartesian = CartesianProduct::new() + /// .add_constraint(2, Rectangle::new(Some(&xmin), Some(&xmax))) + /// .add_constraint(4, Ball2::new(None, 1.0)); + /// + /// let mut x = [3.0, -2.0, 2.0, 0.0]; + /// cartesian.project(&mut x); + /// ``` + /// pub fn new() -> Self { CartesianProduct { idx: Vec::new(), diff --git a/src/constraints/epigraph_squared_norm.rs b/src/constraints/epigraph_squared_norm.rs index 12844773..d4ff435b 100644 --- a/src/constraints/epigraph_squared_norm.rs +++ b/src/constraints/epigraph_squared_norm.rs @@ -19,6 +19,16 @@ impl EpigraphSquaredNorm { /// Create a new instance of the epigraph of the squared norm. /// /// Note that you do not need to specify the dimension. 
+ /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, EpigraphSquaredNorm}; + /// + /// let epi = EpigraphSquaredNorm::new(); + /// let mut x = [1.0, 2.0, 1.0]; + /// epi.project(&mut x); + /// ``` #[must_use] pub fn new() -> Self { EpigraphSquaredNorm {} diff --git a/src/constraints/no_constraints.rs b/src/constraints/no_constraints.rs index 7263a9b1..1d156db4 100644 --- a/src/constraints/no_constraints.rs +++ b/src/constraints/no_constraints.rs @@ -7,6 +7,16 @@ pub struct NoConstraints {} impl NoConstraints { /// Constructs new instance of `NoConstraints` /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, NoConstraints}; + /// + /// let no_constraints = NoConstraints::new(); + /// let mut x = [1.0, -2.0, 3.0]; + /// no_constraints.project(&mut x); + /// ``` + /// #[must_use] pub fn new() -> NoConstraints { NoConstraints {} diff --git a/src/constraints/rectangle.rs b/src/constraints/rectangle.rs index e71c9395..d4df786f 100644 --- a/src/constraints/rectangle.rs +++ b/src/constraints/rectangle.rs @@ -35,6 +35,18 @@ impl<'a, T: Float> Rectangle<'a, T> { /// - Both `xmin` and `xmax` have been provided, but they have incompatible /// dimensions /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, Rectangle}; + /// + /// let xmin = [-1.0, 0.0]; + /// let xmax = [1.0, 2.0]; + /// let rectangle = Rectangle::new(Some(&xmin), Some(&xmax)); + /// let mut x = [3.0, -4.0]; + /// rectangle.project(&mut x); + /// ``` + /// pub fn new(xmin: Option<&'a [T]>, xmax: Option<&'a [T]>) -> Self { assert!(xmin.is_some() || xmax.is_some()); // xmin or xmax must be Some assert!( diff --git a/src/constraints/simplex.rs b/src/constraints/simplex.rs index fcf7cb4e..f0e34cb5 100644 --- a/src/constraints/simplex.rs +++ b/src/constraints/simplex.rs @@ -15,6 +15,16 @@ pub struct Simplex { impl Simplex { /// Construct a new simplex with given (positive) $\alpha$. 
The user does not need /// to specify the dimension of the simplex. + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, Simplex}; + /// + /// let simplex = Simplex::new(1.0); + /// let mut x = [0.5, -0.5, 2.0]; + /// simplex.project(&mut x); + /// ``` pub fn new(alpha: T) -> Self { assert!(alpha > T::zero(), "alpha is nonpositive"); Simplex { alpha } diff --git a/src/constraints/soc.rs b/src/constraints/soc.rs index d2418543..0f12b40b 100644 --- a/src/constraints/soc.rs +++ b/src/constraints/soc.rs @@ -40,6 +40,16 @@ impl SecondOrderCone { /// # Panics /// /// The method panics if the given parameter `alpha` is nonpositive. + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, SecondOrderCone}; + /// + /// let cone = SecondOrderCone::new(1.0); + /// let mut x = [2.0, 0.0, 0.5]; + /// cone.project(&mut x); + /// ``` pub fn new(alpha: T) -> SecondOrderCone { assert!(alpha > T::zero()); // alpha must be positive SecondOrderCone { alpha } diff --git a/src/constraints/sphere2.rs b/src/constraints/sphere2.rs index f56f6e0c..11a7f57f 100644 --- a/src/constraints/sphere2.rs +++ b/src/constraints/sphere2.rs @@ -22,6 +22,16 @@ pub struct Sphere2<'a, T = f64> { impl<'a, T: Float> Sphere2<'a, T> { /// Construct a new Euclidean sphere with given center and radius /// If no `center` is given, then it is assumed to be in the origin + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, Sphere2}; + /// + /// let sphere = Sphere2::new(None, 1.0); + /// let mut x = [3.0, 4.0]; + /// sphere.project(&mut x); + /// ``` pub fn new(center: Option<&'a [T]>, radius: T) -> Self { assert!(radius > T::zero()); Sphere2 { center, radius } diff --git a/src/constraints/zero.rs b/src/constraints/zero.rs index 76998510..e2bb1541 100644 --- a/src/constraints/zero.rs +++ b/src/constraints/zero.rs @@ -7,6 +7,16 @@ pub struct Zero {} impl Zero { /// Constructs new instance of 
`Zero` + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, Zero}; + /// + /// let zero = Zero::new(); + /// let mut x = [1.0, -2.0, 3.0]; + /// zero.project(&mut x); + /// ``` #[must_use] pub fn new() -> Self { Zero {} From 827705ebbc7509a3dac54a8e11bd85c91d58f645 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 14:33:39 +0000 Subject: [PATCH 068/133] OpEn v0.12.0-alpha.1 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 1eb2049e..1987411b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,7 +42,7 @@ homepage = "https://alphaville.github.io/optimization-engine/" repository = "https://github.com/alphaville/optimization-engine" # Version of this crate (SemVer) -version = "0.12.0" +version = "0.12.0-alpha.1" edition = "2018" From a2a8e202fc2fae7ca941d255fa7268d745dd6b85 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 14:44:10 +0000 Subject: [PATCH 069/133] [ci skip] update website docs --- docs/openrust-arithmetic.mdx | 108 ++++++++++++++--------------------- 1 file changed, 43 insertions(+), 65 deletions(-) diff --git a/docs/openrust-arithmetic.mdx b/docs/openrust-arithmetic.mdx index eab443cc..d18093a0 100644 --- a/docs/openrust-arithmetic.mdx +++ b/docs/openrust-arithmetic.mdx @@ -4,132 +4,110 @@ title: Single and double precision description: OpEn with f32 and f64 number types --- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + :::note Info -The functionality presented here was introduced in OpEn version [`0.12.0`](https://pypi.org/project/opengen/#history). -The new API is fully backward-compatible with previous versions of OpEn. +The functionality presented here was introduced in OpEn version [`0.12.0`](https://crates.io/crates/optimization_engine/0.12.0-alpha.1). +The new API is fully backward-compatible with previous versions of OpEn +with `f64` being the default scalar type. 
::: ## Overview -OpEn's Rust API supports both `f64` and `f32`. - -Most public Rust types are generic over a scalar type `T` with `T: num::Float`, and in most places the default type is `f64`. This means: - -- if you do nothing special, you will usually get `f64` -- if you want single precision, you can explicitly use `f32` -- all quantities involved in one solver instance should use the same scalar type - -In particular, this applies to: - -- cost and gradient functions -- constraints -- `Problem` -- caches such as `PANOCCache`, `FBSCache`, and `AlmCache` -- optimizers such as `PANOCOptimizer`, `FBSOptimizer`, and `AlmOptimizer` -- solver status types such as `SolverStatus` and `AlmOptimizerStatus` - -## When to use `f64` and when to use `f32` - -### `f64` +OpEn's Rust API now supports both `f64` and `f32`. Note that with `f32` +you may encounter issues with convergence, especially if you are solving +particularly ill-conditioned problems. On the other hand, `f32` is sometimes +the preferred type for embedded applications and can lead to lower +solve times. -Use `f64` when you want maximum numerical robustness and accuracy. This is the safest default for: +When using `f32`: (i) make sure the problem is properly scaled, +and (ii) you may want to opt for less demanding tolerances. -- desktop applications -- difficult nonlinear problems -- problems with tight tolerances -- problems that are sensitive to conditioning +## PANOC example -### `f32` +Below you can see two examples of using the solver with single and double +precision arithmetic. -Use `f32` when memory footprint and throughput matter more than ultimate accuracy. 
This is often useful for: + -- embedded applications -- high-rate MPC loops -- applications where moderate tolerances are acceptable -In general, `f32` may require: - -- slightly looser tolerances -- more careful scaling of the problem -- fewer expectations about extremely small residuals - -## The default: `f64` - -If your functions, constants, and vectors use `f64`, you can often omit the scalar type completely. + ```rust use optimization_engine::{constraints, panoc::PANOCCache, Problem, SolverError}; use optimization_engine::panoc::PANOCOptimizer; -let tolerance = 1e-6; +let tolerance = 1e-4_f32; let lbfgs_memory = 10; -let radius = 1.0; +let radius = 1.0_f32; let bounds = constraints::Ball2::new(None, radius); -let df = |u: &[f64], grad: &mut [f64]| -> Result<(), SolverError> { - grad[0] = u[0] + u[1] + 1.0; - grad[1] = u[0] + 2.0 * u[1] - 1.0; +let df = |u: &[f32], grad: &mut [f32]| -> Result<(), SolverError> { + grad[0] = u[0] + u[1] + 1.0_f32; + grad[1] = u[0] + 2.0_f32 * u[1] - 1.0_f32; Ok(()) }; -let f = |u: &[f64], cost: &mut f64| -> Result<(), SolverError> { - *cost = 0.5 * (u[0] * u[0] + u[1] * u[1]); +let f = |u: &[f32], cost: &mut f32| -> Result<(), SolverError> { + *cost = 0.5_f32 * (u[0] * u[0] + u[1] * u[1]); Ok(()) }; let problem = Problem::new(&bounds, df, f); -let mut cache = PANOCCache::new(2, tolerance, lbfgs_memory); +let mut cache = PANOCCache::<f32>::new(2, tolerance, lbfgs_memory); let mut optimizer = PANOCOptimizer::new(problem, &mut cache); -let mut u = [0.0, 0.0]; +let mut u = [0.0_f32, 0.0_f32]; let status = optimizer.solve(&mut u).unwrap(); assert!(status.has_converged()); ``` + -Because all literals and function signatures above are `f64`, the compiler infers `T = f64`. - -## Using `f32` - -To use single precision, make the scalar type explicit throughout the problem definition.
+ ```rust use optimization_engine::{constraints, panoc::PANOCCache, Problem, SolverError}; use optimization_engine::panoc::PANOCOptimizer; -let tolerance = 1e-4_f32; +let tolerance = 1e-6; let lbfgs_memory = 10; -let radius = 1.0_f32; +let radius = 1.0; let bounds = constraints::Ball2::new(None, radius); -let df = |u: &[f32], grad: &mut [f32]| -> Result<(), SolverError> { - grad[0] = u[0] + u[1] + 1.0_f32; - grad[1] = u[0] + 2.0_f32 * u[1] - 1.0_f32; +let df = |u: &[f64], grad: &mut [f64]| -> Result<(), SolverError> { + grad[0] = u[0] + u[1] + 1.0; + grad[1] = u[0] + 2.0 * u[1] - 1.0; Ok(()) }; -let f = |u: &[f32], cost: &mut f32| -> Result<(), SolverError> { - *cost = 0.5_f32 * (u[0] * u[0] + u[1] * u[1]); +let f = |u: &[f64], cost: &mut f64| -> Result<(), SolverError> { + *cost = 0.5 * (u[0] * u[0] + u[1] * u[1]); Ok(()) }; let problem = Problem::new(&bounds, df, f); -let mut cache = PANOCCache::<f32>::new(2, tolerance, lbfgs_memory); +let mut cache = PANOCCache::new(2, tolerance, lbfgs_memory); let mut optimizer = PANOCOptimizer::new(problem, &mut cache); -let mut u = [0.0_f32, 0.0_f32]; +let mut u = [0.0, 0.0]; let status = optimizer.solve(&mut u).unwrap(); assert!(status.has_converged()); ``` + + + -The key idea is that the same scalar type must be used consistently in: +To use single precision, make sure that the following are all using `f32`: - the initial guess `u` - the closures for the cost and gradient - the constraints - the cache - any tolerances and numerical constants +- You are explicitly using `PANOCCache::<f32>` as in the above example ## Example with FBS From 56934b7468ef66c123bd1a3d6552988f490ba18e Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 18:26:33 +0000 Subject: [PATCH 070/133] [ci skip] website docs --- docs/openrust-arithmetic.mdx | 25 ++++--------------------- docs/openrust-basic.md | 23 +++++++++++++++-------- 2 files changed, 19 insertions(+), 29 deletions(-) diff --git a/docs/openrust-arithmetic.mdx
b/docs/openrust-arithmetic.mdx index d18093a0..9d4592a9 100644 --- a/docs/openrust-arithmetic.mdx +++ b/docs/openrust-arithmetic.mdx @@ -153,7 +153,7 @@ For example, if you use: then the whole ALM solve runs in single precision. -If instead you use plain `f64` literals and `&[f64]` closures, the solver runs in double precision. +If instead you use plain `f64` literals and `&[f64]` closures, the solver runs in double precision. This is the default behaviour. ## Type inference tips @@ -166,28 +166,11 @@ Good ways to make `f32` intent clear are: - annotate caches explicitly, for example `PANOCCache::<f32>::new(...)` - annotate closure arguments, for example `|u: &[f32], grad: &mut [f32]|` -## Important rule: do not mix `f32` and `f64` - -The following combinations are problematic: +:::warning Important rule: do not mix `f32` and `f64` +For example, the following combinations are problematic: - `u: &[f32]` with a cost function writing to `&mut f64` - `Ball2::new(None, 1.0_f64)` together with `PANOCCache::<f32>` -- `tolerance = 1e-6` in one place and `1e-6_f32` elsewhere if inference becomes ambiguous Choose one scalar type per optimization problem and use it everywhere. - -## Choosing tolerances - -When moving from `f64` to `f32`, it is often a good idea to relax tolerances. - -Typical starting points are: - -- `f64`: `1e-6`, `1e-8`, or smaller if needed - -- `f32`: `1e-4` or `1e-5` - -The right choice depends on: - -- scaling of the problem -- conditioning -- solver settings -- whether the problem is solved repeatedly in real time +::: diff --git a/docs/openrust-basic.md b/docs/openrust-basic.md index edaaaf13..f4369f92 100644 --- a/docs/openrust-basic.md +++ b/docs/openrust-basic.md @@ -25,6 +25,13 @@ The definition of an optimization problem consists in specifying the following t - the set of constraints, $U$, as an implementation of a trait ### Cost functions + +:::note Info +Throughout this document we will be using `f64`, which is the default +scalar type.
However, OpEn now supports `f32` as well. +::: + + The **cost function** `f` is a Rust function of type `|u: &[f64], cost: &mut f64| -> Result<(), SolverError>`. The first argument, `u`, is the argument of the function. The second argument, is a mutable reference to the result (cost). The function returns a *status code* of the type `Result<(), SolverError>` and the status code `Ok(())` means that the computation was successful. Other status codes can be used to encode errors/exceptions as defined in the [`SolverError`] enum. As an example, consider the cost function $f:\mathbb{R}^2\to\mathbb{R}$ that maps a two-dimensional @@ -33,8 +40,8 @@ vector $u$ to $f(u) = 5 u_1 - u_2^2$. This will be: ```rust let f = |u: &[f64], c: &mut f64| -> Result<(), SolverError> { - *c = 5.0 * u[0] - u[1].powi(2); - Ok(()) + *c = 5.0 * u[0] - u[1].powi(2); + Ok(()) }; ``` @@ -50,9 +57,9 @@ This function can be implemented as follows: ```rust let df = |u: &[f64], grad: &mut [f64]| -> Result<(), SolverError> { - grad[0] = 5.0; - grad[1] = -2.0*u[1]; - Ok(()) + grad[0] = 5.0; + grad[1] = -2.0*u[1]; + Ok(()) }; ``` @@ -290,9 +297,9 @@ fn main() { } }; - // define the bounds at every iteration - let bounds = constraints::Ball2::new(None, radius); - + // define the bounds at every iteration + let bounds = constraints::Ball2::new(None, radius); + // the problem definition is updated at every iteration let problem = Problem::new(&bounds, df, f); From e0af721a3f1859e5c9f393ff12e73654aa471b3e Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 18:41:15 +0000 Subject: [PATCH 071/133] TCP error include more information - wrong number of parameters specified - errors 1600, 1700, 3003 updated --- .../opengen/templates/tcp/tcp_server.rs | 21 ++++++++++++++++--- open-codegen/test/test.py | 18 ++++++++++++++++ 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/open-codegen/opengen/templates/tcp/tcp_server.rs b/open-codegen/opengen/templates/tcp/tcp_server.rs index 
042e176f..d3c98f04 100644 --- a/open-codegen/opengen/templates/tcp/tcp_server.rs +++ b/open-codegen/opengen/templates/tcp/tcp_server.rs @@ -150,7 +150,12 @@ fn execution_handler( Some(u0) => { if u0.len() != {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES { warn!("initial guess has incompatible dimensions"); - write_error_message(stream, 1600, "Initial guess has incompatible dimensions"); + let error_message = format!( + "initial guess has incompatible dimensions: provided {}, expected {}", + u0.len(), + {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES + ); + write_error_message(stream, 1600, &error_message); return; } u.copy_from_slice(u0); @@ -162,7 +167,12 @@ fn execution_handler( // ---------------------------------------------------- if let Some(y0) = &execution_parameter.initial_lagrange_multipliers { if y0.len() != {{meta.optimizer_name|upper}}_N1 { - write_error_message(stream, 1700, "wrong dimension of Langrange multipliers"); + let error_message = format!( + "wrong dimension of Langrange multipliers: provided {}, expected {}", + y0.len(), + {{meta.optimizer_name|upper}}_N1 + ); + write_error_message(stream, 1700, &error_message); return; } } @@ -172,7 +182,12 @@ fn execution_handler( // ---------------------------------------------------- let parameter = &execution_parameter.parameter; if parameter.len() != {{meta.optimizer_name|upper}}_NUM_PARAMETERS { - write_error_message(stream, 3003, "wrong number of parameters"); + let error_message = format!( + "wrong number of parameters: provided {}, expected {}", + parameter.len(), + {{meta.optimizer_name|upper}}_NUM_PARAMETERS + ); + write_error_message(stream, 3003, &error_message); return; } p.copy_from_slice(parameter); diff --git a/open-codegen/test/test.py b/open-codegen/test/test.py index d56f3075..c451df8d 100644 --- a/open-codegen/test/test.py +++ b/open-codegen/test/test.py @@ -351,18 +351,27 @@ def test_rust_build_only_f1(self): self.assertFalse(response.is_ok()) self.assertEqual(True, 
isinstance(status, og.tcp.SolverError)) self.assertEqual(3003, status.code) + self.assertEqual( + "wrong number of parameters: provided 3, expected 2", + status.message) response = mng.call(p=[2.0, 10.0], initial_guess=[0.1, 0.2]) self.assertFalse(response.is_ok()) status = response.get() self.assertEqual(True, isinstance(status, og.tcp.SolverError)) self.assertEqual(1600, status.code) + self.assertEqual( + "initial guess has incompatible dimensions: provided 2, expected 5", + status.message) response = mng.call(p=[2.0, 10.0], initial_y=[0.1]) status = response.get() self.assertFalse(response.is_ok()) self.assertEqual(True, isinstance(status, og.tcp.SolverError)) self.assertEqual(1700, status.code) + self.assertEqual( + "wrong dimension of Langrange multipliers: provided 1, expected 2", + status.message) mng.kill() @@ -405,18 +414,27 @@ def test_rust_build_only_f2_preconditioned(self): status = response.get() self.assertEqual(True, isinstance(status, og.tcp.SolverError)) self.assertEqual(3003, status.code) + self.assertEqual( + "wrong number of parameters: provided 3, expected 2", + status.message) response = mng1.call(p=[2.0, 10.0], initial_guess=[0.1, 0.2]) self.assertFalse(response.is_ok()) status = response.get() self.assertEqual(True, isinstance(status, og.tcp.SolverError)) self.assertEqual(1600, status.code) + self.assertEqual( + "initial guess has incompatible dimensions: provided 2, expected 5", + status.message) response = mng1.call(p=[2.0, 10.0], initial_y=[0.1]) self.assertFalse(response.is_ok()) status = response.get() self.assertEqual(True, isinstance(status, og.tcp.SolverError)) self.assertEqual(1700, status.code) + self.assertEqual( + "wrong dimension of Langrange multipliers: provided 1, expected 0", + status.message) finally: mng1.kill() mng2.kill() From 99813688124ac22e34d3c9e4911fa3ea1834610d Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 19:08:29 +0000 Subject: [PATCH 072/133] improve TCP error reporting - better error 
handling - tighter unit tests - unit tests safely kill managers at the end --- open-codegen/opengen/tcp/solver_error.py | 4 +- .../opengen/templates/tcp/tcp_server.rs | 44 ++- open-codegen/test/test.py | 258 ++++++++++++------ 3 files changed, 207 insertions(+), 99 deletions(-) diff --git a/open-codegen/opengen/tcp/solver_error.py b/open-codegen/opengen/tcp/solver_error.py index 61ab7fc8..e2d2d72c 100644 --- a/open-codegen/opengen/tcp/solver_error.py +++ b/open-codegen/opengen/tcp/solver_error.py @@ -19,10 +19,10 @@ def code(self): Possible error codes are: - - **1000**: Invalid request: Malformed or invalid JSON + - **1000**: Invalid request: malformed JSON or invalid UTF-8 payload - **1600**: Initial guess has incomplete dimensions - **1700**: Wrong dimension of Lagrange multipliers - - **2000**: Problem solution failed (solver error) + - **2000**: Problem solution failed (message may include the solver reason) - **3003**: Parameter vector has wrong length :return: Error code diff --git a/open-codegen/opengen/templates/tcp/tcp_server.rs b/open-codegen/opengen/templates/tcp/tcp_server.rs index d3c98f04..0200e3df 100644 --- a/open-codegen/opengen/templates/tcp/tcp_server.rs +++ b/open-codegen/opengen/templates/tcp/tcp_server.rs @@ -91,13 +91,22 @@ fn pong(stream: &mut std::net::TcpStream, code: i32) { } /// Writes an error to the communication stream +#[derive(Serialize)] +struct ErrorResponse<'a> { + #[serde(rename = "type")] + response_type: &'a str, + code: i32, + message: &'a str, +} + fn write_error_message(stream: &mut std::net::TcpStream, code: i32, error_msg: &str) { - let error_message = format!( - {% raw %}"{{\n\t\"type\" : \"Error\", \n\t\"code\" : {}, \n\t\"message\" : \"{}\"\n}}\n"{% endraw %}, + let error_response = ErrorResponse { + response_type: "Error", code, - error_msg - ); - warn!("Invalid request {:?}", code); + message: error_msg, + }; + let error_message = serde_json::to_string_pretty(&error_response).unwrap(); + warn!("TCP error {}: {}", 
code, error_msg); stream .write_all(error_message.as_bytes()) .expect("cannot write to stream"); @@ -200,8 +209,9 @@ fn execution_handler( Ok(ok_status) => { return_solution_to_client(ok_status, u, stream); } - Err(_) => { - write_error_message(stream, 2000, "Problem solution failed (solver error)"); + Err(err) => { + let error_message = format!("problem solution failed: {:?}", err); + write_error_message(stream, 2000, &error_message); } } } @@ -214,7 +224,7 @@ fn run_server(tcp_config: &TcpServerConfiguration) { let listener = TcpListener::bind(format!("{}:{}", tcp_config.ip, tcp_config.port)).unwrap(); let mut u = [0.0; {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES]; info!("listening started, ready to accept connections at {}:{}", tcp_config.ip, tcp_config.port); - for stream in listener.incoming() { + 'incoming: for stream in listener.incoming() { let mut stream = stream.unwrap(); //The following is more robust compared to `read_to_string` @@ -225,8 +235,17 @@ fn run_server(tcp_config: &TcpServerConfiguration) { read_data_length = stream .read(&mut bytes_buffer) .expect("could not read stream"); - let new_string = String::from_utf8(bytes_buffer[0..read_data_length].to_vec()) - .expect("sent data is not UFT-8"); + let new_string = match String::from_utf8(bytes_buffer[0..read_data_length].to_vec()) { + Ok(new_string) => new_string, + Err(err) => { + let error_message = format!( + "invalid request: request body is not valid UTF-8 ({})", + err.utf8_error() + ); + write_error_message(&mut stream, 1000, &error_message); + continue 'incoming; + } + }; buffer.push_str(&new_string); } @@ -251,8 +270,9 @@ fn run_server(tcp_config: &TcpServerConfiguration) { pong(&mut stream, ping_code); } }, - Err(_) => { - write_error_message(&mut stream, 1000, "Invalid request"); + Err(err) => { + let error_message = format!("invalid request: {}", err); + write_error_message(&mut stream, 1000, &error_message); } } } diff --git a/open-codegen/test/test.py 
b/open-codegen/test/test.py index c451df8d..852b49f6 100644 --- a/open-codegen/test/test.py +++ b/open-codegen/test/test.py @@ -1,5 +1,7 @@ import os import unittest +import json +import socket import casadi.casadi as cs import opengen as og import subprocess @@ -227,6 +229,53 @@ def setUpHalfspace(cls): cls.solverConfig()) builder.build() + @classmethod + def setUpSolverError(cls): + u = cs.MX.sym("u", 1) + p = cs.MX.sym("p", 1) + phi = cs.dot(u, u) + bounds = og.constraints.Rectangle(xmin=[-1.0], xmax=[1.0]) + tcp_config = og.config.TcpServerConfiguration(bind_port=3310) + meta = og.config.OptimizerMeta() \ + .with_optimizer_name("solver_error") + problem = og.builder.Problem(u, p, phi) \ + .with_constraints(bounds) + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ + .with_build_directory(RustBuildTestCase.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_tcp_interface_config(tcp_interface_config=tcp_config) + og.builder.OpEnOptimizerBuilder(problem, + metadata=meta, + build_configuration=build_config, + solver_configuration=cls.solverConfig()) \ + .build() + + target_lib = os.path.join( + RustBuildTestCase.TEST_DIR, "solver_error", "src", "lib.rs") + with open(target_lib, "r", encoding="utf-8") as fh: + solver_lib = fh.read() + + # Look for this excerpt inside lib.rs (in the auto-generated solver)... 
+ anchor = ( + ' assert_eq!(u.len(), SOLVER_ERROR_NUM_DECISION_VARIABLES, ' + '"Wrong number of decision variables (u)");\n' + ) + # Replace the anchor with this so that if p[0] < 0, the function `solve` + # will return an error of type SolverError::Cost + injected_guard = ( + anchor + + '\n' + ' if p[0] < 0.0 {\n' + ' return Err(SolverError::Cost);\n' + ' }\n' + ) + if anchor not in solver_lib: + raise RuntimeError("Could not inject deterministic solver error") + + with open(target_lib, "w", encoding="utf-8") as fh: + fh.write(solver_lib.replace(anchor, injected_guard, 1)) + @classmethod + def setUpClass(cls): cls.setUpPythonBindings() @@ -237,6 +286,30 @@ def setUpClass(cls): cls.setUpPlain() cls.setUpOnlyParametricF2() cls.setUpHalfspace() + cls.setUpSolverError() + + @staticmethod + def raw_tcp_request(ip, port, payload, buffer_size=4096): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as conn_socket: + conn_socket.connect((ip, port)) + if isinstance(payload, str): + payload = payload.encode() + conn_socket.sendall(payload) + conn_socket.shutdown(socket.SHUT_WR) + + data = b'' + while True: + data_chunk = conn_socket.recv(buffer_size) + if not data_chunk: + break + data += data_chunk + + return json.loads(data.decode()) + + def start_tcp_manager(self, manager): + manager.start() + self.addCleanup(manager.kill) # at the end, kill the TCP mngr + return manager def test_python_bindings(self): import sys @@ -314,22 +387,18 @@ def test_start_multiple_servers(self): # Start all servers for m in all_managers: - m.start() + self.start_tcp_manager(m) # Ping all for m in all_managers: m.ping() - # Kill all - for m in all_managers: - m.kill() - def test_rust_build_only_f1(self): # Start the server using a custom bind IP and port - mng = og.tcp.OptimizerTcpManager(RustBuildTestCase.TEST_DIR + '/only_f1', - ip='0.0.0.0', - port=13757) - mng.start() + mng = self.start_tcp_manager( + og.tcp.OptimizerTcpManager(RustBuildTestCase.TEST_DIR + '/only_f1', + 
ip='0.0.0.0', + port=13757)) pong = mng.ping() # check if the server is alive self.assertEqual(1, pong["Pong"]) @@ -373,75 +442,67 @@ def test_rust_build_only_f1(self): "wrong dimension of Langrange multipliers: provided 1, expected 2", status.message) - mng.kill() - def test_rust_build_only_f2_preconditioned(self): - mng1 = og.tcp.OptimizerTcpManager( - RustBuildTestCase.TEST_DIR + '/only_f2') - mng2 = og.tcp.OptimizerTcpManager( - RustBuildTestCase.TEST_DIR + '/only_f2_precond') - mng1.start() - mng2.start() - - try: - response1 = mng1.call(p=[0.5, 8.5], initial_guess=[ - 1, 2, 3, 4, 0]).get() - response2 = mng2.call(p=[0.5, 8.5], initial_guess=[ - 1, 2, 3, 4, 0]).get() - - self.assertEqual("Converged", response1.exit_status) - self.assertEqual("Converged", response2.exit_status) - - # Further testing - slv_cfg = RustBuildTestCase.solverConfig() - # check that the solution is (near-) feasible - self.assertTrue(response1.f2_norm < slv_cfg.constraints_tolerance) - self.assertTrue(response2.f2_norm < slv_cfg.constraints_tolerance) - # check the nrom of the FPR - self.assertTrue(response1.last_problem_norm_fpr < - slv_cfg.tolerance) - self.assertTrue(response2.last_problem_norm_fpr < - slv_cfg.tolerance) - # compare the costs - self.assertAlmostEqual(response1.cost, response2.cost, 4) - - x1, x2 = response1.solution, response2.solution - for i in range(len(x1)): - self.assertAlmostEqual(x1[i], x2[i], delta=5e-4) - - response = mng1.call(p=[2.0, 10.0, 50.0]) - self.assertFalse(response.is_ok()) - status = response.get() - self.assertEqual(True, isinstance(status, og.tcp.SolverError)) - self.assertEqual(3003, status.code) - self.assertEqual( - "wrong number of parameters: provided 3, expected 2", - status.message) - - response = mng1.call(p=[2.0, 10.0], initial_guess=[0.1, 0.2]) - self.assertFalse(response.is_ok()) - status = response.get() - self.assertEqual(True, isinstance(status, og.tcp.SolverError)) - self.assertEqual(1600, status.code) - self.assertEqual( - 
"initial guess has incompatible dimensions: provided 2, expected 5", - status.message) - - response = mng1.call(p=[2.0, 10.0], initial_y=[0.1]) - self.assertFalse(response.is_ok()) - status = response.get() - self.assertEqual(True, isinstance(status, og.tcp.SolverError)) - self.assertEqual(1700, status.code) - self.assertEqual( - "wrong dimension of Langrange multipliers: provided 1, expected 0", - status.message) - finally: - mng1.kill() - mng2.kill() + mng1 = self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/only_f2')) + mng2 = self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/only_f2_precond')) + + response1 = mng1.call(p=[0.5, 8.5], initial_guess=[ + 1, 2, 3, 4, 0]).get() + response2 = mng2.call(p=[0.5, 8.5], initial_guess=[ + 1, 2, 3, 4, 0]).get() + + self.assertEqual("Converged", response1.exit_status) + self.assertEqual("Converged", response2.exit_status) + + # Further testing + slv_cfg = RustBuildTestCase.solverConfig() + # check that the solution is (near-) feasible + self.assertTrue(response1.f2_norm < slv_cfg.constraints_tolerance) + self.assertTrue(response2.f2_norm < slv_cfg.constraints_tolerance) + # check the nrom of the FPR + self.assertTrue(response1.last_problem_norm_fpr < + slv_cfg.tolerance) + self.assertTrue(response2.last_problem_norm_fpr < + slv_cfg.tolerance) + # compare the costs + self.assertAlmostEqual(response1.cost, response2.cost, 4) + + x1, x2 = response1.solution, response2.solution + for i in range(len(x1)): + self.assertAlmostEqual(x1[i], x2[i], delta=5e-4) + + response = mng1.call(p=[2.0, 10.0, 50.0]) + self.assertFalse(response.is_ok()) + status = response.get() + self.assertEqual(True, isinstance(status, og.tcp.SolverError)) + self.assertEqual(3003, status.code) + self.assertEqual( + "wrong number of parameters: provided 3, expected 2", + status.message) + + response = mng1.call(p=[2.0, 10.0], initial_guess=[0.1, 0.2]) + self.assertFalse(response.is_ok()) + 
status = response.get() + self.assertEqual(True, isinstance(status, og.tcp.SolverError)) + self.assertEqual(1600, status.code) + self.assertEqual( + "initial guess has incompatible dimensions: provided 2, expected 5", + status.message) + + response = mng1.call(p=[2.0, 10.0], initial_y=[0.1]) + self.assertFalse(response.is_ok()) + status = response.get() + self.assertEqual(True, isinstance(status, og.tcp.SolverError)) + self.assertEqual(1700, status.code) + self.assertEqual( + "wrong dimension of Langrange multipliers: provided 1, expected 0", + status.message) def test_rust_build_plain(self): - mng = og.tcp.OptimizerTcpManager(RustBuildTestCase.TEST_DIR + '/plain') - mng.start() + mng = self.start_tcp_manager( + og.tcp.OptimizerTcpManager(RustBuildTestCase.TEST_DIR + '/plain')) pong = mng.ping() # check if the server is alive self.assertEqual(1, pong["Pong"]) @@ -451,13 +512,44 @@ def test_rust_build_plain(self): status = response.get() self.assertEqual("Converged", status.exit_status) - mng.kill() + def test_rust_build_plain_invalid_request_details(self): + self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/plain', + ip='127.0.0.1', + port=13758)) + + malformed_response = og.tcp.SolverResponse( + RustBuildTestCase.raw_tcp_request('127.0.0.1', 13758, '{"Run":')) + self.assertFalse(malformed_response.is_ok()) + malformed_status = malformed_response.get() + self.assertEqual(1000, malformed_status.code) + self.assertTrue( + malformed_status.message.startswith("invalid request:")) + self.assertIn("line 1 column", malformed_status.message) + + utf8_response = og.tcp.SolverResponse( + RustBuildTestCase.raw_tcp_request('127.0.0.1', 13758, b'\xff\xfe')) + self.assertFalse(utf8_response.is_ok()) + utf8_status = utf8_response.get() + self.assertEqual(1000, utf8_status.code) + self.assertTrue( + utf8_status.message.startswith( + "invalid request: request body is not valid UTF-8")) + + def test_rust_build_solver_error_details(self): + mng = 
self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/solver_error')) + + response = mng.call(p=[-1.0]) + self.assertFalse(response.is_ok()) + status = response.get() + self.assertEqual(2000, status.code) + self.assertEqual("problem solution failed: Cost", status.message) def test_rust_build_parametric_f2(self): # introduced to tackle issue #123 - mng = og.tcp.OptimizerTcpManager( - RustBuildTestCase.TEST_DIR + '/parametric_f2') - mng.start() + mng = self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/parametric_f2')) pong = mng.ping() # check if the server is alive self.assertEqual(1, pong["Pong"]) @@ -467,12 +559,10 @@ def test_rust_build_parametric_f2(self): status = response.get() self.assertEqual("Converged", status.exit_status) self.assertTrue(status.f2_norm < 1e-4) - mng.kill() def test_rust_build_parametric_halfspace(self): - mng = og.tcp.OptimizerTcpManager( - RustBuildTestCase.TEST_DIR + '/halfspace_optimizer') - mng.start() + mng = self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/halfspace_optimizer')) pong = mng.ping() # check if the server is alive self.assertEqual(1, pong["Pong"]) @@ -488,8 +578,6 @@ def test_rust_build_parametric_halfspace(self): self.assertTrue(sum([u[i] * c[i] for i in range(5)]) - b <= eps) self.assertTrue(-sum([u[i] * c[i] for i in range(5)]) + b <= eps) - mng.kill() - @staticmethod def c_bindings_helper(optimizer_name): p = subprocess.Popen(["/usr/bin/gcc", From f6b80de5e7f9048da7bfd906fd6795cd09ee5679 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 19:39:27 +0000 Subject: [PATCH 073/133] OpEn: good error handling --- examples/panoc_ex1.rs | 8 +- .../templates/c/optimizer_cinterface.rs.jinja | 9 +- .../opengen/templates/tcp/tcp_server.rs | 2 +- open-codegen/test/test.py | 6 +- src/alm/alm_factory.rs | 8 +- src/alm/alm_optimizer.rs | 55 +++++--- src/constraints/affine_space.rs | 18 ++- src/constraints/ball1.rs 
| 13 +- src/constraints/ball2.rs | 4 +- src/constraints/ballinf.rs | 4 +- src/constraints/ballp.rs | 17 ++- src/constraints/cartesian_product.rs | 15 +-- src/constraints/epigraph_squared_norm.rs | 35 +++-- src/constraints/finite.rs | 4 +- src/constraints/halfspace.rs | 4 +- src/constraints/hyperplane.rs | 4 +- src/constraints/mod.rs | 4 +- src/constraints/no_constraints.rs | 5 +- src/constraints/rectangle.rs | 15 ++- src/constraints/simplex.rs | 4 +- src/constraints/soc.rs | 4 +- src/constraints/sphere2.rs | 8 +- src/constraints/tests.rs | 124 +++++++++--------- src/constraints/zero.rs | 4 +- src/core/fbs/fbs_engine.rs | 27 ++-- src/core/fbs/fbs_optimizer.rs | 4 +- src/core/fbs/tests.rs | 2 +- src/core/panoc/panoc_engine.rs | 49 +++++-- src/core/panoc/panoc_optimizer.rs | 6 +- src/lib.rs | 46 ++++++- 30 files changed, 334 insertions(+), 174 deletions(-) diff --git a/examples/panoc_ex1.rs b/examples/panoc_ex1.rs index 01f3e497..c50d3301 100644 --- a/examples/panoc_ex1.rs +++ b/examples/panoc_ex1.rs @@ -29,7 +29,9 @@ fn main() { // define the cost function and its gradient let df = |u: &[f64], grad: &mut [f64]| -> Result<(), SolverError> { if a < 0.0 || b < 0.0 { - Err(SolverError::Cost) + Err(SolverError::Cost( + "Rosenbrock parameters must be nonnegative", + )) } else { rosenbrock_grad(a, b, u, grad); Ok(()) @@ -38,7 +40,9 @@ fn main() { let f = |u: &[f64], c: &mut f64| -> Result<(), SolverError> { if a < 0.0 || b < 0.0 { - Err(SolverError::Cost) + Err(SolverError::Cost( + "Rosenbrock parameters must be nonnegative", + )) } else { *c = rosenbrock_cost(a, b, u); Ok(()) diff --git a/open-codegen/opengen/templates/c/optimizer_cinterface.rs.jinja b/open-codegen/opengen/templates/c/optimizer_cinterface.rs.jinja index 039dfe0f..c27c356e 100644 --- a/open-codegen/opengen/templates/c/optimizer_cinterface.rs.jinja +++ b/open-codegen/opengen/templates/c/optimizer_cinterface.rs.jinja @@ -179,8 +179,11 @@ pub unsafe extern "C" fn {{meta.optimizer_name|lower}}_solve( }, Err(e) 
=> {{meta.optimizer_name}}SolverStatus { exit_status: match e { - SolverError::Cost => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedCost, - SolverError::NotFiniteComputation => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedNotFiniteComputation, + SolverError::Cost(_) + | SolverError::ProjectionFailed(_) + | SolverError::LinearAlgebraFailure(_) + | SolverError::InvalidProblemState(_) => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedCost, + SolverError::NotFiniteComputation(_) => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedNotFiniteComputation, }, num_outer_iterations: u64::MAX as c_ulong, num_inner_iterations: u64::MAX as c_ulong, @@ -209,4 +212,4 @@ pub unsafe extern "C" fn {{meta.optimizer_name|lower}}_free(instance: *mut {{met assert!(!instance.is_null()); drop(Box::from_raw(instance)); } -{% endif %} \ No newline at end of file +{% endif %} diff --git a/open-codegen/opengen/templates/tcp/tcp_server.rs b/open-codegen/opengen/templates/tcp/tcp_server.rs index 0200e3df..14823532 100644 --- a/open-codegen/opengen/templates/tcp/tcp_server.rs +++ b/open-codegen/opengen/templates/tcp/tcp_server.rs @@ -210,7 +210,7 @@ fn execution_handler( return_solution_to_client(ok_status, u, stream); } Err(err) => { - let error_message = format!("problem solution failed: {:?}", err); + let error_message = format!("problem solution failed: {}", err); write_error_message(stream, 2000, &error_message); } } diff --git a/open-codegen/test/test.py b/open-codegen/test/test.py index 852b49f6..3f69413f 100644 --- a/open-codegen/test/test.py +++ b/open-codegen/test/test.py @@ -267,7 +267,7 @@ def setUpSolverError(cls): anchor + '\n' ' if p[0] < 0.0 {\n' - ' return Err(SolverError::Cost);\n' + ' return Err(SolverError::Cost("forced solver error for TCP test"));\n' ' }\n' ) if anchor not in solver_lib: @@ -544,7 +544,9 @@ def test_rust_build_solver_error_details(self): 
self.assertFalse(response.is_ok()) status = response.get() self.assertEqual(2000, status.code) - self.assertEqual("problem solution failed: Cost", status.message) + self.assertEqual( + "problem solution failed: cost or gradient evaluation failed: forced solver error for TCP test", + status.message) def test_rust_build_parametric_f2(self): # introduced to tackle issue #123 diff --git a/src/alm/alm_factory.rs b/src/alm/alm_factory.rs index 21776453..b94cd104 100644 --- a/src/alm/alm_factory.rs +++ b/src/alm/alm_factory.rs @@ -233,7 +233,7 @@ where .zip(y_lagrange_mult.iter()) .for_each(|(ti, yi)| *ti += yi / f64::max(penalty_parameter, 1.0)); s.copy_from_slice(&f1_u_plus_y_over_c); - set_c.project(&mut s); + set_c.project(&mut s)?; *cost += 0.5 * penalty_parameter * matrix_operations::norm2_squared_diff(&f1_u_plus_y_over_c, &s); @@ -296,7 +296,7 @@ where .zip(y_lagrange_mult.iter()) .for_each(|(ti, yi)| *ti += yi / c_penalty_parameter); s_aux_var.copy_from_slice(&f1_u_plus_y_over_c); // s = t - set_c.project(&mut s_aux_var); // s = Proj_C(F1(u) + y/c) + set_c.project(&mut s_aux_var)?; // s = Proj_C(F1(u) + y/c) // t = F1(u) + y/c - Proj_C(F1(u) + y/c) f1_u_plus_y_over_c @@ -412,7 +412,9 @@ mod tests { let f2 = mapping_f2; let jac_f2_tr = |_u: &[f64], _d: &[f64], _res: &mut [f64]| -> Result<(), crate::SolverError> { - Err(SolverError::NotFiniteComputation) + Err(SolverError::NotFiniteComputation( + "mock Jacobian-transpose product returned a non-finite result", + )) }; let factory = AlmFactory::new( mocks::f0, diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs index c7d9ec08..c7a99281 100644 --- a/src/alm/alm_optimizer.rs +++ b/src/alm/alm_optimizer.rs @@ -650,7 +650,7 @@ where .for_each(|((y_plus_i, y_i), w_alm_aux_i)| *y_plus_i = w_alm_aux_i + y_i / c); // Step #3: y_plus := Proj_C(y_plus) - alm_set_c.project(y_plus); + alm_set_c.project(y_plus)?; // Step #4 y_plus @@ -667,7 +667,7 @@ where } /// Project y on set Y - fn project_on_set_y(&mut self) { + 
fn project_on_set_y(&mut self) -> FunctionCallResult { let problem = &self.alm_problem; if let Some(y_set) = &problem.alm_set_y { // NOTE: as_mut() converts from &mut Option to Option<&mut T> @@ -676,9 +676,10 @@ where // * which can be treated as Option<&mut [f64]> // * y_vec is &mut [f64] if let Some(xi_vec) = self.alm_cache.xi.as_mut() { - y_set.project(&mut xi_vec[1..]); + y_set.project(&mut xi_vec[1..])?; } } + Ok(()) } /// Solve inner problem @@ -740,7 +741,7 @@ where inner_solver.solve(u) } - fn is_exit_criterion_satisfied(&self) -> bool { + fn is_exit_criterion_satisfied(&self) -> Result { let cache = &self.alm_cache; let problem = &self.alm_problem; // Criterion 1: ||Delta y|| <= c * delta @@ -763,9 +764,14 @@ where // This function will panic is there is no akkt_tolerance // This should never happen because we set the AKKT tolerance // in the constructor and can never become `None` again - let criterion_3 = - cache.panoc_cache.akkt_tolerance.unwrap() <= self.epsilon_tolerance + SMALL_EPSILON; - criterion_1 && criterion_2 && criterion_3 + let criterion_3 = cache + .panoc_cache + .akkt_tolerance + .ok_or(SolverError::InvalidProblemState( + "missing inner AKKT tolerance while checking the exit criterion", + ))? 
+ <= self.epsilon_tolerance + SMALL_EPSILON; + Ok(criterion_1 && criterion_2 && criterion_3) } /// Whether the penalty parameter should not be updated @@ -802,13 +808,20 @@ where } } - fn update_inner_akkt_tolerance(&mut self) { + fn update_inner_akkt_tolerance(&mut self) -> FunctionCallResult { let cache = &mut self.alm_cache; // epsilon_{nu+1} := max(epsilon, beta*epsilon_nu) + let akkt_tolerance = cache + .panoc_cache + .akkt_tolerance + .ok_or(SolverError::InvalidProblemState( + "missing inner AKKT tolerance while updating it", + ))?; cache.panoc_cache.set_akkt_tolerance(f64::max( - cache.panoc_cache.akkt_tolerance.unwrap() * self.epsilon_update_factor, + akkt_tolerance * self.epsilon_update_factor, self.epsilon_tolerance, )); + Ok(()) } fn final_cache_update(&mut self) { @@ -843,7 +856,7 @@ where let mut inner_exit_status: ExitStatus = ExitStatus::Converged; // Project y on Y - self.project_on_set_y(); + self.project_on_set_y()?; // If the inner problem fails miserably, the failure should be propagated // upstream (using `?`). If the inner problem has not converged, that is fine, @@ -867,7 +880,7 @@ where self.compute_alm_infeasibility()?; // ALM: ||y_plus - y|| // Check exit criterion - if self.is_exit_criterion_satisfied() { + if self.is_exit_criterion_satisfied()? 
{ // Do not continue the outer iteration // An (epsilon, delta)-AKKT point has been found return Ok(InnerProblemStatus::new(false, inner_exit_status)); @@ -876,7 +889,7 @@ where } // Update inner problem tolerance - self.update_inner_akkt_tolerance(); + self.update_inner_akkt_tolerance()?; // conclusive step: updated iteration count, resets PANOC cache, // sets f2_norm = f2_norm_plus etc @@ -983,7 +996,9 @@ where self.alm_cache .y_plus .as_ref() - .expect("Although n1 > 0, there is no vector y (Lagrange multipliers)"), + .ok_or(SolverError::InvalidProblemState( + "missing Lagrange multipliers at the ALM solution", + ))?, ); Ok(status) } else { @@ -1129,7 +1144,7 @@ mod tests { .with_initial_penalty(25.0) .with_initial_lagrange_multipliers(&[2., 3., 4., 10.]); - alm_optimizer.project_on_set_y(); + alm_optimizer.project_on_set_y().unwrap(); if let Some(xi_after_proj) = &alm_optimizer.alm_cache.xi { println!("xi = {:#?}", xi_after_proj); let y_projected_correct = [ @@ -1282,7 +1297,7 @@ mod tests { .with_initial_inner_tolerance(1e-1) .with_inner_tolerance_update_factor(0.2); - alm_optimizer.update_inner_akkt_tolerance(); + alm_optimizer.update_inner_akkt_tolerance().unwrap(); unit_test_utils::assert_nearly_equal( 0.1, @@ -1305,7 +1320,7 @@ mod tests { ); for _i in 1..=5 { - alm_optimizer.update_inner_akkt_tolerance(); + alm_optimizer.update_inner_akkt_tolerance().unwrap(); } unit_test_utils::assert_nearly_equal( 2e-5, @@ -1411,20 +1426,20 @@ mod tests { // should not exit yet... 
assert!( - !alm_optimizer.is_exit_criterion_satisfied(), + !alm_optimizer.is_exit_criterion_satisfied().unwrap(), "exists right away" ); let alm_optimizer = alm_optimizer .with_initial_inner_tolerance(1e-3) .with_epsilon_tolerance(1e-3); - assert!(!alm_optimizer.is_exit_criterion_satisfied()); + assert!(!alm_optimizer.is_exit_criterion_satisfied().unwrap()); alm_optimizer.alm_cache.delta_y_norm_plus = 1e-3; - assert!(!alm_optimizer.is_exit_criterion_satisfied()); + assert!(!alm_optimizer.is_exit_criterion_satisfied().unwrap()); alm_optimizer.alm_cache.f2_norm_plus = 1e-3; - assert!(alm_optimizer.is_exit_criterion_satisfied()); + assert!(alm_optimizer.is_exit_criterion_satisfied().unwrap()); } #[test] diff --git a/src/constraints/affine_space.rs b/src/constraints/affine_space.rs index 2f25ceba..7ca78608 100644 --- a/src/constraints/affine_space.rs +++ b/src/constraints/affine_space.rs @@ -1,6 +1,5 @@ use super::Constraint; -use crate::matrix_operations; -use crate::CholeskyFactorizer; +use crate::{matrix_operations, CholeskyFactorizer, FunctionCallResult, SolverError}; use ndarray::{ArrayView1, ArrayView2}; @@ -82,26 +81,33 @@ impl Constraint for AffineSpace { /// ``` /// /// The result is stored in `x` and it can be verified that $Ax = b$. 
- fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { let n = self.n_cols; assert!(x.len() == n, "x has wrong dimension"); // Step 1: Compute e = Ax - b let a = ArrayView2::from_shape((self.n_rows, self.n_cols), &self.a_mat) - .expect("invalid A shape"); + .map_err(|_| { + SolverError::InvalidProblemState( + "failed to construct the affine-space matrix view", + ) + })?; let x_view = ArrayView1::from(&x[..]); let b = ArrayView1::from(&self.b_vec[..]); let e = a.dot(&x_view) - b; - let e_slice: &[f64] = e.as_slice().unwrap(); + let e_slice: &[f64] = e.as_slice().ok_or(SolverError::InvalidProblemState( + "affine-space residual vector is not stored contiguously", + ))?; // Step 2: Solve AA' z = e and compute z - let z = self.factorizer.solve(e_slice).unwrap(); + let z = self.factorizer.solve(e_slice)?; // Step 3: Compute x = x - A'z let at_z = a.t().dot(&ArrayView1::from(&z[..])); for (xi, corr) in x.iter_mut().zip(at_z.iter()) { *xi -= *corr; } + Ok(()) } /// Affine sets are convex. 
diff --git a/src/constraints/ball1.rs b/src/constraints/ball1.rs index eb1ee77f..3a4fd921 100644 --- a/src/constraints/ball1.rs +++ b/src/constraints/ball1.rs @@ -1,5 +1,6 @@ use super::Constraint; use super::Simplex; +use crate::FunctionCallResult; #[derive(Copy, Clone)] /// A norm-1 ball, that is, a set given by $B_1^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert_1 \leq r\\}$ @@ -23,7 +24,7 @@ impl<'a> Ball1<'a> { } } - fn project_on_ball1_centered_at_origin(&self, x: &mut [f64]) { + fn project_on_ball1_centered_at_origin(&self, x: &mut [f64]) -> FunctionCallResult { if crate::matrix_operations::norm1(x) > self.radius { // u = |x| (copied) let mut u = vec![0.0; x.len()]; @@ -31,16 +32,17 @@ impl<'a> Ball1<'a> { .zip(x.iter()) .for_each(|(ui, &xi)| *ui = f64::abs(xi)); // u = P_simplex(u) - self.simplex.project(&mut u); + self.simplex.project(&mut u)?; x.iter_mut() .zip(u.iter()) .for_each(|(xi, &ui)| *xi = f64::signum(*xi) * ui); } + Ok(()) } } impl<'a> Constraint for Ball1<'a> { - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { if let Some(center) = &self.center { assert_eq!( x.len(), @@ -50,13 +52,14 @@ impl<'a> Constraint for Ball1<'a> { x.iter_mut() .zip(center.iter()) .for_each(|(xi, &ci)| *xi -= ci); - self.project_on_ball1_centered_at_origin(x); + self.project_on_ball1_centered_at_origin(x)?; x.iter_mut() .zip(center.iter()) .for_each(|(xi, &ci)| *xi += ci); } else { - self.project_on_ball1_centered_at_origin(x); + self.project_on_ball1_centered_at_origin(x)?; } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/ball2.rs b/src/constraints/ball2.rs index c4475cde..1cbd8e8d 100644 --- a/src/constraints/ball2.rs +++ b/src/constraints/ball2.rs @@ -1,4 +1,5 @@ use super::Constraint; +use crate::FunctionCallResult; #[derive(Copy, Clone)] /// A Euclidean ball, that is, a set given by $B_2^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert \leq r\\}$ @@ -19,7 +20,7 @@ impl<'a> Ball2<'a> { } impl<'a> 
Constraint for Ball2<'a> { - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { if let Some(center) = &self.center { assert_eq!( x.len(), @@ -46,6 +47,7 @@ impl<'a> Constraint for Ball2<'a> { x.iter_mut().for_each(|x_| *x_ /= norm_over_radius); } } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/ballinf.rs b/src/constraints/ballinf.rs index 8b87c688..ddae792d 100644 --- a/src/constraints/ballinf.rs +++ b/src/constraints/ballinf.rs @@ -1,4 +1,5 @@ use super::Constraint; +use crate::FunctionCallResult; #[derive(Copy, Clone)] /// An infinity ball defined as $B_\infty^r = \\{x\in\mathbb{R}^n {}:{} \Vert{}x{}\Vert_{\infty} \leq r\\}$, @@ -42,7 +43,7 @@ impl<'a> Constraint for BallInf<'a> { /// /// for all $i=1,\ldots, n$. /// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { if let Some(center) = &self.center { assert_eq!( x.len(), @@ -58,6 +59,7 @@ impl<'a> Constraint for BallInf<'a> { .filter(|xi| xi.abs() > self.radius) .for_each(|xi| *xi = xi.signum() * self.radius); } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/ballp.rs b/src/constraints/ballp.rs index 3a0d893b..7abbb22f 100644 --- a/src/constraints/ballp.rs +++ b/src/constraints/ballp.rs @@ -1,4 +1,5 @@ use super::Constraint; +use crate::{FunctionCallResult, SolverError}; #[derive(Copy, Clone)] /// An $\\ell_p$ ball, that is, @@ -157,7 +158,7 @@ impl<'a> BallP<'a> { .powf(1.0 / self.p) } - fn project_lp_ball(&self, x: &mut [f64]) { + fn project_lp_ball(&self, x: &mut [f64]) -> FunctionCallResult { let p = self.p; let r = self.radius; let tol = self.tolerance; @@ -165,7 +166,7 @@ impl<'a> BallP<'a> { let current_norm = self.lp_norm(x); if current_norm <= r { - return; + return Ok(()); } let abs_x: Vec = x.iter().map(|xi| xi.abs()).collect(); @@ -188,7 +189,9 @@ impl<'a> BallP<'a> { while radius_error(lambda_hi) > 0.0 { lambda_hi *= 2.0; if lambda_hi > 1e20 { - panic!("Failed to 
bracket the Lagrange multiplier"); + return Err(SolverError::ProjectionFailed( + "failed to bracket the Lagrange multiplier", + )); } } @@ -215,6 +218,7 @@ impl<'a> BallP<'a> { let u = Self::solve_coordinate_newton(a, lambda_star, p, tol, max_iter); *xi = xi.signum() * u; }); + Ok(()) } /// Solve for u >= 0 the equation u + lambda * p * u^(p-1) = a @@ -271,7 +275,7 @@ impl<'a> BallP<'a> { } impl<'a> Constraint for BallP<'a> { - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { if let Some(center) = &self.center { assert_eq!( x.len(), @@ -285,14 +289,15 @@ impl<'a> Constraint for BallP<'a> { .zip(x.iter().zip(center.iter())) .for_each(|(s, (xi, ci))| *s = *xi - *ci); - self.project_lp_ball(&mut shifted); + self.project_lp_ball(&mut shifted)?; x.iter_mut() .zip(shifted.iter().zip(center.iter())) .for_each(|(xi, (si, ci))| *xi = *ci + *si); } else { - self.project_lp_ball(x); + self.project_lp_ball(x)?; } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/cartesian_product.rs b/src/constraints/cartesian_product.rs index edf15b5f..83d2eacc 100644 --- a/src/constraints/cartesian_product.rs +++ b/src/constraints/cartesian_product.rs @@ -1,4 +1,5 @@ use super::Constraint; +use crate::FunctionCallResult; /// Cartesian product of constraints. 
/// @@ -143,16 +144,14 @@ impl<'a> Constraint for CartesianProduct<'a> { /// /// The method will panic if the dimension of `x` is not equal to the /// dimension of the Cartesian product (see `dimension()`) - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { assert!(x.len() == self.dimension(), "x has wrong size"); let mut j = 0; - self.idx - .iter() - .zip(self.constraints.iter()) - .for_each(|(&i, c)| { - c.project(&mut x[j..i]); - j = i; - }); + for (&i, c) in self.idx.iter().zip(self.constraints.iter()) { + c.project(&mut x[j..i])?; + j = i; + } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/epigraph_squared_norm.rs b/src/constraints/epigraph_squared_norm.rs index f91617c8..a0032654 100644 --- a/src/constraints/epigraph_squared_norm.rs +++ b/src/constraints/epigraph_squared_norm.rs @@ -1,4 +1,4 @@ -use crate::matrix_operations; +use crate::{matrix_operations, FunctionCallResult, SolverError}; use super::Constraint; @@ -46,7 +46,10 @@ impl Constraint for EpigraphSquaredNorm { /// /// Panics if: /// - /// - `x.len() < 2`, + /// - `x.len() < 2`. + /// + /// Returns an error if: + /// /// - no admissible real root is found, /// - the Newton derivative becomes too small, /// - the final scaling factor is numerically singular. 
@@ -63,7 +66,7 @@ impl Constraint for EpigraphSquaredNorm { /// /// epi.project(&mut x); /// ``` - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { assert!( x.len() >= 2, "EpigraphSquaredNorm::project requires x.len() >= 2" @@ -76,7 +79,7 @@ impl Constraint for EpigraphSquaredNorm { // Already feasible if norm_z_sq <= t { - return; + return Ok(()); } // Cubic: @@ -106,8 +109,9 @@ impl Constraint for EpigraphSquaredNorm { } } - let mut zsol = - right_root.expect("EpigraphSquaredNorm::project: no admissible real root found"); + let mut zsol = right_root.ok_or(SolverError::ProjectionFailed( + "no admissible real root found for the cubic projection equation", + ))?; // Newton refinement let newton_max_iters: usize = 5; @@ -123,10 +127,11 @@ impl Constraint for EpigraphSquaredNorm { } let dp_z = 3.0 * a3 * zsol_sq + 2.0 * a2 * zsol + a1; - assert!( - dp_z.abs() > 1e-15, - "EpigraphSquaredNorm::project: Newton derivative too small" - ); + if dp_z.abs() <= 1e-15 { + return Err(SolverError::ProjectionFailed( + "Newton refinement derivative is too small", + )); + } zsol -= p_z / dp_z; } @@ -134,16 +139,18 @@ impl Constraint for EpigraphSquaredNorm { let right_root = zsol; let scaling = 1.0 + 2.0 * (right_root - t); - assert!( - scaling.abs() > 1e-15, - "EpigraphSquaredNorm::project: scaling factor too small" - ); + if scaling.abs() <= 1e-15 { + return Err(SolverError::ProjectionFailed( + "projection scaling factor is numerically singular", + )); + } // Projection for xi in x.iter_mut().take(nx) { *xi /= scaling; } x[nx] = right_root; + Ok(()) } /// This is a convex set, so this function returns `true`. 
diff --git a/src/constraints/finite.rs b/src/constraints/finite.rs index 3f9393cd..fd0dc9f7 100644 --- a/src/constraints/finite.rs +++ b/src/constraints/finite.rs @@ -1,4 +1,5 @@ use super::Constraint; +use crate::FunctionCallResult; /// /// A finite set, $X = \\{x_1, x_2, \ldots, x_n\\}\subseteq\mathbb{R}^n$, given vectors @@ -85,7 +86,7 @@ impl<'a> Constraint for FiniteSet<'a> { /// This method panics if the dimension of `x` is not equal to the /// dimension of the points in the finite set. /// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { assert_eq!(x.len(), self.data[0].len(), "x has incompatible dimension"); let mut idx: usize = 0; let mut best_distance: f64 = num::Float::infinity(); @@ -97,6 +98,7 @@ impl<'a> Constraint for FiniteSet<'a> { } } x.copy_from_slice(self.data[idx]); + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/halfspace.rs b/src/constraints/halfspace.rs index 5442ca54..069b38c6 100644 --- a/src/constraints/halfspace.rs +++ b/src/constraints/halfspace.rs @@ -1,5 +1,6 @@ use super::Constraint; use crate::matrix_operations; +use crate::FunctionCallResult; #[derive(Clone)] /// A halfspace is a set given by $H = \\{x \in \mathbb{R}^n {}:{} \langle c, x\rangle \leq b\\}$. @@ -79,7 +80,7 @@ impl<'a> Constraint for Halfspace<'a> { /// This method panics if the length of `x` is not equal to the dimension /// of the halfspace. 
/// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { let inner_product = matrix_operations::inner_product(x, self.normal_vector); if inner_product > self.offset { let factor = (inner_product - self.offset) / self.normal_vector_squared_norm; @@ -87,6 +88,7 @@ impl<'a> Constraint for Halfspace<'a> { .zip(self.normal_vector.iter()) .for_each(|(x, normal_vector_i)| *x -= factor * normal_vector_i); } + Ok(()) } /// Halfspaces are convex sets diff --git a/src/constraints/hyperplane.rs b/src/constraints/hyperplane.rs index 886fd494..01362f11 100644 --- a/src/constraints/hyperplane.rs +++ b/src/constraints/hyperplane.rs @@ -1,5 +1,6 @@ use super::Constraint; use crate::matrix_operations; +use crate::FunctionCallResult; #[derive(Clone)] /// A hyperplane is a set given by $H = \\{x \in \mathbb{R}^n {}:{} \langle c, x\rangle = b\\}$. @@ -79,13 +80,14 @@ impl<'a> Constraint for Hyperplane<'a> { /// This method panics if the length of `x` is not equal to the dimension /// of the hyperplane. /// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { assert_eq!(x.len(), self.normal_vector.len(), "x has wrong dimension"); let inner_product = matrix_operations::inner_product(x, self.normal_vector); let factor = (inner_product - self.offset) / self.normal_vector_squared_norm; x.iter_mut() .zip(self.normal_vector.iter()) .for_each(|(x, nrm_vct)| *x -= factor * nrm_vct); + Ok(()) } /// Hyperplanes are convex sets diff --git a/src/constraints/mod.rs b/src/constraints/mod.rs index d4ce2462..a4cf3c22 100644 --- a/src/constraints/mod.rs +++ b/src/constraints/mod.rs @@ -8,6 +8,8 @@ //! //! 
[`Constraint`]: trait.Constraint.html +use crate::FunctionCallResult; + mod affine_space; mod ball1; mod ball2; @@ -57,7 +59,7 @@ pub trait Constraint { /// /// - `x`: The given vector $x$ is updated with the projection on the set /// - fn project(&self, x: &mut [f64]); + fn project(&self, x: &mut [f64]) -> FunctionCallResult; /// Returns true if and only if the set is convex fn is_convex(&self) -> bool; diff --git a/src/constraints/no_constraints.rs b/src/constraints/no_constraints.rs index 88df8845..cebb0057 100644 --- a/src/constraints/no_constraints.rs +++ b/src/constraints/no_constraints.rs @@ -1,4 +1,5 @@ use super::Constraint; +use crate::FunctionCallResult; /// The whole space, no constraints #[derive(Default, Clone, Copy)] @@ -13,7 +14,9 @@ impl NoConstraints { } impl Constraint for NoConstraints { - fn project(&self, _x: &mut [f64]) {} + fn project(&self, _x: &mut [f64]) -> FunctionCallResult { + Ok(()) + } fn is_convex(&self) -> bool { true diff --git a/src/constraints/rectangle.rs b/src/constraints/rectangle.rs index 76b52ceb..a4511b59 100644 --- a/src/constraints/rectangle.rs +++ b/src/constraints/rectangle.rs @@ -1,4 +1,5 @@ use super::Constraint; +use crate::FunctionCallResult; #[derive(Clone, Copy)] /// @@ -36,10 +37,13 @@ impl<'a> Rectangle<'a> { /// pub fn new(xmin: Option<&'a [f64]>, xmax: Option<&'a [f64]>) -> Self { assert!(xmin.is_some() || xmax.is_some()); // xmin or xmax must be Some - assert!( - xmin.is_none() || xmax.is_none() || xmin.unwrap().len() == xmax.unwrap().len(), - "incompatible dimensions of xmin and xmax" - ); + if let (Some(xmin), Some(xmax)) = (xmin, xmax) { + assert_eq!( + xmin.len(), + xmax.len(), + "incompatible dimensions of xmin and xmax" + ); + } if let (Some(xmin), Some(xmax)) = (xmin, xmax) { assert!( xmin.iter() @@ -54,7 +58,7 @@ impl<'a> Rectangle<'a> { } impl<'a> Constraint for Rectangle<'a> { - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { if let Some(xmin) = 
&self.xmin { assert_eq!( x.len(), @@ -80,6 +84,7 @@ impl<'a> Constraint for Rectangle<'a> { }; }); } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/simplex.rs b/src/constraints/simplex.rs index 061b390e..2963cbc4 100644 --- a/src/constraints/simplex.rs +++ b/src/constraints/simplex.rs @@ -1,4 +1,5 @@ use super::Constraint; +use crate::FunctionCallResult; #[derive(Copy, Clone)] /// A simplex with level $\alpha$ is a set of the form @@ -24,7 +25,7 @@ impl Constraint for Simplex { /// See: Laurent Condat. Fast Projection onto the Simplex and the $\ell_1$ Ball. /// Mathematical Programming, Series A, Springer, 2016, 158 (1), pp.575-585. /// ⟨10.1007/s10107-015-0946-6⟩. - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { assert!(!x.is_empty(), "x must be nonempty"); let a = &self.alpha; @@ -82,6 +83,7 @@ impl Constraint for Simplex { // ---- step 6 let zero: f64 = 0.0; x.iter_mut().for_each(|x_n| *x_n = zero.max(*x_n - rho)); + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/soc.rs b/src/constraints/soc.rs index 8ff0759b..91fce945 100644 --- a/src/constraints/soc.rs +++ b/src/constraints/soc.rs @@ -1,5 +1,6 @@ use super::Constraint; use crate::matrix_operations; +use crate::FunctionCallResult; #[derive(Clone, Copy)] /// @@ -56,7 +57,7 @@ impl Constraint for SecondOrderCone { /// /// This method panics if the length of `x` is less than 2. 
/// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { // x = (z, r) let n = x.len(); assert!(n >= 2, "x must be of dimension at least 2"); @@ -72,6 +73,7 @@ impl Constraint for SecondOrderCone { .for_each(|v| *v *= self.alpha * beta / norm_z); x[n - 1] = beta; } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/sphere2.rs b/src/constraints/sphere2.rs index 86433855..8cb38695 100644 --- a/src/constraints/sphere2.rs +++ b/src/constraints/sphere2.rs @@ -1,4 +1,5 @@ use super::Constraint; +use crate::FunctionCallResult; #[derive(Copy, Clone)] /// A Euclidean sphere, that is, a set given by $S_2^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert = r\\}$ @@ -38,7 +39,7 @@ impl<'a> Constraint for Sphere2<'a> { /// Panics if `x` is empty or, when a center is provided, if `x` and /// `center` have incompatible dimensions. /// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { let epsilon = 1e-12; assert!(!x.is_empty(), "x must be nonempty"); if let Some(center) = &self.center { @@ -51,7 +52,7 @@ impl<'a> Constraint for Sphere2<'a> { if norm_difference <= epsilon { x.copy_from_slice(center); x[0] += self.radius; - return; + return Ok(()); } x.iter_mut().zip(center.iter()).for_each(|(x, c)| { *x = *c + self.radius * (*x - *c) / norm_difference; @@ -60,11 +61,12 @@ impl<'a> Constraint for Sphere2<'a> { let norm_x = crate::matrix_operations::norm2(x); if norm_x <= epsilon { x[0] += self.radius; - return; + return Ok(()); } let norm_over_radius = self.radius / norm_x; x.iter_mut().for_each(|x_| *x_ *= norm_over_radius); } + Ok(()) } /// Returns false (the sphere is not a convex set) diff --git a/src/constraints/tests.rs b/src/constraints/tests.rs index 58f88f25..a6734b8b 100644 --- a/src/constraints/tests.rs +++ b/src/constraints/tests.rs @@ -10,7 +10,7 @@ fn t_zero_set() { let zero = Zero::new(); let mut x = [1.0, 2.0, 3.0]; let x_projection = [0.0; 3]; - 
zero.project(&mut x); + zero.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x_projection, &x, @@ -31,7 +31,7 @@ fn t_hyperplane() { 0.285_714_285_714_286, 0.928_571_428_571_429, ]; - hyperplane.project(&mut x); + hyperplane.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x, &x_proj_expected, @@ -54,7 +54,7 @@ fn t_hyperplane_wrong_dimension() { let normal_vector = [1.0, 2.0, 3.0]; let hyperplane = Hyperplane::new(&normal_vector, 1.0); let mut x = [1.0, 2.0]; - hyperplane.project(&mut x); + hyperplane.project(&mut x).unwrap(); } #[test] @@ -64,7 +64,7 @@ fn t_halfspace_project_inside() { let halfspace = Halfspace::new(&normal_vector, offset); let mut x = [-1., 3.]; let x_expected = [-1., 3.]; - halfspace.project(&mut x); + halfspace.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x, &x_expected, @@ -81,7 +81,7 @@ fn t_halfspace_project_outside() { let halfspace = Halfspace::new(&normal_vector, offset); let mut x = [-1., 3.]; let x_expected = [-1.8, 1.4]; - halfspace.project(&mut x); + halfspace.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x, &x_expected, @@ -112,7 +112,7 @@ fn t_finite_set() { let data: &[&[f64]] = &[&[0.0, 0.0], &[1.0, 1.0], &[0.0, 1.0], &[1.0, 0.0]]; let finite_set = FiniteSet::new(data); let mut x = [0.7, 0.6]; - finite_set.project(&mut x); + finite_set.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[1.0, 1.0], &x, @@ -121,7 +121,7 @@ fn t_finite_set() { "projection is wrong (should be [1,1])", ); x = [-0.1, 0.2]; - finite_set.project(&mut x); + finite_set.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[0.0, 0.0], &x, @@ -130,7 +130,7 @@ fn t_finite_set() { "projection is wrong (should be [0,0])", ); x = [0.48, 0.501]; - finite_set.project(&mut x); + finite_set.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[0.0, 1.0], &x, @@ -139,7 +139,7 @@ fn t_finite_set() { "projection is 
wrong (should be [0,1])", ); x = [0.7, 0.2]; - finite_set.project(&mut x); + finite_set.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[1.0, 0.0], &x, @@ -155,7 +155,7 @@ fn t_finite_set_project_wrong_dimension() { let data: &[&[f64]] = &[&[0.0, 0.0], &[1.0, 1.0]]; let finite_set = FiniteSet::new(data); let mut x = [0.5, 0.5, 0.5]; - finite_set.project(&mut x); + finite_set.project(&mut x).unwrap(); } #[test] @@ -165,7 +165,7 @@ fn t_rectangle_bounded() { let rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..])); let mut x = [1.0, 2.0, 3.0, 4.0, 5.0]; - rectangle.project(&mut x); + rectangle.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[2.0, 2.0, 3.0, 4.0, 4.5], @@ -183,7 +183,7 @@ fn t_rectangle_infinite_bounds() { let rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..])); let mut x = [-2.0, 3.0, 1.0]; - rectangle.project(&mut x); + rectangle.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[-1.0, 3.0, 1.0], @@ -217,7 +217,7 @@ fn t_rectangle_bounded_negative_entries() { let rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..])); let mut x = [-6.0, -3.0, 0.0, 3.0, -5.0, 1.0, 2.0, 3.0, -1.0, 0.0, 0.0]; - rectangle.project(&mut x); + rectangle.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[-5.0, -3.0, -1.0, 2.0, -1.0, 0.0, 2.0, 3.0, 3.0, 4.0, 5.0], @@ -234,7 +234,7 @@ fn t_rectangle_only_xmin() { let rectangle = Rectangle::new(Some(&xmin[..]), None); let mut x = [1.0, 2.0, 3.0, 4.0, 5.0]; - rectangle.project(&mut x); + rectangle.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[2.0, 2.0, 3.0, 4.0, 5.0], @@ -251,7 +251,7 @@ fn t_rectangle_only_xmax() { let rectangle = Rectangle::new(None, Some(&xmax[..])); let mut x = [-10.0, -20.0, 0.0, 5.0, 3.0]; - rectangle.project(&mut x); + rectangle.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[-10.0, -20.0, -3.0, -3.0, -3.0], @@ -268,7 +268,7 @@ fn 
t_ball2_at_origin() { let mut x = [1.0, 1.0]; let ball = Ball2::new(None, radius); - ball.project(&mut x); + ball.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[ @@ -287,7 +287,7 @@ fn t_ball2_at_origin_different_radius_outside() { let radius = 0.8; let mut x = [1.0, 1.0]; let ball = Ball2::new(None, radius); - ball.project(&mut x); + ball.project(&mut x).unwrap(); let norm_proj_x = crate::matrix_operations::norm2(&x); unit_test_utils::assert_nearly_equal(radius, norm_proj_x, 1e-10, 1e-12, "wrong norm"); } @@ -297,7 +297,7 @@ fn t_ball2_at_origin_different_radius_inside() { let radius = 0.8; let mut x = [-0.2, 0.15]; let ball = Ball2::new(None, radius); - ball.project(&mut x); + ball.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&x, &[-0.2, 0.15], 1e-10, 1e-12, "wrong"); } @@ -307,7 +307,7 @@ fn t_ball2_at_center_different_radius_outside() { let mut x = [1.0, 1.0]; let center = [-0.8, -1.1]; let ball = Ball2::new(Some(¢er), radius); - ball.project(&mut x); + ball.project(&mut x).unwrap(); let norm_x_minus_c = crate::matrix_operations::norm2_squared_diff(&x, ¢er).sqrt(); unit_test_utils::assert_nearly_equal(radius, norm_x_minus_c, 1e-10, 1e-12, "wrong norm"); } @@ -318,7 +318,7 @@ fn t_ball2_at_center_different_radius_inside() { let mut x = [-0.9, -0.85]; let center = [-0.8, -1.1]; let ball = Ball2::new(Some(¢er), radius); - ball.project(&mut x); + ball.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[-0.9, -0.85], &x, 1e-10, 1e-12, "wrong result"); } @@ -329,7 +329,7 @@ fn t_ball2_elsewhere() { let mut x = [2.0, 2.0]; let ball = Ball2::new(Some(¢er[..]), radius); - ball.project(&mut x); + ball.project(&mut x).unwrap(); let expected_proj_element = std::f64::consts::FRAC_1_SQRT_2 + 1.; unit_test_utils::assert_nearly_equal_array( @@ -346,7 +346,7 @@ fn t_no_constraints() { let mut x = [1.0, 2.0, 3.0]; let whole_space = NoConstraints::new(); - whole_space.project(&mut x); + whole_space.project(&mut 
x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[1., 2., 3.], &x, 1e-10, 1e-15, "x is wrong"); } @@ -370,7 +370,7 @@ fn t_cartesian_product_constraints_wrong_vector_dim() { .add_constraint(3, ball1) .add_constraint(10, ball2); let mut x = [0.0; 30]; - cart_prod.project(&mut x); + cart_prod.project(&mut x).unwrap(); } #[test] @@ -385,7 +385,7 @@ fn t_cartesian_product_constraints() { .add_constraint(idx1, ball1) .add_constraint(idx2, ball2); let mut x = [3.0, 4.0, 5.0, 2.0, 1.0]; - cart_prod.project(&mut x); + cart_prod.project(&mut x).unwrap(); let r1 = crate::matrix_operations::norm2(&x[0..idx1]); let r2 = crate::matrix_operations::norm2(&x[idx1..idx2]); unit_test_utils::assert_nearly_equal(r1, radius1, 1e-8, 1e-12, "r1 is wrong"); @@ -416,7 +416,7 @@ fn t_cartesian_product_ball_and_rectangle() { /* Projection */ let mut x = [-10.0, 0.5, 10.0, 0.01, -0.01, 0.1, 10.0, -1.0, 1.0]; - cart_prod.project(&mut x); + cart_prod.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x[0..3], @@ -443,7 +443,7 @@ fn t_second_order_cone_case_i() { let soc = SecondOrderCone::new(1.0); let mut x = vec![1.0, 1.0, 1.42]; let x_copy = x.clone(); - soc.project(&mut x); + soc.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&x, &x_copy, 1e-10, 1e-12, "x has been modified"); } @@ -452,7 +452,7 @@ fn t_second_order_cone_case_ii() { let alpha = 0.5; let soc = SecondOrderCone::new(alpha); let mut x = vec![1.0, 1.0, -0.71]; - soc.project(&mut x); + soc.project(&mut x).unwrap(); let expected = vec![0.0; 3]; unit_test_utils::assert_nearly_equal_array( &x, @@ -468,7 +468,7 @@ fn t_second_order_cone_case_iii() { let alpha = 1.5; let soc = SecondOrderCone::new(alpha); let mut x = vec![1.0, 1.0, 0.1]; - soc.project(&mut x); + soc.project(&mut x).unwrap(); // make sure the new `x` is in the cone let norm_z = crate::matrix_operations::norm2(&x[..=1]); assert!(norm_z <= alpha * x[2]); @@ -496,7 +496,7 @@ fn t_second_order_cone_short_vector() { let alpha 
= 1.0; let soc = SecondOrderCone::new(alpha); let mut _x = vec![1.0]; - soc.project(&mut _x); + soc.project(&mut _x).unwrap(); } #[test] @@ -516,7 +516,7 @@ fn t_cartesian_product_dimension() { // let's do a projection to make sure this works // Note: we've used the same set (finite_set), twice let mut x = [-0.5, 1.1, 0.45, 0.55, 10.0, 10.0, -500.0, 1.0, 1.0, 1.0]; - cartesian.project(&mut x); + cartesian.project(&mut x).unwrap(); println!("X = {:#?}", x); let sqrt_3_over_3 = 3.0_f64.sqrt() / 3.; unit_test_utils::assert_nearly_equal_array( @@ -552,7 +552,7 @@ fn t_cartesian_ball_no_constraint() { .add_constraint(9, no_constraints); assert_eq!(9, cartesian.dimension()); let mut x = [100., -200., 0.5, 1.5, 3.5, 1000., 5., -500., 2_000_000.]; - cartesian.project(&mut x); + cartesian.project(&mut x).unwrap(); let x_proj_ball = [0.869811089019176, 0.390566732942472, 0.911322376865767]; unit_test_utils::assert_nearly_equal_array( &x[0..=1], @@ -576,7 +576,7 @@ fn t_ball_inf_origin() { let ball_inf = BallInf::new(None, 1.0); let mut x = [0.0, -0.5, 0.5, 1.5, 3.5, 0.8, 1.1, -5.0, -10.0]; let x_correct = [0.0, -0.5, 0.5, 1.0, 1.0, 0.8, 1.0, -1.0, -1.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x_correct, &x, @@ -592,31 +592,31 @@ fn t_ball_inf_center() { let xc = [5.0, -6.0]; let ball_inf = BallInf::new(Some(&xc), 1.5); let mut x = [11.0, -0.5]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[6.5, -4.5], &x, 1e-10, 1e-12, "upper right"); let mut x = [3.0, -7.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[3.5, -7.0], &x, 1e-10, 1e-12, "left"); let mut x = [800.0, -5.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[6.5, -5.0], &x, 1e-10, 1e-12, "right"); let mut x = [9.0, -10.0]; - ball_inf.project(&mut x); + 
ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[6.5, -7.5], &x, 1e-10, 1e-12, "down right"); let mut x = [3.0, 0.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[3.5, -4.5], &x, 1e-10, 1e-12, "top left"); let mut x = [6.0, -5.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[6.0, -5.0], &x, 1e-10, 1e-12, "inside"); let mut x = [5.0, -6.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[5.0, -6.0], &x, 1e-10, 1e-12, "centre"); } @@ -690,7 +690,7 @@ fn t_simplex_projection() { let mut x = [1.0, 2.0, 3.0]; let alpha = 3.0; let my_simplex = Simplex::new(alpha); - my_simplex.project(&mut x); + my_simplex.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal( crate::matrix_operations::sum(&x), alpha, @@ -712,7 +712,7 @@ fn t_simplex_projection_random_spam() { let alpha_scale = 20.; let alpha = alpha_scale * rand::random::(); let simplex = Simplex::new(alpha); - simplex.project(&mut x); + simplex.project(&mut x).unwrap(); println!("x = {:?}", x); assert!(x.iter().all(|&xi| xi >= -1e-12)); unit_test_utils::assert_nearly_equal( @@ -737,7 +737,7 @@ fn t_simplex_projection_random_optimality() { let alpha = alpha_scale * rand::random::(); let simplex = Simplex::new(alpha); let y = z.clone(); - simplex.project(&mut z); + simplex.project(&mut z).unwrap(); for j in 0..n { let w = alpha * (y[j] - z[j]) - crate::matrix_operations::inner_product(&z, &y) + crate::matrix_operations::norm2_squared(&z); @@ -769,7 +769,7 @@ fn t_simplex_alpha_negative() { fn t_simplex_empty_vector() { let simplex = Simplex::new(1.0); let mut x = []; - simplex.project(&mut x); + simplex.project(&mut x).unwrap(); } #[test] @@ -786,7 +786,7 @@ fn t_ball1_random_optimality_conditions() { x.copy_from_slice(&x_star); let radius = 5. 
* rand::random::(); let ball1 = Ball1::new(None, radius); - ball1.project(&mut x_star); + ball1.project(&mut x_star).unwrap(); // make sure |x|_1 <= radius assert!( crate::matrix_operations::norm1(&x_star) <= radius * (1. + 1e-9), @@ -835,7 +835,7 @@ fn t_ball1_random_optimality_conditions_centered() { .for_each(|xi| *xi = scale_xc * (2. * rand::random::() - 1.)); let radius = 5. * rand::random::(); let ball1 = Ball1::new(Some(&xc), radius); - ball1.project(&mut x); + ball1.project(&mut x).unwrap(); // x = x - xc x.iter_mut() .zip(xc.iter()) @@ -855,7 +855,7 @@ fn t_ball1_wrong_dimensions() { let mut x = vec![3.0, 4.0, 5.0]; let radius = 1.0; let ball1 = Ball1::new(Some(&xc), radius); - ball1.project(&mut x); + ball1.project(&mut x).unwrap(); } #[test] @@ -864,8 +864,8 @@ fn t_sphere2_no_center() { let mut x_out = [1.0, 1.0]; let mut x_in = [-0.3, -0.2]; let unit_sphere = Sphere2::new(None, radius); - unit_sphere.project(&mut x_out); - unit_sphere.project(&mut x_in); + unit_sphere.project(&mut x_out).unwrap(); + unit_sphere.project(&mut x_in).unwrap(); let norm_out = crate::matrix_operations::norm2(&x_out); let norm_in = crate::matrix_operations::norm2(&x_in); unit_test_utils::assert_nearly_equal(radius, norm_out, 1e-10, 1e-12, "norm_out is not 1.0"); @@ -877,7 +877,7 @@ fn t_sphere2_no_center_projection_of_zero() { let radius = 0.9; let mut x = [0.0, 0.0]; let unit_sphere = Sphere2::new(None, radius); - unit_sphere.project(&mut x); + unit_sphere.project(&mut x).unwrap(); let norm_result = crate::matrix_operations::norm2(&x); unit_test_utils::assert_nearly_equal(radius, norm_result, 1e-10, 1e-12, "norm_out is not 1.0"); } @@ -889,7 +889,7 @@ fn t_sphere2_center() { let mut x = [1.0, 1.0]; let unit_sphere = Sphere2::new(Some(¢er), radius); - unit_sphere.project(&mut x); + unit_sphere.project(&mut x).unwrap(); let mut x_minus_c = [0.0; 2]; x.iter() .zip(center.iter()) @@ -909,7 +909,7 @@ fn t_sphere2_center_projection_of_center() { let mut x = [-3.0, 5.0]; let 
unit_sphere = Sphere2::new(Some(¢er), radius); - unit_sphere.project(&mut x); + unit_sphere.project(&mut x).unwrap(); let mut x_minus_c = [0.0; 2]; x.iter() .zip(center.iter()) @@ -928,7 +928,7 @@ fn t_sphere2_empty_vector() { let radius = 1.0; let unit_sphere = Sphere2::new(None, radius); let mut x = []; - unit_sphere.project(&mut x); + unit_sphere.project(&mut x).unwrap(); } #[test] @@ -938,7 +938,7 @@ fn t_sphere2_center_wrong_dimension() { let center = [1.0, 2.0, 3.0]; let unit_sphere = Sphere2::new(Some(¢er), radius); let mut x = [1.0, 2.0]; - unit_sphere.project(&mut x); + unit_sphere.project(&mut x).unwrap(); } #[test] @@ -952,7 +952,7 @@ fn t_epigraph_squared_norm_inside() { let epi = EpigraphSquaredNorm::new(); let mut x = [1., 2., 10.]; let x_correct = x.clone(); - epi.project(&mut x); + epi.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x_correct, &x, @@ -968,7 +968,7 @@ fn t_epigraph_squared_norm() { for i in 0..100 { let t = 0.01 * i as f64; let mut x = [1., 2., 3., t]; - epi.project(&mut x); + epi.project(&mut x).unwrap(); let err = (matrix_operations::norm2_squared(&x[..3]) - x[3]).abs(); assert!(err < 1e-10, "wrong projection on epigraph of squared norm"); } @@ -984,7 +984,7 @@ fn t_epigraph_squared_norm_correctness() { 1.680426686710711, 4.392630432414829, ]; - epi.project(&mut x); + epi.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x_correct, &x, @@ -1002,7 +1002,7 @@ fn t_affine_space() { let b = vec![1., 2., -0.5]; let affine_set = AffineSpace::new(a, b); let mut x = [1., -2., -0.3, 0.5]; - affine_set.project(&mut x); + affine_set.project(&mut x).unwrap(); let x_correct = [ 1.888564346697095, 5.629857182200888, @@ -1026,7 +1026,7 @@ fn t_affine_space_larger() { let b = vec![1., -2., 3., 4.]; let affine_set = AffineSpace::new(a, b); let mut x = [10., 11., -9., 4., 5.]; - affine_set.project(&mut x); + affine_set.project(&mut x).unwrap(); let x_correct = [ 9.238095238095237, -0.714285714285714, @@ 
-1049,7 +1049,7 @@ fn t_affine_space_single_row() { let b = vec![1.]; let affine_set = AffineSpace::new(a, b); let mut x = [5., 6., 10., 25.]; - affine_set.project(&mut x); + affine_set.project(&mut x).unwrap(); let s = x.iter().sum(); unit_test_utils::assert_nearly_equal(1., s, 1e-12, 1e-14, "wrong sum"); } @@ -1197,7 +1197,7 @@ fn t_ballp_at_origin_projection() { let tol = 1e-16; let max_iters: usize = 200; let ball = BallP::new(None, radius, p, tol, max_iters); - ball.project(&mut x); + ball.project(&mut x).unwrap(); assert!(is_norm_p_projection(&x0, &x, p, radius, 10_000)); } @@ -1210,7 +1210,7 @@ fn t_ballp_at_origin_x_already_inside() { let tol = 1e-16; let max_iters: usize = 1200; let ball = BallP::new(None, radius, p, tol, max_iters); - ball.project(&mut x); + ball.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x0, &x, @@ -1229,7 +1229,7 @@ fn t_ballp_at_xc_projection() { let tol = 1e-16; let max_iters: usize = 200; let ball = BallP::new(Some(&x_center), radius, p, tol, max_iters); - ball.project(&mut x); + ball.project(&mut x).unwrap(); let nrm = (x .iter() diff --git a/src/constraints/zero.rs b/src/constraints/zero.rs index 81064d33..fe9a1114 100644 --- a/src/constraints/zero.rs +++ b/src/constraints/zero.rs @@ -1,4 +1,5 @@ use super::Constraint; +use crate::FunctionCallResult; #[derive(Clone, Copy, Default)] /// Set Zero, $\\{0\\}$ @@ -14,8 +15,9 @@ impl Zero { impl Constraint for Zero { /// Computes the projection on $\\{0\\}$, that is, $\Pi_{\\{0\\}}(x) = 0$ /// for all $x$ - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [f64]) -> FunctionCallResult { x.iter_mut().for_each(|xi| *xi = 0.0); + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/core/fbs/fbs_engine.rs b/src/core/fbs/fbs_engine.rs index 717e70c6..fc7392c4 100644 --- a/src/core/fbs/fbs_engine.rs +++ b/src/core/fbs/fbs_engine.rs @@ -42,22 +42,24 @@ where FBSEngine { problem, cache } } - fn gradient_step(&mut self, u_current: &mut [f64]) { - 
assert_eq!( - Ok(()), - (self.problem.gradf)(u_current, &mut self.cache.work_gradient_u), - "The computation of the gradient of the cost failed miserably" - ); + fn gradient_step(&mut self, u_current: &mut [f64]) -> FunctionCallResult { + (self.problem.gradf)(u_current, &mut self.cache.work_gradient_u)?; + if !crate::matrix_operations::is_finite(&self.cache.work_gradient_u) { + return Err(SolverError::NotFiniteComputation( + "gradient evaluation returned a non-finite value during an FBS step", + )); + } // take a gradient step: u_currect -= gamma * gradient u_current .iter_mut() .zip(self.cache.work_gradient_u.iter()) .for_each(|(u, w)| *u -= self.cache.gamma * *w); + Ok(()) } - fn projection_step(&mut self, u_current: &mut [f64]) { - self.problem.constraints.project(u_current); + fn projection_step(&mut self, u_current: &mut [f64]) -> FunctionCallResult { + self.problem.constraints.project(u_current) } } @@ -85,8 +87,13 @@ where /// or the cost function panics. fn step(&mut self, u_current: &mut [f64]) -> Result { self.cache.work_u_previous.copy_from_slice(u_current); // cache the previous step - self.gradient_step(u_current); // compute the gradient - self.projection_step(u_current); // project + self.gradient_step(u_current)?; // compute the gradient + self.projection_step(u_current)?; // project + if !crate::matrix_operations::is_finite(u_current) { + return Err(SolverError::NotFiniteComputation( + "projected iterate contains a non-finite value during an FBS step", + )); + } self.cache.norm_fpr = matrix_operations::norm_inf_diff(u_current, &self.cache.work_u_previous); diff --git a/src/core/fbs/fbs_optimizer.rs b/src/core/fbs/fbs_optimizer.rs index d714ab67..fbdb62e6 100644 --- a/src/core/fbs/fbs_optimizer.rs +++ b/src/core/fbs/fbs_optimizer.rs @@ -124,7 +124,9 @@ where (self.fbs_engine.problem.cost)(u, &mut cost_value)?; if !matrix_operations::is_finite(u) || !cost_value.is_finite() { - return Err(SolverError::NotFiniteComputation); + return 
Err(SolverError::NotFiniteComputation( + "final FBS iterate or cost is non-finite", + )); } // export solution status diff --git a/src/core/fbs/tests.rs b/src/core/fbs/tests.rs index b94425b1..7b8f49fb 100644 --- a/src/core/fbs/tests.rs +++ b/src/core/fbs/tests.rs @@ -48,7 +48,7 @@ fn t_solve_fbs_hard_failure_nan() { let mut u = [-12., -160., 55.]; let mut optimizer = FBSOptimizer::new(problem, &mut fbs_cache).with_max_iter(10000); let status = optimizer.solve(&mut u); - assert_eq!(Err(SolverError::NotFiniteComputation), status); + assert!(matches!(status, Err(SolverError::NotFiniteComputation(_)))); } #[test] diff --git a/src/core/panoc/panoc_engine.rs b/src/core/panoc/panoc_engine.rs index 63ad862d..385dd9d9 100644 --- a/src/core/panoc/panoc_engine.rs +++ b/src/core/panoc/panoc_engine.rs @@ -135,13 +135,14 @@ where } /// Computes a projection on `gradient_step` - fn half_step(&mut self) { + fn half_step(&mut self) -> FunctionCallResult { let cache = &mut self.cache; // u_half_step ← projection(gradient_step) cache.u_half_step.copy_from_slice(&cache.gradient_step); - self.problem.constraints.project(&mut cache.u_half_step); + self.problem.constraints.project(&mut cache.u_half_step)?; cache.gradient_step_u_half_step_diff_norm_sq = matrix_operations::norm2_squared_diff(&cache.gradient_step, &cache.u_half_step); + Ok(()) } /// Computes an LBFGS direction; updates `cache.direction_lbfgs` @@ -179,7 +180,11 @@ where // Compute the cost at the half step (self.problem.cost)(&self.cache.u_half_step, &mut cost_u_half_step)?; - debug_assert!(matrix_operations::is_finite(&[self.cache.cost_value])); + if !matrix_operations::is_finite(&[self.cache.cost_value, cost_u_half_step]) { + return Err(SolverError::NotFiniteComputation( + "cost evaluation returned a non-finite value during Lipschitz estimation", + )); + } let mut it_lipschitz_search = 0; @@ -195,11 +200,16 @@ where // recompute the half step... 
self.gradient_step(u_current); // updates self.cache.gradient_step - self.half_step(); // updates self.cache.u_half_step + self.half_step()?; // updates self.cache.u_half_step // recompute the cost at the half step // update `cost_u_half_step` (self.problem.cost)(&self.cache.u_half_step, &mut cost_u_half_step)?; + if !cost_u_half_step.is_finite() { + return Err(SolverError::NotFiniteComputation( + "half-step cost became non-finite during Lipschitz backtracking", + )); + } // recompute the FPR and the square of its norm self.compute_fpr(u_current); @@ -254,10 +264,16 @@ where // point `u_plus` (self.problem.cost)(&self.cache.u_plus, &mut self.cache.cost_value)?; (self.problem.gradf)(&self.cache.u_plus, &mut self.cache.gradient_u)?; + if !self.cache.cost_value.is_finite() || !matrix_operations::is_finite(&self.cache.gradient_u) + { + return Err(SolverError::NotFiniteComputation( + "line-search candidate produced a non-finite cost or gradient", + )); + } self.cache_gradient_norm(); self.gradient_step_uplus(); // gradient_step ← u_plus - gamma * gradient_u - self.half_step(); // u_half_step ← project(gradient_step) + self.half_step()?; // u_half_step ← project(gradient_step) // Update the LHS of the line search condition self.cache.lhs_ls = self.cache.cost_value - 0.5 * gamma * self.cache.gradient_u_norm_sq @@ -271,9 +287,15 @@ where u_current.copy_from_slice(&self.cache.u_half_step); // set u_current ← u_half_step (self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value (self.problem.gradf)(u_current, &mut self.cache.gradient_u)?; // compute gradient + if !self.cache.cost_value.is_finite() || !matrix_operations::is_finite(&self.cache.gradient_u) + { + return Err(SolverError::NotFiniteComputation( + "first PANOC iterate produced a non-finite cost or gradient", + )); + } self.cache_gradient_norm(); self.gradient_step(u_current); // updated self.cache.gradient_step - self.half_step(); // updates self.cache.u_half_step + self.half_step()?; // updates 
self.cache.u_half_step Ok(()) } @@ -302,6 +324,11 @@ where pub(crate) fn cost_value_at_best_half_step(&mut self) -> Result { let mut cost = 0.0; (self.problem.cost)(&self.cache.best_u_half_step, &mut cost)?; + if !cost.is_finite() { + return Err(SolverError::NotFiniteComputation( + "best cached half-step cost is non-finite", + )); + } Ok(cost) } } @@ -362,11 +389,17 @@ where self.cache.reset(); (self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value self.estimate_loc_lip(u_current)?; // computes the gradient as well! (self.cache.gradient_u) + if !self.cache.cost_value.is_finite() || !matrix_operations::is_finite(&self.cache.gradient_u) + { + return Err(SolverError::NotFiniteComputation( + "initial PANOC cost or gradient is non-finite", + )); + } self.cache_gradient_norm(); self.cache.gamma = GAMMA_L_COEFF / f64::max(self.cache.lipschitz_constant, MIN_L_ESTIMATE); self.cache.sigma = (1.0 - GAMMA_L_COEFF) / (4.0 * self.cache.gamma); self.gradient_step(u_current); // updated self.cache.gradient_step - self.half_step(); // updates self.cache.u_half_step + self.half_step()?; // updates self.cache.u_half_step Ok(()) } @@ -478,7 +511,7 @@ mod tests { .gradient_step .copy_from_slice(&[40., 50.]); - panoc_engine.half_step(); // u_half_step ← projection(gradient_step) + panoc_engine.half_step().unwrap(); // u_half_step ← projection(gradient_step) unit_test_utils::assert_nearly_equal_array( &[0.312_347_523_777_212, 0.390_434_404_721_515], diff --git a/src/core/panoc/panoc_optimizer.rs b/src/core/panoc/panoc_optimizer.rs index 98f87667..95f1a919 100644 --- a/src/core/panoc/panoc_optimizer.rs +++ b/src/core/panoc/panoc_optimizer.rs @@ -159,7 +159,9 @@ where // check for possible NaN/inf if !matrix_operations::is_finite(u) { - return Err(SolverError::NotFiniteComputation); + return Err(SolverError::NotFiniteComputation( + "final PANOC iterate contains a non-finite value", + )); } // exit status @@ -239,7 +241,7 @@ mod tests { /* CHECK FEASIBILITY */ let mut 
u_project = [0.0; 2]; u_project.copy_from_slice(&u_solution); - bounds.project(&mut u_project); + bounds.project(&mut u_project).unwrap(); unit_test_utils::assert_nearly_equal_array( &u_solution, &u_project, diff --git a/src/lib.rs b/src/lib.rs index e0f61bb0..01362d68 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -40,13 +40,55 @@ extern crate num; +use std::fmt; + /// Exceptions/Errors that may arise while solving a problem #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum SolverError { /// If the gradient or cost function cannot be evaluated - Cost, + Cost(&'static str), /// Computation failed and NaN/Infinite value was obtained - NotFiniteComputation, + NotFiniteComputation(&'static str), + /// A projection could not be computed numerically + ProjectionFailed(&'static str), + /// A linear algebra operation failed + LinearAlgebraFailure(&'static str), + /// The solver reached an unexpected internal state + InvalidProblemState(&'static str), +} + +impl fmt::Display for SolverError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SolverError::Cost(reason) => { + write!(f, "cost or gradient evaluation failed: {}", reason) + } + SolverError::NotFiniteComputation(reason) => { + write!(f, "non-finite computation: {}", reason) + } + SolverError::ProjectionFailed(reason) => write!(f, "projection failed: {}", reason), + SolverError::LinearAlgebraFailure(reason) => { + write!(f, "linear algebra failure: {}", reason) + } + SolverError::InvalidProblemState(reason) => { + write!(f, "invalid internal problem state: {}", reason) + } + } + } +} + +impl std::error::Error for SolverError {} + +impl From for SolverError { + fn from(_: crate::matrix_operations::MatrixError) -> Self { + SolverError::LinearAlgebraFailure("matrix operation failed") + } +} + +impl From for SolverError { + fn from(_: crate::cholesky_factorizer::CholeskyError) -> Self { + SolverError::LinearAlgebraFailure("Cholesky factorization or solve failed") + } } /// Result of a 
function call (status) From 1a7bbd76a0f64cb5481ede86e374fa1231f0f7b9 Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 19:41:22 +0000 Subject: [PATCH 074/133] [ci skip] prepare new releases From bfa66241133c45caa102397daab5dcf59b9f204e Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 19:42:22 +0000 Subject: [PATCH 075/133] cargo fmt --- src/alm/alm_optimizer.rs | 41 ++++++++++++++++----------------- src/constraints/affine_space.rs | 9 +++----- src/core/panoc/panoc_engine.rs | 9 +++++--- 3 files changed, 29 insertions(+), 30 deletions(-) diff --git a/src/alm/alm_optimizer.rs b/src/alm/alm_optimizer.rs index c7a99281..29cd1f34 100644 --- a/src/alm/alm_optimizer.rs +++ b/src/alm/alm_optimizer.rs @@ -764,13 +764,14 @@ where // This function will panic is there is no akkt_tolerance // This should never happen because we set the AKKT tolerance // in the constructor and can never become `None` again - let criterion_3 = cache - .panoc_cache - .akkt_tolerance - .ok_or(SolverError::InvalidProblemState( - "missing inner AKKT tolerance while checking the exit criterion", - ))? - <= self.epsilon_tolerance + SMALL_EPSILON; + let criterion_3 = + cache + .panoc_cache + .akkt_tolerance + .ok_or(SolverError::InvalidProblemState( + "missing inner AKKT tolerance while checking the exit criterion", + ))? 
+ <= self.epsilon_tolerance + SMALL_EPSILON; Ok(criterion_1 && criterion_2 && criterion_3) } @@ -811,12 +812,13 @@ where fn update_inner_akkt_tolerance(&mut self) -> FunctionCallResult { let cache = &mut self.alm_cache; // epsilon_{nu+1} := max(epsilon, beta*epsilon_nu) - let akkt_tolerance = cache - .panoc_cache - .akkt_tolerance - .ok_or(SolverError::InvalidProblemState( - "missing inner AKKT tolerance while updating it", - ))?; + let akkt_tolerance = + cache + .panoc_cache + .akkt_tolerance + .ok_or(SolverError::InvalidProblemState( + "missing inner AKKT tolerance while updating it", + ))?; cache.panoc_cache.set_akkt_tolerance(f64::max( akkt_tolerance * self.epsilon_update_factor, self.epsilon_tolerance, @@ -992,14 +994,11 @@ where .with_penalty(c) .with_cost(cost); if self.alm_problem.n1 > 0 { - let status = status.with_lagrange_multipliers( - self.alm_cache - .y_plus - .as_ref() - .ok_or(SolverError::InvalidProblemState( - "missing Lagrange multipliers at the ALM solution", - ))?, - ); + let status = status.with_lagrange_multipliers(self.alm_cache.y_plus.as_ref().ok_or( + SolverError::InvalidProblemState( + "missing Lagrange multipliers at the ALM solution", + ), + )?); Ok(status) } else { Ok(status) diff --git a/src/constraints/affine_space.rs b/src/constraints/affine_space.rs index 7ca78608..e783c1ee 100644 --- a/src/constraints/affine_space.rs +++ b/src/constraints/affine_space.rs @@ -86,12 +86,9 @@ impl Constraint for AffineSpace { assert!(x.len() == n, "x has wrong dimension"); // Step 1: Compute e = Ax - b - let a = ArrayView2::from_shape((self.n_rows, self.n_cols), &self.a_mat) - .map_err(|_| { - SolverError::InvalidProblemState( - "failed to construct the affine-space matrix view", - ) - })?; + let a = ArrayView2::from_shape((self.n_rows, self.n_cols), &self.a_mat).map_err(|_| { + SolverError::InvalidProblemState("failed to construct the affine-space matrix view") + })?; let x_view = ArrayView1::from(&x[..]); let b = ArrayView1::from(&self.b_vec[..]); 
let e = a.dot(&x_view) - b; diff --git a/src/core/panoc/panoc_engine.rs b/src/core/panoc/panoc_engine.rs index 385dd9d9..554ca439 100644 --- a/src/core/panoc/panoc_engine.rs +++ b/src/core/panoc/panoc_engine.rs @@ -264,7 +264,8 @@ where // point `u_plus` (self.problem.cost)(&self.cache.u_plus, &mut self.cache.cost_value)?; (self.problem.gradf)(&self.cache.u_plus, &mut self.cache.gradient_u)?; - if !self.cache.cost_value.is_finite() || !matrix_operations::is_finite(&self.cache.gradient_u) + if !self.cache.cost_value.is_finite() + || !matrix_operations::is_finite(&self.cache.gradient_u) { return Err(SolverError::NotFiniteComputation( "line-search candidate produced a non-finite cost or gradient", @@ -287,7 +288,8 @@ where u_current.copy_from_slice(&self.cache.u_half_step); // set u_current ← u_half_step (self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value (self.problem.gradf)(u_current, &mut self.cache.gradient_u)?; // compute gradient - if !self.cache.cost_value.is_finite() || !matrix_operations::is_finite(&self.cache.gradient_u) + if !self.cache.cost_value.is_finite() + || !matrix_operations::is_finite(&self.cache.gradient_u) { return Err(SolverError::NotFiniteComputation( "first PANOC iterate produced a non-finite cost or gradient", @@ -389,7 +391,8 @@ where self.cache.reset(); (self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value self.estimate_loc_lip(u_current)?; // computes the gradient as well! 
(self.cache.gradient_u) - if !self.cache.cost_value.is_finite() || !matrix_operations::is_finite(&self.cache.gradient_u) + if !self.cache.cost_value.is_finite() + || !matrix_operations::is_finite(&self.cache.gradient_u) { return Err(SolverError::NotFiniteComputation( "initial PANOC cost or gradient is non-finite", From 0bfc8bb24b9b7c03f29d64fdab5dc7da5d92e4bd Mon Sep 17 00:00:00 2001 From: Pantelis Sopasakis Date: Fri, 27 Mar 2026 20:16:26 +0000 Subject: [PATCH 076/133] [ci skip] update website docs and changelog --- CHANGELOG.md | 2 +- docs/openrust-features.mdx | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6a402e38..03ccc2a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,7 @@ Note: This is the main Changelog file for the Rust solver. The Changelog file fo - Rust solver supports generic float types - Expanded Rust constraint test coverage with constructor validation, boundary/idempotence checks, and additional `BallP` / epigraph projection cases - +- Swap the cross-platform timer dependency to web-time, remove instant-specific wasm feature wiring, update optimizer timing call sites to use `web_time::Instant`, keep existing native and wasm timing behavior without stdweb risk ## [v0.12.0] - Unreleased +### Added + +- Richer Rust-side solver errors with human-readable reasons for projection failures, non-finite computations, linear algebra failures, and invalid internal solver states +- Fallible constraint projections via `Constraint::project(...) 
-> FunctionCallResult`, with error propagation through FBS, PANOC, and ALM +- Checked affine-space construction through `AffineSpace::try_new(...)` and `AffineSpaceError` ### Changed - Rust solver supports generic float types -- Expanded Rust constraint test coverage with constructor validation, boundary/idempotence checks, and additional `BallP` / epigraph projection cases -- Swap the cross-platform timer dependency to web-time, remove instant-specific wasm feature wiring, update optimizer timing call sites to use `web_time::Instant`, keep existing native and wasm timing behavior without stdweb risk +- Expanded Rust constraint and solver test coverage with constructor validation, boundary/idempotence checks, additional `BallP` / epigraph projection cases, and broader `f32`/`f64` coverage +- Swapped the cross-platform timer dependency to `web-time`, removed the old `instant`-specific wasm feature wiring, and updated optimizer timing call sites to use `web_time::Instant` +- Improved Rust-side error handling across constraints and core solvers so projection failures and invalid numerical states are reported explicitly instead of being silently flattened +- Refined `BallP`, `EpigraphSquaredNorm`, and related constraint implementations and docs for stronger numerical robustness and clearer behavior diff --git a/docs/openrust-features.mdx b/docs/content/openrust-features.mdx similarity index 100% rename from docs/openrust-features.mdx rename to docs/content/openrust-features.mdx diff --git a/docs/python-advanced.mdx b/docs/content/python-advanced.mdx similarity index 99% rename from docs/python-advanced.mdx rename to docs/content/python-advanced.mdx index a9b5a8fa..0eb6dc5b 100644 --- a/docs/python-advanced.mdx +++ b/docs/content/python-advanced.mdx @@ -200,7 +200,7 @@ All build options are shown below | `with_target_system` | Target system (to be used when you need to cross-compile) | | `with_build_c_bindings` | Enalbe generation of C/C++ bindings | | `with_rebuild` | 
Whether to do a clean build | -| `with_open_version` | Use a certain version of OpEn (see [all versions]), e.g., `with_open_version("0.6.0")`, or a local version of OpEn (this is useful when you want to download the latest version of OpEn from github). You can do so using `with_open_version(local_path="/path/to/open/")`. | +| `with_open_version` | Use a certain version of OpEn (see [all versions]), e.g., `with_open_version("0.6.0")`, or a local version of OpEn (this is useful when you want to download the latest version of OpEn from github). You can do so using `with_open_version(local_path="/path/to/open/rust")`. | |`with_allocator` | Available in `opengen >= 0.6.6`. Compile with a different memory allocator. The available allocators are the entries of `RustAllocator`. OpEn currently supports [Jemalloc](https://github.com/gnzlbg/jemallocator) and [Rpmalloc](https://github.com/EmbarkStudios/rpmalloc-rs).| [all versions]: https://crates.io/crates/optimization_engine/versions diff --git a/docs/python-bindings.md b/docs/content/python-bindings.md similarity index 100% rename from docs/python-bindings.md rename to docs/content/python-bindings.md diff --git a/docs/python-c.mdx b/docs/content/python-c.mdx similarity index 100% rename from docs/python-c.mdx rename to docs/content/python-c.mdx diff --git a/docs/python-examples.md b/docs/content/python-examples.md similarity index 100% rename from docs/python-examples.md rename to docs/content/python-examples.md diff --git a/docs/python-interface.md b/docs/content/python-interface.md similarity index 100% rename from docs/python-interface.md rename to docs/content/python-interface.md diff --git a/docs/python-ocp-1.mdx b/docs/content/python-ocp-1.mdx similarity index 100% rename from docs/python-ocp-1.mdx rename to docs/content/python-ocp-1.mdx diff --git a/docs/python-ocp-2.md b/docs/content/python-ocp-2.md similarity index 100% rename from docs/python-ocp-2.md rename to docs/content/python-ocp-2.md diff --git 
a/docs/python-ocp-3.md b/docs/content/python-ocp-3.md similarity index 100% rename from docs/python-ocp-3.md rename to docs/content/python-ocp-3.md diff --git a/docs/python-ocp-4.md b/docs/content/python-ocp-4.md similarity index 100% rename from docs/python-ocp-4.md rename to docs/content/python-ocp-4.md diff --git a/docs/python-ros.md b/docs/content/python-ros.md similarity index 100% rename from docs/python-ros.md rename to docs/content/python-ros.md diff --git a/docs/python-ros2.mdx b/docs/content/python-ros2.mdx similarity index 100% rename from docs/python-ros2.mdx rename to docs/content/python-ros2.mdx diff --git a/docs/python-tcp-ip.md b/docs/content/python-tcp-ip.md similarity index 100% rename from docs/python-tcp-ip.md rename to docs/content/python-tcp-ip.md diff --git a/docs/udp-sockets.md b/docs/content/udp-sockets.md similarity index 100% rename from docs/udp-sockets.md rename to docs/content/udp-sockets.md diff --git a/docs/sphinx/Makefile b/docs/sphinx/Makefile new file mode 100644 index 00000000..d0c3cbf1 --- /dev/null +++ b/docs/sphinx/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/sphinx/make.bat b/docs/sphinx/make.bat new file mode 100644 index 00000000..747ffb7b --- /dev/null +++ b/docs/sphinx/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/sphinx/source/conf.py b/docs/sphinx/source/conf.py new file mode 100644 index 00000000..f580a304 --- /dev/null +++ b/docs/sphinx/source/conf.py @@ -0,0 +1,44 @@ +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +import os +import sys +sys.path.insert(0, os.path.abspath("../../../python/opengen")) + + +def skip(app, what, name, obj, would_skip, options): + if name == "__init__": + return False + return would_skip + + +def setup(app): + app.connect("autodoc-skip-member", skip) + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + + +project = 'OpEn' +copyright = '2022, Pantelis Sopasakis' +author = 'Pantelis Sopasakis' + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.mathjax', +] + +templates_path = ['_templates'] +exclude_patterns = ['test/*'] + + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = 'sphinx_rtd_theme' +html_static_path = [] diff --git a/docs/sphinx/source/index.rst b/docs/sphinx/source/index.rst new file mode 100644 index 00000000..5a5c6619 --- /dev/null +++ b/docs/sphinx/source/index.rst @@ -0,0 +1,27 @@ +.. OpEn documentation master file, created by + sphinx-quickstart on Sat Nov 26 02:57:18 2022. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Opengen API docs +================================ + +This is the opengen API documentation. Check out the main `OpEn documentation page `_ to get started with OpEn. + +You can get a list of all `modules `_ here. + + +.. 
toctree:: + :maxdepth: 2 + :caption: Contents: + + + +Contents +================== + +* `Modules `_ +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/docs/website/README.md b/docs/website/README.md new file mode 100644 index 00000000..4357f6ca --- /dev/null +++ b/docs/website/README.md @@ -0,0 +1,59 @@ +This website now uses Docusaurus v3. + +# Development + +Install dependencies: + +```sh +yarn +``` + +Start the local dev server: + +```sh +yarn start +``` + +Build the production site: + +```sh +yarn build +``` + +Preview the production build locally: + +```sh +yarn serve +``` + +Deploy to GitHub Pages: + +```sh +yarn deploy +``` + +# Project Layout + +``` +optimization-engine/ + docs/ + content/ # documentation markdown files + sphinx/ # Sphinx API docs + website/ + blog/ # blog posts + src/ + css/ + pages/ + static/ + img/ + js/ + docusaurus.config.js + sidebars.js + package.json +``` + +# Notes + +- The docs content lives under `/docs/content`. +- Legacy inline MathJax and widget scripts are stripped at build time, and equivalent site-wide support is loaded from `docs/website/static/js`. +- Sidebar ordering now lives in `docs/website/sidebars.js`. diff --git a/docs/website/blog/2019-02-28-new-version.md b/docs/website/blog/2019-02-28-new-version.md new file mode 100644 index 00000000..7d4eeb2d --- /dev/null +++ b/docs/website/blog/2019-02-28-new-version.md @@ -0,0 +1,20 @@ +--- +title: First Release of OpEn +author: Pantelis Sopasakis +authorURL: https://github.com/alphaville +authorImageURL: https://avatars.githubusercontent.com/u/125415?v=4 +--- + +This is the first release of **Optimization Engine** (OpEn) and we're very excited about it! + + + +## Additional Info + +Mauris vestibulum ullamcorper nibh, ut semper purus pulvinar ut. Donec volutpat orci sit amet mauris malesuada, non pulvinar augue aliquam. Vestibulum ultricies at urna ut suscipit. 
Morbi iaculis, erat at imperdiet semper, ipsum nulla sodales erat, eget tincidunt justo dui quis justo. Pellentesque dictum bibendum diam at aliquet. Sed pulvinar, dolor quis finibus ornare, eros odio facilisis erat, eu rhoncus nunc dui sed ex. Nunc gravida dui massa, sed ornare arcu tincidunt sit amet. Maecenas efficitur sapien neque, a laoreet libero feugiat ut. + +Nulla facilisi. Maecenas sodales nec purus eget posuere. Sed sapien quam, pretium a risus in, porttitor dapibus erat. Sed sit amet fringilla ipsum, eget iaculis augue. Integer sollicitudin tortor quis ultricies aliquam. Suspendisse fringilla nunc in tellus cursus, at placerat tellus scelerisque. Sed tempus elit a sollicitudin rhoncus. Nulla facilisi. Morbi nec dolor dolor. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Cras et aliquet lectus. Pellentesque sit amet eros nisi. Quisque ac sapien in sapien congue accumsan. Nullam in posuere ante. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin lacinia leo a nibh fringilla pharetra. + +Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin venenatis lectus dui, vel ultrices ante bibendum hendrerit. Aenean egestas feugiat dui id hendrerit. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Curabitur in tellus laoreet, eleifend nunc id, viverra leo. Proin vulputate non dolor vel vulputate. Curabitur pretium lobortis felis, sit amet finibus lorem suscipit ut. Sed non mollis risus. Duis sagittis, mi in euismod tincidunt, nunc mauris vestibulum urna, at euismod est elit quis erat. Phasellus accumsan vitae neque eu placerat. In elementum arcu nec tellus imperdiet, eget maximus nulla sodales. Curabitur eu sapien eget nisl sodales fermentum. + +Phasellus pulvinar ex id commodo imperdiet. Praesent odio nibh, sollicitudin sit amet faucibus id, placerat at metus. Donec vitae eros vitae tortor hendrerit finibus. 
Interdum et malesuada fames ac ante ipsum primis in faucibus. Quisque vitae purus dolor. Duis suscipit ac nulla et finibus. Phasellus ac sem sed dui dictum gravida. Phasellus eleifend vestibulum facilisis. Integer pharetra nec enim vitae mattis. Duis auctor, lectus quis condimentum bibendum, nunc dolor aliquam massa, id bibendum orci velit quis magna. Ut volutpat nulla nunc, sed interdum magna condimentum non. Sed urna metus, scelerisque vitae consectetur a, feugiat quis magna. Donec dignissim ornare nisl, eget tempor risus malesuada quis. \ No newline at end of file diff --git a/docs/website/blog/2019-03-01-blog.md b/docs/website/blog/2019-03-01-blog.md new file mode 100644 index 00000000..487f1f61 --- /dev/null +++ b/docs/website/blog/2019-03-01-blog.md @@ -0,0 +1,10 @@ +--- +title: UAV navigation at ECC19 +author: Pantelis Sopasakis +authorURL: https://github.com/alphaville +authorImageURL: https://avatars.githubusercontent.com/u/125415?v=4 +--- + +![Micro aerial vehicle](/optimization-engine/img/mav.png) + +Our paper titled "Aerial navigation in obstructed environments with embedded nonlinear model predictive control" (authors: E. Small, P. Sopasakis, E. Fresk, P. Patrinos and G. Nikolakopoulos) has been accepted for publication in European Control Conference, 2019. diff --git a/docs/website/blog/2019-03-02-superscs.md b/docs/website/blog/2019-03-02-superscs.md new file mode 100644 index 00000000..e59606e3 --- /dev/null +++ b/docs/website/blog/2019-03-02-superscs.md @@ -0,0 +1,12 @@ +--- +title: "SuperSCS: OpEn's sister project" +author: Pantelis Sopasakis +authorURL: https://github.com/alphaville +authorImageURL: https://avatars.githubusercontent.com/u/125415?v=4 +--- + +[SuperSCS](https://kul-forbes.github.io/scs/) is a sister project of **OpEn**. + +SuperSCS is a fast solver for large-scale conic problems. It is written in C and can be interfaced from a lot of different programming languages. 
+ +The paper titled "SuperSCS: fast and accurate large-scale conic optimization" (authors: P. Sopasakis, K. Menounou, P. Patrinos) has been accepted at European Control Conference 2019. diff --git a/docs/website/blog/2019-03-03-risk-averse.md b/docs/website/blog/2019-03-03-risk-averse.md new file mode 100644 index 00000000..7cba7e1b --- /dev/null +++ b/docs/website/blog/2019-03-03-risk-averse.md @@ -0,0 +1,10 @@ +--- +title: Risk-averse optimal control +author: Pantelis Sopasakis +authorURL: https://github.com/alphaville +authorImageURL: https://avatars.githubusercontent.com/u/125415?v=4 +--- + +Our paper titled "Risk-averse risk-constrained optimal control," (authors: P. Sopasakis, M. Schuurmans, P. Patrinos) has been accepted for publication in European Control Conference 2019. + +This work is accompanied by the MATLAB toolbox [`marietta`](https://github.com/kul-forbes/risk-averse) which is distributed under the MIT open source license. \ No newline at end of file diff --git a/docs/website/blog/2019-03-05-matlab-raspberry.md b/docs/website/blog/2019-03-05-matlab-raspberry.md new file mode 100644 index 00000000..06ed1f9b --- /dev/null +++ b/docs/website/blog/2019-03-05-matlab-raspberry.md @@ -0,0 +1,83 @@ +--- +title: OpEn on Raspberry Pi +author: Pantelis Sopasakis +authorURL: https://github.com/alphaville +authorImageURL: https://avatars.githubusercontent.com/u/125415?v=4 +--- + +Here we give an example of building a parametric optimizer in MATLAB, which will run on a **Raspberry Pi**. The parametric optimizer will listen for requests on a **UDP socket**. We will then establish a connection to the optimizer, remotely, and consume the service. + +![Raspberry Pi](/optimization-engine/img/rpi.jpeg) + + + +**NOTE:** Please, read the documentation of the [MATLAB interface](../../../../docs/matlab-interface) and the [UDP communication protocol](../../../../docs/udp-sockets) first. 
+ +In MATLAB, we run the following script: + +```matlab +nu = 6; % number of decision variables +np = 2; % number of parameters + +u = casadi.SX.sym('u', nu); % decision variables +p = casadi.SX.sym('p', np); % parameters + +phi = rosenbrock(u, p); % cost function phi(u; p) (Rosenbrock) + +constraints = OpEnConstraints.make_ball_at_origin(1.0); + +build_config = open_build_config(); +build_config.target = 'rpi'; +build_config.build_mode = 'release'; +build_config.udp_interface.bind_address = '0.0.0.0'; +build_config.udp_interface.port = 3498; + +open_generate_code(build_config, constraints, u, p, phi); +``` + +Note that we have specified that the target hardware is a raspberry pi, `rpi` (equivalently `arm-unknown-linux-gnueabihf`), the bind address is `0.0.0.0` (so that the service will bind on all valid IP addresses) and the port is `3498`. We have also set the `build_mode` to `release` for maximum performance. + +After we execute the script, the code generator will have created a new project at + +```text +build/autogenerated_optimizer/ +``` + +We then copy the binary file from + +```text +build/autogenerated_optimizer/target/arm-unknown-linux-gnueabihf/release +``` + +onto our Raspberry Pi and execute it. + +Then, on our PC (assuming we run on Linux), we run: + +```bash +netcat -u {ip_address_of_raspberry} 3498 +``` + +where `{ip_address_of_raspberry}` is the IP address (or domain name) of our Raspberry. This will start a console, where we can send data to the PI. For example, we write + +```json +{"parameter":[50, 100]} +``` + +and press Enter, to receive: + + +```json +{ + "p" : [50.0, 100.0], + "u" : [1.2597243950, 1.3938323941, 1.6995734606, 2.6752011118, 7.0289056698, 49.3661825375], + "n" : 88, + "f" : -5.243242641581731, + "dt" : "1.207756ms" +} +``` + +So, **what do you think**? Let us know by providing some feedback (you can reach us live [**here**](../../../2019/03/06/talk-to-us)). 
+ +If you like **OpEn**, please give it [**a star on github**](https://github.com/alphaville/optimization-engine). + +If you find a bug, please create [**an issue on github**](https://github.com/alphaville/optimization-engine/issues/new). \ No newline at end of file diff --git a/docs/website/blog/2019-03-05-udp-sockets.md b/docs/website/blog/2019-03-05-udp-sockets.md new file mode 100644 index 00000000..0ba3ccd8 --- /dev/null +++ b/docs/website/blog/2019-03-05-udp-sockets.md @@ -0,0 +1,10 @@ +--- +title: OpEn UDP socket interface +author: Pantelis Sopasakis +authorURL: https://github.com/alphaville +authorImageURL: https://avatars.githubusercontent.com/u/125415?v=4 +--- + +You may now auto-generate an **Optimization Engine** module, which solves parametric optimization problems (such as model predictive control problems) and can be consumed over **UDP sockets**! Read more [here](https://alphaville.github.io/optimization-engine/docs/matlab-interface). This way, the parametric optimizer can be easily called from any programming language. More importantly, the code generation can be carried out in one line of code, while, if necessary, it can be fully configured. + +![UDP socket interface](/optimization-engine/img/udp_socket.png) \ No newline at end of file diff --git a/docs/website/blog/2019-03-06-talk-to-us.md b/docs/website/blog/2019-03-06-talk-to-us.md new file mode 100644 index 00000000..356e7bdd --- /dev/null +++ b/docs/website/blog/2019-03-06-talk-to-us.md @@ -0,0 +1,21 @@ +--- +title: Let's talk +author: Pantelis Sopasakis +authorURL: https://github.com/alphaville +authorImageURL: https://avatars.githubusercontent.com/u/125415?v=4 +--- + +![Chat with us](/optimization-engine/img/chat.jpeg) + + + + + +Are you interested in **OpEn**?
You may join our growing community: + +- Join us on [Discord](https://discord.gg/mfYpn4V) +- or on [Gitter](https://gitter.im/alphaville/optimization-engine) diff --git a/docs/website/blog/2019-03-15-pure-rust-optimization.md b/docs/website/blog/2019-03-15-pure-rust-optimization.md new file mode 100644 index 00000000..03c58c23 --- /dev/null +++ b/docs/website/blog/2019-03-15-pure-rust-optimization.md @@ -0,0 +1,12 @@ +--- +title: "OpEn: a pure Rust optimizer" +author: Pantelis Sopasakis +authorURL: https://github.com/alphaville +authorImageURL: https://avatars.githubusercontent.com/u/125415?v=4 +--- + +The majority of optimization packages in Rust, such as [IPOPT](https://crates.io/crates/ipopt), [OSQP](https://crates.io/crates/osqp), [NLOPT](https://crates.io/crates/nlopt), are essentially bindings (interfaces) to other software. There are a few pure-Rust packages, such as [rustimization](https://crates.io/crates/rustimization) and [argmin](https://crates.io/crates/argmin), but they implement algorithms which are not suitable for embedded nonconvex optimization. + +![Rust language](/optimization-engine/img/rust1.jpeg) + +**OpEn** is the first **pure-Rust** package diff --git a/docs/website/blog/2019-03-19-rust-robotics.md b/docs/website/blog/2019-03-19-rust-robotics.md new file mode 100644 index 00000000..5f3de67c --- /dev/null +++ b/docs/website/blog/2019-03-19-rust-robotics.md @@ -0,0 +1,17 @@ +--- +title: Rust for robots +author: Pantelis Sopasakis +authorURL: https://github.com/alphaville +authorImageURL: https://avatars.githubusercontent.com/u/125415?v=4 +--- + + +Is the **Rust** programming language the future of robotics? [Some](https://medium.com/luosrobotics/why-rust-is-the-future-of-robotics-81d7fb68fe37) claim that its memory and thread safety model and the fact that it can run on embedded and bare metal devices makes it the perfect candidate for robotics. 
+ +![rusty bot](/optimization-engine/img/rustybot.jpeg) + +Despite its young age, Rust has not gone unnoticed: a [stackoverflow survey](https://insights.stackoverflow.com/survey/2018/#most-loved-dreaded-and-wanted) revealed that Rust is the top most loved programming language and nowhere around the 25 most dreaded ones. + +Rust comes with all the perks of high-level languages (such as C++ and Java) related to code structure and organisation, the capabilities and efficiency of low-level languages (such as C), plus it is t**Rust**worthy. + +This is why we decided to write **OpEn** in Rust. \ No newline at end of file diff --git a/docs/website/blog/2019-03-21-fast-udp-connection.md b/docs/website/blog/2019-03-21-fast-udp-connection.md new file mode 100644 index 00000000..2f6606d4 --- /dev/null +++ b/docs/website/blog/2019-03-21-fast-udp-connection.md @@ -0,0 +1,13 @@ +--- +title: Faster UDP connection +author: Pantelis Sopasakis +authorURL: https://github.com/alphaville +authorImageURL: https://avatars.githubusercontent.com/u/125415?v=4 +--- + +After a recent update, the [UDP interface](/optimization-engine/docs/udp-sockets) of **OpEn** is now significantly faster. Make sure you use version `0.2.2` or later. + +![UDP socket interface](/optimization-engine/img/udp_socket.png) + +Coming up: TCP interface + diff --git a/docs/website/blog/2022-07-30-blog.md b/docs/website/blog/2022-07-30-blog.md new file mode 100644 index 00000000..fd42f151 --- /dev/null +++ b/docs/website/blog/2022-07-30-blog.md @@ -0,0 +1,12 @@ +--- +title: Autonomous Racing of Scale Vehicles at SMC22 +author: Giuseppe Silano +authorURL: https://giuseppesilano.net +authorImageURL: https://giuseppesilano.net/images/headshot.jpg +--- + +![Autonomous Racing of Scale Vehicles](/optimization-engine/img/f1-10-main-car_orig.png) + +Our paper titled "A Nonlinear Model Predictive Control Strategy for Autonomous Racing of Scale Vehicles" (authors: V. Cataffo, G. Silano, L. Iannelli, V. Puig and G. 
Glielmo) has been accepted for publication in IEEE International Conference on Systems, Man, and Cybernetics (SMC), 2022. + +A preprint is available here. diff --git a/docs/website/blog/2026-03-21-python-ocp-module.md b/docs/website/blog/2026-03-21-python-ocp-module.md new file mode 100644 index 00000000..c5b08b8b --- /dev/null +++ b/docs/website/blog/2026-03-21-python-ocp-module.md @@ -0,0 +1,34 @@ +--- +title: New Python OCP Module for Optimal Control and MPC +author: Pantelis Sopasakis +authorURL: https://github.com/alphaville +authorImageURL: https://avatars.githubusercontent.com/u/125415?v=4 +--- + +![State trajectories generated with the Python OCP module](/optimization-engine/img/ocp-states.png) + +OpEn now comes with a Python OCP module that facilitates the design of optimal control problems in a direct and intuitive way. + +With the new module, you can describe the key ingredients of an optimal control problem from Python, including: + +- stage and terminal costs +- system dynamics +- state and input constraints +- problem parameters and defaults + +This makes it much easier to formulate nonlinear optimal control problems and model predictive control (MPC) schemes before generating an embedded optimizer with OpEn. 
+ + + +The new OCP workflow is documented in the Python OCP guide: + +- [Getting started with the OCP module](/optimization-engine/docs/python-ocp-1) +- [OCP problem formulation](/optimization-engine/docs/python-ocp-2) +- [Building the optimizer](/optimization-engine/docs/python-ocp-3) +- [Running closed-loop simulations](/optimization-engine/docs/python-ocp-4) + +If you want to try it right away, you can also run the Colab notebook: + +- [Try the Python OCP module in Google Colab](https://colab.research.google.com/drive/17vbVUbqcah9seIg17aN6bW0-T15FWrBo?usp=sharing) + +This functionality was introduced in `opengen` version `0.10.0a1`, and it opens the door to a smoother workflow for designing optimal controllers and MPC applications directly in Python. diff --git a/docs/website/docusaurus.config.js b/docs/website/docusaurus.config.js new file mode 100644 index 00000000..59e6f016 --- /dev/null +++ b/docs/website/docusaurus.config.js @@ -0,0 +1,176 @@ +const {themes} = require('prism-react-renderer'); + +const baseUrl = '/optimization-engine/'; + +const users = [ + { + caption: 'alphaville', + image: 'img/box.png', + infoLink: 'https://alphaville.github.io', + pinned: true, + }, +]; + +function preprocessLegacyMarkdown({fileContent}) { + return fileContent + .replace(/\s*/gi, '') + .replace(/]*font-awesome[^>]*>\s*/gi, '') + .replace(/(!\[[^\]]*]\()\/optimization-engine\//g, '$1pathname:///optimization-engine/') + .replace( + /onclick="toggleCollapseExpand\('([^']+)',\s*'([^']+)',\s*'([^']+)'\)"/gi, + 'data-legacy-toggle-button="$1" data-legacy-toggle-target="$2" data-legacy-toggle-label="$3"', + ) + .replace(/target=(['"])blank\1/gi, 'target="_blank"') + .replace(/\n{3,}/g, '\n\n'); +} + +module.exports = { + title: 'OpEn', + tagline: 'Fast and Accurate Nonconvex Optimization', + favicon: 'img/box.png', + url: 'https://alphaville.github.io', + baseUrl, + organizationName: 'alphaville', + projectName: 'optimization-engine', + deploymentBranch: 'gh-pages', + 
onBrokenLinks: 'warn', + trailingSlash: false, + baseUrlIssueBanner: true, + markdown: { + format: 'detect', + hooks: { + onBrokenMarkdownLinks: 'warn', + }, + mdx1Compat: { + comments: true, + admonitions: true, + headingIds: true, + }, + preprocessor: preprocessLegacyMarkdown, + }, + themes: [], + presets: [ + [ + 'classic', + { + docs: { + path: '../content', + routeBasePath: 'docs', + sidebarPath: require.resolve('./sidebars.js'), + }, + blog: { + path: './blog', + showReadingTime: true, + onInlineAuthors: 'ignore', + onUntruncatedBlogPosts: 'ignore', + }, + theme: { + customCss: require.resolve('./src/css/custom.css'), + }, + }, + ], + ], + stylesheets: [ + 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css', + ], + scripts: [ + `${baseUrl}js/mathjax-config.js`, + { + src: 'https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js', + defer: true, + }, + { + src: `${baseUrl}js/legacy-docs.js`, + defer: true, + }, + { + src: 'https://buttons.github.io/buttons.js', + async: true, + }, + ], + customFields: { + users, + }, + themeConfig: { + image: 'img/open-functionality.jpg', + colorMode: { + defaultMode: 'light', + disableSwitch: true, + respectPrefersColorScheme: false, + }, + prism: { + theme: themes.github, + additionalLanguages: ['bash', 'matlab'], + }, + navbar: { + title: 'Optimization Engine', + logo: { + alt: 'OpEn logo', + src: 'img/box.png', + }, + items: [ + { + type: 'docSidebar', + sidebarId: 'docsSidebar', + position: 'left', + label: 'Docs', + }, + {to: '/blog', label: 'Blog', position: 'left'}, + { + href: 'https://docs.rs/optimization_engine/*/optimization_engine/', + label: 'Rust API', + position: 'right', + }, + { + href: 'https://alphaville.github.io/optimization-engine/api-dox/html/index.html', + label: 'Opengen API', + position: 'right', + }, + { + to: '/blog/2019/03/06/talk-to-us', + label: 'Chat', + position: 'right', + }, + { + href: 'https://github.com/alphaville/optimization-engine', + label: 
'GitHub', + position: 'right', + }, + ], + }, + footer: { + style: 'dark', + links: [ + { + title: 'Docs', + items: [ + {label: 'Getting Started', to: '/docs/open-intro'}, + {label: 'Python Interface', to: '/docs/python-interface'}, + {label: 'MATLAB Interface', to: '/docs/matlab-interface'}, + {label: 'Docker', to: '/docs/docker'}, + ], + }, + { + title: 'Community', + items: [ + {label: 'User Showcase', to: '/users'}, + {label: 'Discord community', href: 'https://discord.gg/mfYpn4V'}, + {label: 'Chat on Gitter', href: 'https://gitter.im/alphaville/optimization-engine'}, + ], + }, + { + title: 'More', + items: [ + {label: 'Blog', to: '/blog'}, + {label: 'GitHub', href: 'https://github.com/alphaville/optimization-engine'}, + {label: 'OpenHub', href: 'https://www.openhub.net/p/optimization-engine'}, + { + html: 'GitHub stars for alphaville/optimization-engine', + }, + ], + }, + ], + copyright: `Copyright © ${new Date().getFullYear()} Pantelis Sopasakis and Emil Fresk
    Box icon made by Freepik from Flaticon.`, + }, + }, +}; diff --git a/docs/website/package.json b/docs/website/package.json new file mode 100644 index 00000000..0dc7e221 --- /dev/null +++ b/docs/website/package.json @@ -0,0 +1,19 @@ +{ + "name": "optimization-engine-docs", + "private": true, + "scripts": { + "start": "docusaurus start", + "build": "docusaurus build", + "serve": "docusaurus serve", + "deploy": "docusaurus deploy", + "clear": "docusaurus clear" + }, + "license": "(MIT OR Apache-2.0)", + "devDependencies": { + "@docusaurus/core": "^3.9.2", + "@docusaurus/preset-classic": "^3.9.2", + "react": "^18.2.0", + "react-dom": "^18.2.0" + }, + "packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e" +} diff --git a/docs/website/publish.sh b/docs/website/publish.sh new file mode 100755 index 00000000..9af6f4d9 --- /dev/null +++ b/docs/website/publish.sh @@ -0,0 +1,5 @@ +#!/bin/bash +GIT_USER=alphaville \ + CURRENT_BRANCH=master \ + USE_SSH=true \ + yarn deploy diff --git a/docs/website/sidebars.js b/docs/website/sidebars.js new file mode 100644 index 00000000..a407782f --- /dev/null +++ b/docs/website/sidebars.js @@ -0,0 +1,43 @@ +module.exports = { + docsSidebar: [ + { + type: 'category', + label: 'OpEn Guide', + items: ['open-intro', 'installation', 'cite_open'], + }, + { + type: 'category', + label: 'Python', + items: [ + 'python-interface', + 'python-advanced', + 'python-c', + 'python-bindings', + 'python-tcp-ip', + 'python-ros2', + 'python-ros', + 'python-examples', + ], + }, + { + type: 'category', + label: 'Optimal Control', + items: ['python-ocp-1', 'python-ocp-2', 'python-ocp-3', 'python-ocp-4'], + }, + { + type: 'category', + label: 'Rust', + items: ['openrust-basic', 'openrust-alm', 'openrust-features', 'openrust-arithmetic'], + }, + { + type: 'category', + label: 'MATLAB', + items: ['matlab-interface', 'matlab-examples'], + }, + { + 
type: 'category', + label: 'Extras', + items: ['docker', 'algorithm', 'faq', 'contributing'], + }, + ], +}; diff --git a/docs/website/src/css/custom.css b/docs/website/src/css/custom.css new file mode 100644 index 00000000..568b4152 --- /dev/null +++ b/docs/website/src/css/custom.css @@ -0,0 +1,787 @@ +:root { + --ifm-color-primary: #7a1f1f; + --ifm-color-primary-dark: #6c1b1b; + --ifm-color-primary-darker: #641919; + --ifm-color-primary-darkest: #531414; + --ifm-color-primary-light: #882323; + --ifm-color-primary-lighter: #902525; + --ifm-color-primary-lightest: #a02c2c; + --ifm-code-font-size: 95%; + --ifm-font-family-base: "Avenir Next", "Segoe UI", sans-serif; + --ifm-heading-font-family: "Avenir Next", "Segoe UI", sans-serif; + --ifm-background-color: #fcfaf8; + --ifm-footer-background-color: #261414; + --ifm-navbar-background-color: rgba(122, 31, 31, 0.95); + --ifm-navbar-link-color: #fff8f3; + --ifm-navbar-link-hover-color: #ffd6bf; + --open-page-accent: #f1e2d5; + --open-page-border: #d9c1ae; + --open-page-surface: #fffdfb; + --open-page-muted: #70564b; + --open-page-shadow: 0 24px 60px rgba(86, 44, 28, 0.12); +} + +html[data-theme='dark'] { + --ifm-background-color: #fcfaf8; +} + +body { + background: + radial-gradient(circle at top left, rgba(241, 226, 213, 0.9), transparent 35%), + linear-gradient(180deg, #fffdfa 0%, #fcfaf8 100%); +} + +.navbar { + box-shadow: 0 10px 30px rgba(86, 44, 28, 0.12); +} + +.footer--dark { + background: + linear-gradient(180deg, rgba(38, 20, 20, 0.98), rgba(28, 14, 14, 0.98)), + #261414; +} + +.homePage { + padding-bottom: 4rem; +} + +.homeHero { + padding: 4rem 0 3rem; +} + +.homeHero__content, +.homeSection, +.simplePage__hero, +.simplePage__content { + width: min(1120px, calc(100% - 2rem)); + margin: 0 auto; +} + +.homeHero__content { + background: + linear-gradient(140deg, rgba(122, 31, 31, 0.98), rgba(60, 32, 21, 0.95)), + #7a1f1f; + border: 1px solid rgba(255, 224, 204, 0.18); + border-radius: 32px; + color: #fff8f3; + 
padding: 3rem; + box-shadow: var(--open-page-shadow); +} + +.homeHero__headline { + display: flex; + align-items: center; + gap: 1rem; +} + +.homeHero__logo { + width: 86px; + height: 86px; + flex-shrink: 0; +} + +.homeHero__eyebrow, +.homeSection__eyebrow { + margin: 0 0 0.35rem; + color: #c57f6e; + font-size: 0.9rem; + font-weight: 700; + letter-spacing: 0.12em; + text-transform: uppercase; +} + +.homeHero h1, +.simplePage__hero h1 { + margin: 0; + font-size: clamp(2.6rem, 5vw, 4.4rem); + line-height: 0.95; +} + +.homeHero__lead, +.simplePage__hero p { + max-width: 48rem; + margin: 1.25rem 0 0; + font-size: 1.15rem; + color: rgba(255, 248, 243, 0.9); +} + +.homeHero__actions { + display: flex; + flex-wrap: wrap; + gap: 0.75rem; + margin-top: 1.75rem; +} + +.homeHero__actions .button--secondary { + background: rgba(255, 248, 243, 0.12); + border-color: rgba(255, 248, 243, 0.28); + color: #fff8f3; +} + +.homeHero__actions .button--secondary:hover { + background: rgba(255, 248, 243, 0.2); +} + +.homeHero__stats { + display: grid; + grid-template-columns: repeat(3, minmax(0, 1fr)); + gap: 1rem; + margin-top: 2rem; +} + +.homeHero__stat { + background: rgba(255, 248, 243, 0.08); + border: 1px solid rgba(255, 248, 243, 0.14); + border-radius: 18px; + padding: 1rem 1.1rem; +} + +.homeHero__stat span { + display: block; + font-size: 0.78rem; + letter-spacing: 0.06em; + text-transform: uppercase; + color: rgba(255, 248, 243, 0.72); +} + +.homeHero__stat strong { + display: block; + margin-top: 0.35rem; + font-size: 1rem; + color: #fff8f3; +} + +.homeSection, +.simplePage__hero, +.simplePage__content { + padding: 3rem 0; +} + +.homeSection--alt { + background: rgba(255, 250, 245, 0.8); +} + +.homeSection__header { + max-width: 44rem; + margin-bottom: 1.5rem; +} + +.homeSection__header h2, +.homeSplit__copy h2 { + margin: 0; + color: #2f1a14; + font-size: clamp(2rem, 4vw, 3rem); + line-height: 1.02; +} + +.homeVideoCard h3 { + margin: 0; + color: #2f1a14; + font-size: 
1.4rem; + line-height: 1.2; +} + +.homeCardGrid, +.usersGrid, +.helpGrid { + display: grid; + grid-template-columns: repeat(3, minmax(0, 1fr)); + gap: 1.25rem; +} + +.homeCard, +.usersCard, +.homeVideoCard, +.homeMediaCard, +.homeCodeBlock { + background: var(--open-page-surface); + border: 1px solid var(--open-page-border); + border-radius: 15px; + box-shadow: var(--open-page-shadow); +} + +.homeCard { + padding: 1.5rem; +} + +.homeCard img { + width: 64px; + height: 64px; + margin-bottom: 1rem; +} + +.homeCard img.homeCard__image--xl { + width: min(100%, 240px); + height: auto; + max-height: none; +} + +.homeCard img.homeCard__image--feature { + width: min(100%, 360px); + height: auto; + max-height: none; +} + +.homeCard img.homeCard__image--benchmark { + width: min(100%, 430px); +} + +.homeCard h3, +.usersCard strong { + margin-bottom: 0.5rem; + color: #2f1a14; +} + +.homeCard p, +.homeSplit__copy p, +.homeVideoCard p, +.usersCard { + color: var(--open-page-muted); +} + +.homeSplit { + display: grid; + grid-template-columns: minmax(0, 1.1fr) minmax(320px, 0.9fr); + gap: 1.5rem; + align-items: start; +} + +.homeFeatureGrid { + display: grid; + grid-template-columns: repeat(2, minmax(0, 1fr)); + gap: 1.5rem; +} + +.homeFeatureFooter { + display: grid; + grid-template-columns: repeat(2, minmax(0, 1fr)); + gap: 1.5rem; + margin-top: 1.5rem; +} + +.homeFeatureDocsRow { + display: flex; + justify-content: center; + margin-top: 1.5rem; +} + +.homeCard--feature { + min-height: 100%; +} + +.homeCard--feature h3 { + font-size: 1.45rem; +} + +.homeCard--feature p { + font-size: 1.02rem; +} + +.homeCard--documented { + max-width: 440px; + width: 100%; + text-align: center; + background: transparent; + border: 0; + border-radius: 0; + box-shadow: none; + padding: 0.5rem 0; +} + +.homeCard--documented img { + margin-left: auto; + margin-right: auto; + width: 72px; + height: 72px; +} + +.homeCard--documented h3 { + font-size: 1.5rem; +} + +.homeCard--documented p { + 
margin-left: auto; + margin-right: auto; + max-width: 32rem; +} + +.homeCard--topic { + text-align: left; +} + +.homeCard--topic img { + width: min(100%, 320px); + height: auto; + max-height: none; +} + +.homeCard--topic h3 { + font-size: 1.35rem; +} + +.homeCard--topic p { + font-size: 0.96rem; + line-height: 1.55; +} + +.homeVideoSection { + width: min(920px, 100%); + margin: 0 auto; +} + +.homeVideoSection__card { + /* Spacing between "well documented" and "presentation at IFAC" */ + margin-top: -2.0rem; +} + +.homeOcpPromo { + display: grid; + grid-template-columns: minmax(0, 1.05fr) minmax(300px, 0.95fr); + gap: 1.5rem; + align-items: center; + width: min(1100px, calc(100% - 2rem)); + margin: 0 auto; +} + +.homeOcpPromo__content { + min-width: 0; +} + +.homeOcpPromo__content h2 { + margin: 0; + color: #2f1a14; + font-size: clamp(2rem, 4vw, 3rem); + line-height: 1.16; + margin-bottom: 0.85rem; +} + +.homeOcpPromo__content p { + color: var(--open-page-muted); +} + +.homeOcpPromo__visual { + min-width: 0; +} + +.homeOcpPromo__image { + width: 100%; +} + +.homeRos2Promo { + display: grid; + grid-template-columns: minmax(0, 1.05fr) minmax(320px, 0.95fr); + gap: 1.5rem; + align-items: center; + width: min(1100px, calc(100% - 2rem)); + margin: 0 auto; + padding: 2rem; + background: + linear-gradient(145deg, rgba(164, 62, 53, 0.88), rgba(141, 33, 183, 0.92)), + #843129; + border: 1px solid rgba(255, 224, 204, 0.2); + border-radius: 28px; + box-shadow: var(--open-page-shadow); +} + +.homeRos2Promo__content, +.homeRos2Promo__code { + min-width: 0; +} + +.homeRos2Promo__content h2 { + margin: 0 0 0.85rem; + color: #fff8f3; + font-size: clamp(2rem, 4vw, 3rem); + line-height: 1.08; +} + +.homeRos2Promo__content p { + color: rgba(255, 248, 243, 0.88); +} + +.homeRos2Promo__robot { + display: block; + width: 200px; + height: 200px; + margin: 0 auto 1rem; +} + +.homeRos2Promo__attribution { + margin: -0.4rem 0 0.9rem; + text-align: center; + font-size: 0.68rem; + line-height: 
1.25; +} + +.homeRos2Promo__attribution a { + color: rgba(255, 248, 243, 0.82); + text-decoration: none; +} + +.homeRos2Promo__attribution a:hover { + color: #fff8f3; + text-decoration: underline; +} + +.homeRos2Promo__codeBlock { + margin-top: 0; + background: rgba(255, 248, 243, 0.96); + border-color: rgba(255, 224, 204, 0.3); +} + +.homeRos2Promo__codeBlock .theme-code-block { + margin-bottom: 0; +} + +.homeDockerPromo { + display: grid; + grid-template-columns: minmax(0, 1.02fr) minmax(320px, 0.98fr); + gap: 1.5rem; + align-items: center; + width: min(1100px, calc(100% - 2rem)); + margin: 0 auto; +} + +.homeDockerPromo__content, +.homeDockerPromo__visual { + min-width: 0; +} + +.homeDockerPromo__content h2 { + margin: 0 0 0.85rem; + color: #2f1a14; + font-size: clamp(2rem, 4vw, 3rem); + line-height: 1.08; +} + +.homeDockerPromo__content p { + color: var(--open-page-muted); +} + +.homeDockerPromo__image { + display: block; + width: min(100%, 280px); + margin: 0 auto 1rem; +} + +.homeDockerPromo__codeBlock { + margin-top: 0; +} + +.homeDockerPromo__codeBlock .theme-code-block { + margin-bottom: 0; +} + +.homeSplit__copy, +.homeSplit__media { + min-width: 0; +} + +.homeMediaCard { + display: block; + width: 100%; + padding: 0; +} + +.homeCodeBlock { + margin-top: 1.5rem; + overflow: hidden; +} + +.homeCodeBlock pre { + margin: 0; +} + +.homeList { + padding-left: 1.2rem; +} + +.homeList li + li { + margin-top: 0.45rem; +} + +.homeVideoCard { + padding: 1.5rem; +} + +.homeVideoFrame { + position: relative; + width: 100%; + padding-top: 56.25%; + overflow: hidden; + border-radius: 18px; + margin-top: 1rem; +} + +.homeVideoFrame iframe { + position: absolute; + inset: 0; + width: 100%; + height: 100%; + border: 0; +} + +.simplePage__hero { + padding-bottom: 1rem; +} + +.simplePage__hero p { + color: var(--open-page-muted); +} + +.usersCard { + display: flex; + flex-direction: column; + align-items: center; + gap: 1rem; + padding: 1.5rem; + text-align: center; + 
text-decoration: none; +} + +.usersCard img { + width: 96px; + height: 96px; +} + +.helpGrid .homeCard { + display: flex; + flex-direction: column; + gap: 1rem; +} + +.markdown img, +.theme-doc-markdown img { + max-width: 100%; + height: auto; +} + +.markdown iframe, +.theme-doc-markdown iframe { + max-width: 100%; +} + +.markdown button[data-legacy-toggle-button], +.theme-doc-markdown button[data-legacy-toggle-button] { + width: 100%; + border: 0; + border-radius: 14px; + background: var(--ifm-color-primary); + color: #fff; + cursor: pointer; + display: inline-block; + font: inherit; + margin: 1rem 0 0.75rem; + padding: 0.85rem 1rem; + text-align: left; +} + +.markdown button[data-legacy-toggle-button]:hover, +.theme-doc-markdown button[data-legacy-toggle-button]:hover { + background: var(--ifm-color-primary-dark); +} + +.markdown button.but, +.theme-doc-markdown button.but { + width: 250px; + max-width: 100%; + display: inline-block; +} + +.homeCard__zoomButton { + width: auto; + margin: 0 0 1rem; + padding: 0; + border: 0; + background: transparent; + cursor: zoom-in; +} + +.homeCard__zoomButton:hover, +.homeCard__zoomButton:focus { + background: transparent; +} + +.homeCard__zoomButton img { + margin-bottom: 0; +} + +.mycontainer { + width: 100%; + border: 1px solid var(--open-page-border); + border-radius: 16px; + display: none; + padding: 1rem 1.1rem; + margin-bottom: 1rem; + background: #fffdf9; +} + +.alert { + border-radius: 16px; + border: 1px solid transparent; + margin: 1rem 0; + padding: 1rem 1.1rem; +} + +.alert-info { + color: #0d4f62; + background: #e6f7fb; + border-color: #b5e4ee; +} + +.alert-success { + color: #21563b; + background: #ebf8ef; + border-color: #c9eace; +} + +.alert-warning { + color: #7b5619; + background: #fff5de; + border-color: #f2d596; +} + +.alert-danger { + color: #812d2d; + background: #fdecec; + border-color: #f3c2c2; +} + +.imageZoomOverlay { + position: fixed; + inset: 0; + z-index: 1000; + display: flex; + align-items: 
center; + justify-content: center; + padding: 1.5rem; + background: rgba(27, 16, 14, 0.82); + backdrop-filter: blur(6px); +} + +.imageZoomOverlay__content { + position: relative; + width: min(1100px, 100%); + max-height: calc(100vh - 3rem); + padding: 1rem; + border: 1px solid rgba(255, 248, 243, 0.18); + border-radius: 24px; + background: #fffdf9; + box-shadow: 0 24px 80px rgba(0, 0, 0, 0.28); +} + +.imageZoomOverlay__content img { + display: block; + width: 100%; + height: auto; + max-height: calc(100vh - 7rem); + object-fit: contain; + border-radius: 16px; +} + +.imageZoomOverlay__close { + width: auto; + margin: 0 0 1rem auto; + border-radius: 999px; + padding: 0.55rem 0.9rem; +} + +@media (max-width: 996px) { + .homeHero__content { + padding: 2rem; + } + + .homeHero__headline, + .homeSplit { + grid-template-columns: 1fr; + display: grid; + } + + .homeHero__headline { + align-items: start; + } + + .homeHero__stats, + .homeCardGrid, + .homeFeatureGrid, + .usersGrid, + .helpGrid { + grid-template-columns: 1fr; + } + + .homeFeatureFooter { + grid-template-columns: 1fr; + } + + .homeFeatureDocsRow { + display: block; + } + + .homeCard--documented { + max-width: none; + } + + .homeOcpPromo, + .homeRos2Promo, + .homeDockerPromo { + grid-template-columns: 1fr; + } +} + +@media (min-width: 997px) { + .navbar { + backdrop-filter: blur(16px); + } +} + +@media (max-width: 996px) { + .navbar-sidebar, + .navbar-sidebar__items, + .navbar-sidebar__item.menu { + background: #f8eee7; + } + + .navbar-sidebar__brand, + .navbar-sidebar__back, + .navbar-sidebar__close, + .navbar-sidebar .menu__link, + .navbar-sidebar .menu__caret, + .navbar-sidebar .menu__link--sublist::after { + color: #221714; + } + + .navbar-sidebar .menu__link { + font-weight: 500; + } + + .navbar-sidebar .menu__link:hover, + .navbar-sidebar .menu__link--active, + .navbar-sidebar .menu__list-item-collapsible:hover { + background: rgba(122, 31, 31, 0.08); + color: #221714; + } +} + +@media (max-width: 640px) { + 
.homeHero { + padding-top: 2rem; + } + + .homeHero__content { + border-radius: 24px; + padding: 1.5rem; + } + + .homeHero__headline { + gap: 0.75rem; + } + + .homeHero__logo { + width: 64px; + height: 64px; + } + + .homeRos2Promo { + padding: 1.5rem; + } +} diff --git a/docs/website/src/pages/help.js b/docs/website/src/pages/help.js new file mode 100644 index 00000000..07aa0382 --- /dev/null +++ b/docs/website/src/pages/help.js @@ -0,0 +1,50 @@ +import React from 'react'; +import Layout from '@theme/Layout'; +import Link from '@docusaurus/Link'; + +const helpLinks = [ + { + title: 'Browse Docs', + description: 'Start with installation, the Python interface, and the optimal control tutorials.', + to: '/docs/open-intro', + }, + { + title: 'Join the Community', + description: 'Reach out on Discord or Gitter if you are stuck or want feedback.', + href: 'https://discord.gg/mfYpn4V', + }, + { + title: 'Stay Up to Date', + description: 'Follow the blog for release notes, examples, and project updates.', + to: '/blog', + }, +]; + +export default function Help() { + return ( + +
    +
    +

    Need help?

    +

    Support for docs, examples, and integration questions

    +

    + OpEn is maintained by a small team and community. These are the + fastest routes to get unstuck. +

    +
    + +
    + {helpLinks.map((item) => ( +
    +

    {item.title}

    +

    {item.description}

    + + Open + +
    + ))} +
    +
    +
    + ); +} diff --git a/docs/website/src/pages/index.js b/docs/website/src/pages/index.js new file mode 100644 index 00000000..fb458b1e --- /dev/null +++ b/docs/website/src/pages/index.js @@ -0,0 +1,483 @@ +import React, {useEffect, useState} from 'react'; +import Layout from '@theme/Layout'; +import Link from '@docusaurus/Link'; +import CodeBlock from '@theme/CodeBlock'; +import useBaseUrl from '@docusaurus/useBaseUrl'; + +const codeExample = String.raw`import opengen as og +import casadi.casadi as cs + +u = cs.SX.sym("u", 5) +p = cs.SX.sym("p", 2) +phi = og.functions.rosenbrock(u, p) +c = 1.5 * cs.cos(u[0]) - u[1] +bounds = og.constraints.Ball2(None, 1.5) + +problem = og.builder.Problem(u, p, phi) \ + .with_penalty_constraints(c) \ + .with_constraints(bounds) + +build_config = og.config.BuildConfiguration() \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_tcp_interface_config() + +meta = og.config.OptimizerMeta() \ + .with_optimizer_name("my_optimizer") + +solver_config = og.config.SolverConfiguration() \ + .with_tolerance(1e-5) + +builder = og.builder.OpEnOptimizerBuilder( + problem, + meta, + build_config, + solver_config, +) +builder.build()`; + +const ros2PromoCode = String.raw`ros2_config = og.config.RosConfiguration() \ + .with_package_name("my_ros_pkg") \ + .with_node_name("open_node_ros2") \ + .with_rate(10.0) + +build_config = og.config.BuildConfiguration() \ + .with_build_directory("my_optimizers") \ + .with_ros2(ros2_config)`; + +const dockerPromoCode = String.raw`docker pull alphaville/open:0.7.0` + +const heroStats = [ + {label: 'Core language', value: 'Rust'}, + {label: 'Primary uses', value: 'MPC, MHE, Robotics'}, + {label: 'Interfaces', value: 'Python, MATLAB, C/C++, ROS, TCP'}, +]; + +const featureCards = [ + { + title: 'Embeddable', + image: 'img/microchip.svg', + body: + 'All numerical routines are written in Rust, making OpEn a strong fit for embedded targets where speed, determinism, and memory safety matter.', + }, + 
{ + title: 'Accurate', + image: 'img/bullseye.svg', + body: + 'OpEn combines fast convergence with a practical problem formulation for nonconvex optimization, including augmented Lagrangian and penalty updates.', + }, + { + title: 'Fast', + image: 'img/rocket.svg', + body: + 'Benchmarks and applications show sub-millisecond performance in the right settings, enabling demanding control and estimation loops.', + }, +]; + +const ecosystemCards = [ + { + title: 'Design and Deploy', + image: 'img/about-open.png', + imageClassName: 'homeCard__image--feature', + zoomAlt: 'Detailed OpEn design and deploy workflow diagram', + body: + 'Formulate your problem in Python or MATLAB, generate a Rust optimizer, and consume it over TCP, C/C++, ROS, or native Rust.', + }, + { + title: 'Benchmarks', + image: 'img/openbenchmark.png', + imageClassName: 'homeCard__image--feature homeCard__image--benchmark', + zoomAlt: 'Detailed OpEn benchmark comparison figure', + body: + 'OpEn is built for real optimization workflows, from reproducible academic experiments to embedded deployments and hardware-in-the-loop tests.', + }, +]; + +const documentationCard = { + title: 'Well Documented', + image: 'img/saturn.png', + body: + 'The documentation covers installation, interfaces, optimal control tutorials, and end-to-end examples for robotics and autonomous systems.', +}; + +const mpcCard = { + title: 'Model Predictive Control', + image: 'img/mpc56.png', + body: [ + 'Model Predictive Control (MPC) is a powerful optimization-based control methodology. It has become a standard tool in control engineering because it can handle nonlinear dynamics and state or input constraints, but it also requires solving an optimization problem in real time under tight runtime limits.', + 'When the dynamics are nonlinear or the constraints are nonconvex, implementation becomes significantly harder. 
This is where OpEn helps: it offers extremely fast and robust numerical optimization methods tailored for embedded applications where both speed and memory usage matter.', + ], +}; + +const mheCard = { + title: 'Moving Horizon Estimation', + image: 'img/mhe.png', + body: [ + "Moving Horizon Estimation (MHE) is the bee's knees of nonlinear estimation: it is an optimization-based estimator for constrained nonlinear systems. MHE is backed by a strong theoretical bedrock that combines Bayesian estimation and dynamic programming; however, its applicability has been hampered by the associated computational burden and has limited its use to slow or linear dynamical systems.", + 'OpEn can unlock the huge potential of MHE and facilitate its use in robotics, automotive, aerospace and other applications with high sampling frequencies.', + ], +}; + +export default function Home() { + const baseUrl = useBaseUrl('/'); + const assetUrl = (path) => `${baseUrl}${path.replace(/^\//, '')}`; + const promoGif = assetUrl('img/open-promo.gif'); + const boxLogo = assetUrl('img/box.png'); + const dockerGif = assetUrl('img/docker.gif'); + const ocpStatesImage = assetUrl('img/ocp-states.png'); + const ros2RobotImage = assetUrl('img/ros2-robot.png'); + const [zoomedImage, setZoomedImage] = useState(null); + + useEffect(() => { + if (!zoomedImage) { + return undefined; + } + + function onKeyDown(event) { + if (event.key === 'Escape') { + setZoomedImage(null); + } + } + + window.addEventListener('keydown', onKeyDown); + return () => window.removeEventListener('keydown', onKeyDown); + }, [zoomedImage]); + + return ( + +
    +
    +
    +
    + OpEn logo +
    +

    Optimization Engine

    +

    Fast and accurate embedded nonconvex optimization

    +
    +
    +

    + Build high-performance optimizers for next-generation robotics, + autonomous vehicles, and other cyber-physical systems without + hand-writing solver infrastructure. +

    +
    + + Get Started + + + Read the Paper + +
    +
    + {heroStats.map((item) => ( +
    + {item.label} + {item.value} +
    + ))} +
    +
    +
    + +
    +
    +

    Why people use OpEn

    +

    Fast embedded optimization

    +
    +
    + {featureCards.map((card) => ( +
    + +

    {card.title}

    +

    {card.body}

    +
    + ))} +
    +
    + +
    +
    +
    +

    Easy code generation

    + +

    +

    + Install OpEn in Python with pip, model your + optimization problem with CasADi, and generate a solver that you + can run through TCP, C/C++, ROS, or Rust. +

    +

    + The docs in Installation and{' '} + Python Interface walk + through the flow end to end. +

    +

    + + Try it In Colab + +

    +
    +
    + Animated overview of OpEn code generation +
    +
    + +
    + +
    +
    +

    Capabilities

    +

    Blazingly fast embedded optimization

    +
    +
    + {ecosystemCards.map((card) => ( +
    + {card.zoomAlt ? ( + + ) : ( + + )} +

    {card.title}

    +

    {card.body}

    +
    + ))} +
    +
    +
    + +

    {mpcCard.title}

    + {mpcCard.body.map((paragraph) => ( +

    {paragraph}

    + ))} +
    +
    + +

    {mheCard.title}

    + {mheCard.body.map((paragraph) => ( +

    {paragraph}

    + ))} +
    +
    +
    +
    + +

    {documentationCard.title}

    +

    {documentationCard.body}

    + + Browse the Docs + +
    +
    +
    + +
    +
    +
    +
    +

    Presentation at IFAC 2020

    +

    + A short introduction to what OpEn does, how it works, and how + to use it in practice. +

    +
    +